transaction: remember original len(repo) instead of tracking added revs (API)...
Yuya Nishihara
r39337:5763216b default
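This commit changes how a transaction learns which revisions were added while it was open. Instead of the changelog pushing every new revision number into tr.changes['revs'] (see the _addrevision override removed at the end of the changelog.py hunk below), the transaction simply remembers how long the repository was when it started, and consumers derive the added range on demand. A minimal before/after sketch; the new key name 'origrepolen' is inferred from the commit summary and is not visible in this excerpt:

    # before: the changelog maintained an explicit range of added revs
    added = tr.changes['revs']

    # after: only the original length is recorded; the added revs are
    # everything from that point up to the current length of the repo
    added = pycompat.xrange(tr.changes['origrepolen'], len(repo))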
@@ -1,563 +1,547 @@
# changelog.py - changelog class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

from .i18n import _
from .node import (
    bin,
    hex,
    nullid,
)
from .thirdparty import (
    attr,
)

from . import (
    encoding,
    error,
    pycompat,
    revlog,
)
from .utils import (
    dateutil,
    stringutil,
)

_defaultextra = {'branch': 'default'}

def _string_escape(text):
    """
    >>> from .pycompat import bytechr as chr
    >>> d = {b'nl': chr(10), b'bs': chr(92), b'cr': chr(13), b'nul': chr(0)}
    >>> s = b"ab%(nl)scd%(bs)s%(bs)sn%(nul)sab%(cr)scd%(bs)s%(nl)s" % d
    >>> s
    'ab\\ncd\\\\\\\\n\\x00ab\\rcd\\\\\\n'
    >>> res = _string_escape(s)
    >>> s == stringutil.unescapestr(res)
    True
    """
    # subset of the string_escape codec
    text = text.replace('\\', '\\\\').replace('\n', '\\n').replace('\r', '\\r')
    return text.replace('\0', '\\0')

def decodeextra(text):
    """
    >>> from .pycompat import bytechr as chr
    >>> sorted(decodeextra(encodeextra({b'foo': b'bar', b'baz': chr(0) + b'2'})
    ...                    ).items())
    [('baz', '\\x002'), ('branch', 'default'), ('foo', 'bar')]
    >>> sorted(decodeextra(encodeextra({b'foo': b'bar',
    ...                                 b'baz': chr(92) + chr(0) + b'2'})
    ...                    ).items())
    [('baz', '\\\\\\x002'), ('branch', 'default'), ('foo', 'bar')]
    """
    extra = _defaultextra.copy()
    for l in text.split('\0'):
        if l:
            if '\\0' in l:
                # fix up \0 without getting into trouble with \\0
                l = l.replace('\\\\', '\\\\\n')
                l = l.replace('\\0', '\0')
                l = l.replace('\n', '')
            k, v = stringutil.unescapestr(l).split(':', 1)
            extra[k] = v
    return extra

def encodeextra(d):
    # keys must be sorted to produce a deterministic changelog entry
    items = [_string_escape('%s:%s' % (k, d[k])) for k in sorted(d)]
    return "\0".join(items)

def stripdesc(desc):
    """strip trailing whitespace and leading and trailing empty lines"""
    return '\n'.join([l.rstrip() for l in desc.splitlines()]).strip('\n')

class appender(object):
    '''the changelog index must be updated last on disk, so we use this class
    to delay writes to it'''
    def __init__(self, vfs, name, mode, buf):
        self.data = buf
        fp = vfs(name, mode)
        self.fp = fp
        self.offset = fp.tell()
        self.size = vfs.fstat(fp).st_size
        self._end = self.size

    def end(self):
        return self._end
    def tell(self):
        return self.offset
    def flush(self):
        pass

    @property
    def closed(self):
        return self.fp.closed

    def close(self):
        self.fp.close()

    def seek(self, offset, whence=0):
        '''virtual file offset spans real file and data'''
        if whence == 0:
            self.offset = offset
        elif whence == 1:
            self.offset += offset
        elif whence == 2:
            self.offset = self.end() + offset
        if self.offset < self.size:
            self.fp.seek(self.offset)

    def read(self, count=-1):
        '''only trick here is reads that span real file and data'''
        ret = ""
        if self.offset < self.size:
            s = self.fp.read(count)
            ret = s
            self.offset += len(s)
            if count > 0:
                count -= len(s)
        if count != 0:
            doff = self.offset - self.size
            self.data.insert(0, "".join(self.data))
            del self.data[1:]
            s = self.data[0][doff:doff + count]
            self.offset += len(s)
            ret += s
        return ret

    def write(self, s):
        self.data.append(bytes(s))
        self.offset += len(s)
        self._end += len(s)

    def __enter__(self):
        self.fp.__enter__()
        return self

    def __exit__(self, *args):
        return self.fp.__exit__(*args)

def _divertopener(opener, target):
    """build an opener that writes in 'target.a' instead of 'target'"""
    def _divert(name, mode='r', checkambig=False):
        if name != target:
            return opener(name, mode)
        return opener(name + ".a", mode)
    return _divert

def _delayopener(opener, target, buf):
    """build an opener that stores chunks in 'buf' instead of 'target'"""
    def _delay(name, mode='r', checkambig=False):
        if name != target:
            return opener(name, mode)
        return appender(opener, name, mode, buf)
    return _delay

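Taken together, these three helpers implement the delayed-write scheme used by delayupdate() further down: while a transaction is open, index writes are either diverted to a sidecar '00changelog.i.a' file or accumulated in a buffer, and appender makes the on-disk file plus the buffered tail look like one contiguous file. A rough sketch of the virtual-file behaviour, with an assumed store vfs and repository path purely for illustration:

    from mercurial import vfs as vfsmod

    vfs = vfsmod.vfs('/path/to/repo/.hg/store')   # hypothetical path
    buf = []
    fp = appender(vfs, '00changelog.i', 'a+b', buf)
    disksize = fp.end()                # size of the real file on disk
    fp.write(b'...new index data...')  # buffered in memory, not written out
    fp.seek(disksize - 4)
    chunk = fp.read(8)                 # read spans disk data and the buffer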
@attr.s
class _changelogrevision(object):
    # Extensions might modify _defaultextra, so let the constructor below pass
    # it in
    extra = attr.ib()
    manifest = attr.ib(default=nullid)
    user = attr.ib(default='')
    date = attr.ib(default=(0, 0))
    files = attr.ib(default=attr.Factory(list))
    description = attr.ib(default='')

class changelogrevision(object):
    """Holds results of a parsed changelog revision.

    Changelog revisions consist of multiple pieces of data, including
    the manifest node, user, and date. This object exposes a view into
    the parsed object.
    """

    __slots__ = (
        u'_offsets',
        u'_text',
    )

    def __new__(cls, text):
        if not text:
            return _changelogrevision(extra=_defaultextra)

        self = super(changelogrevision, cls).__new__(cls)
        # We could return here and implement the following as an __init__.
        # But doing it here is equivalent and saves an extra function call.

        # format used:
        # nodeid\n        : manifest node in ascii
        # user\n          : user, no \n or \r allowed
        # time tz extra\n : date (time is int or float, timezone is int)
        #                 : extra is metadata, encoded and separated by '\0'
        #                 : older versions ignore it
        # files\n\n       : files modified by the cset, no \n or \r allowed
        # (.*)            : comment (free text, ideally utf-8)
        #
        # changelog v0 doesn't use extra

        nl1 = text.index('\n')
        nl2 = text.index('\n', nl1 + 1)
        nl3 = text.index('\n', nl2 + 1)

        # The list of files may be empty, in which case nl3 is the first of
        # the double newline that precedes the description.
        if text[nl3 + 1:nl3 + 2] == '\n':
            doublenl = nl3
        else:
            doublenl = text.index('\n\n', nl3 + 1)

        self._offsets = (nl1, nl2, nl3, doublenl)
        self._text = text

        return self

    @property
    def manifest(self):
        return bin(self._text[0:self._offsets[0]])

    @property
    def user(self):
        off = self._offsets
        return encoding.tolocal(self._text[off[0] + 1:off[1]])

    @property
    def _rawdate(self):
        off = self._offsets
        dateextra = self._text[off[1] + 1:off[2]]
        return dateextra.split(' ', 2)[0:2]

    @property
    def _rawextra(self):
        off = self._offsets
        dateextra = self._text[off[1] + 1:off[2]]
        fields = dateextra.split(' ', 2)
        if len(fields) != 3:
            return None

        return fields[2]

    @property
    def date(self):
        raw = self._rawdate
        time = float(raw[0])
        # Various tools did silly things with the timezone.
        try:
            timezone = int(raw[1])
        except ValueError:
            timezone = 0

        return time, timezone

    @property
    def extra(self):
        raw = self._rawextra
        if raw is None:
            return _defaultextra

        return decodeextra(raw)

    @property
    def files(self):
        off = self._offsets
        if off[2] == off[3]:
            return []

        return self._text[off[2] + 1:off[3]].split('\n')

    @property
    def description(self):
        return encoding.tolocal(self._text[self._offsets[3] + 2:])

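Given the format described in __new__ above, a concrete (made-up) revision text and the fields changelogrevision extracts from it would look roughly like this:

    text = (b'a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0\n'  # manifest node, hex
            b'Jane Doe <jane@example.com>\n'               # user
            b'1537000000 -32400 branch:stable\n'           # time tz extra
            b'a.txt\n'                                     # files touched by
            b'b/c.txt\n'                                   # the changeset
            b'\n'
            b'an example commit message')                  # description

    c = changelogrevision(text)
    c.files    # ['a.txt', 'b/c.txt']
    c.extra    # {'branch': 'stable'}
    c.date     # (1537000000.0, -32400)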
class changelog(revlog.revlog):
    def __init__(self, opener, trypending=False):
        """Load a changelog revlog using an opener.

        If ``trypending`` is true, we attempt to load the index from a
        ``00changelog.i.a`` file instead of the default ``00changelog.i``.
        The ``00changelog.i.a`` file contains index (and possibly inline
        revision) data for a transaction that hasn't been finalized yet.
        It exists in a separate file to facilitate readers (such as
        hook processes) accessing data before a transaction is finalized.
        """
        if trypending and opener.exists('00changelog.i.a'):
            indexfile = '00changelog.i.a'
        else:
            indexfile = '00changelog.i'

        datafile = '00changelog.d'
        revlog.revlog.__init__(self, opener, indexfile, datafile=datafile,
                               checkambig=True, mmaplargeindex=True)

        if self._initempty:
            # changelogs don't benefit from generaldelta
            self.version &= ~revlog.FLAG_GENERALDELTA
            self._generaldelta = False

        # Delta chains for changelogs tend to be very small because entries
        # tend to be small and don't delta well with each other. So disable
        # delta chains.
        self._storedeltachains = False

        self._realopener = opener
        self._delayed = False
        self._delaybuf = None
        self._divert = False
        self.filteredrevs = frozenset()

    def tiprev(self):
        for i in pycompat.xrange(len(self) - 1, -2, -1):
            if i not in self.filteredrevs:
                return i

    def tip(self):
        """filtered version of revlog.tip"""
        return self.node(self.tiprev())

    def __contains__(self, rev):
        """filtered version of revlog.__contains__"""
        return (0 <= rev < len(self)
                and rev not in self.filteredrevs)

    def __iter__(self):
        """filtered version of revlog.__iter__"""
        if len(self.filteredrevs) == 0:
            return revlog.revlog.__iter__(self)

        def filterediter():
            for i in pycompat.xrange(len(self)):
                if i not in self.filteredrevs:
                    yield i

        return filterediter()

    def revs(self, start=0, stop=None):
        """filtered version of revlog.revs"""
        for i in super(changelog, self).revs(start, stop):
            if i not in self.filteredrevs:
                yield i

    def reachableroots(self, minroot, heads, roots, includepath=False):
        return self.index.reachableroots2(minroot, heads, roots, includepath)

    def headrevs(self):
        if self.filteredrevs:
            try:
                return self.index.headrevsfiltered(self.filteredrevs)
            # AttributeError covers non-c-extension environments and
            # old c extensions without filter handling.
            except AttributeError:
                return self._headrevs()

        return super(changelog, self).headrevs()

    def strip(self, *args, **kwargs):
        # XXX make something better than assert
        # We can't expect proper strip behavior if we are filtered.
        assert not self.filteredrevs
        super(changelog, self).strip(*args, **kwargs)

    def rev(self, node):
        """filtered version of revlog.rev"""
        r = super(changelog, self).rev(node)
        if r in self.filteredrevs:
            raise error.FilteredLookupError(hex(node), self.indexfile,
                                            _('filtered node'))
        return r

    def node(self, rev):
        """filtered version of revlog.node"""
        if rev in self.filteredrevs:
            raise error.FilteredIndexError(rev)
        return super(changelog, self).node(rev)

    def linkrev(self, rev):
        """filtered version of revlog.linkrev"""
        if rev in self.filteredrevs:
            raise error.FilteredIndexError(rev)
        return super(changelog, self).linkrev(rev)

    def parentrevs(self, rev):
        """filtered version of revlog.parentrevs"""
        if rev in self.filteredrevs:
            raise error.FilteredIndexError(rev)
        return super(changelog, self).parentrevs(rev)

    def flags(self, rev):
        """filtered version of revlog.flags"""
        if rev in self.filteredrevs:
            raise error.FilteredIndexError(rev)
        return super(changelog, self).flags(rev)

    def delayupdate(self, tr):
        "delay visibility of index updates to other readers"

        if not self._delayed:
            if len(self) == 0:
                self._divert = True
                if self._realopener.exists(self.indexfile + '.a'):
                    self._realopener.unlink(self.indexfile + '.a')
                self.opener = _divertopener(self._realopener, self.indexfile)
            else:
                self._delaybuf = []
                self.opener = _delayopener(self._realopener, self.indexfile,
                                           self._delaybuf)
        self._delayed = True
        tr.addpending('cl-%i' % id(self), self._writepending)
        tr.addfinalize('cl-%i' % id(self), self._finalize)

    def _finalize(self, tr):
        "finalize index updates"
        self._delayed = False
        self.opener = self._realopener
        # move redirected index data back into place
        if self._divert:
            assert not self._delaybuf
            tmpname = self.indexfile + ".a"
            nfile = self.opener.open(tmpname)
            nfile.close()
            self.opener.rename(tmpname, self.indexfile, checkambig=True)
        elif self._delaybuf:
            fp = self.opener(self.indexfile, 'a', checkambig=True)
            fp.write("".join(self._delaybuf))
            fp.close()
            self._delaybuf = None
        self._divert = False
        # split when we're done
        self._enforceinlinesize(tr)

    def _writepending(self, tr):
        "create a file containing the unfinalized state for pretxnchangegroup"
        if self._delaybuf:
            # make a temporary copy of the index
            fp1 = self._realopener(self.indexfile)
            pendingfilename = self.indexfile + ".a"
            # register as a temp file to ensure cleanup on failure
            tr.registertmp(pendingfilename)
            # write existing data
            fp2 = self._realopener(pendingfilename, "w")
            fp2.write(fp1.read())
            # add pending data
            fp2.write("".join(self._delaybuf))
            fp2.close()
            # switch modes so finalize can simply rename
            self._delaybuf = None
            self._divert = True
            self.opener = _divertopener(self._realopener, self.indexfile)

        if self._divert:
            return True

        return False

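The three methods above form the whole delayed-write lifecycle: delayupdate() swaps in one of the special openers and registers the other two with the transaction, _writepending() materializes the pending index as '00changelog.i.a' so pretxn hooks can see it, and _finalize() moves everything back into '00changelog.i'. A hedged outline of how a caller drives this (repository path assumed for illustration):

    from mercurial import hg, ui as uimod

    repo = hg.repository(uimod.ui.load(), b'/path/to/repo')  # assumed path
    cl = repo.changelog
    with repo.transaction(b'example') as tr:
        cl.delayupdate(tr)
        # ...revisions appended now land in cl._delaybuf (or in a diverted
        # '00changelog.i.a' file when the repository started out empty)...
        # tr runs cl._writepending() before pretxn hooks fire, and
        # cl._finalize() when the transaction is closed successfully.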
    def _enforceinlinesize(self, tr, fp=None):
        if not self._delayed:
            revlog.revlog._enforceinlinesize(self, tr, fp)

    def read(self, node):
        """Obtain data from a parsed changelog revision.

        Returns a 6-tuple of:

           - manifest node in binary
           - author/user as a localstr
           - date as a 2-tuple of (time, timezone)
           - list of files
           - commit message as a localstr
           - dict of extra metadata

        Unless you need to access all fields, consider calling
        ``changelogrevision`` instead, as it is faster for partial object
        access.
        """
        c = changelogrevision(self.revision(node))
        return (
            c.manifest,
            c.user,
            c.date,
            c.files,
            c.description,
            c.extra
        )

    def changelogrevision(self, nodeorrev):
        """Obtain a ``changelogrevision`` for a node or revision."""
        return changelogrevision(self.revision(nodeorrev))

    def readfiles(self, node):
        """
        short version of read that only returns the files modified by the cset
        """
        text = self.revision(node)
        if not text:
            return []
        last = text.index("\n\n")
        l = text[:last].split('\n')
        return l[3:]

    def add(self, manifest, files, desc, transaction, p1, p2,
            user, date=None, extra=None):
        # Convert to UTF-8 encoded bytestrings as the very first
        # thing: calling any method on a localstr object will turn it
        # into a str object and the cached UTF-8 string is thus lost.
        user, desc = encoding.fromlocal(user), encoding.fromlocal(desc)

        user = user.strip()
        # An empty username or a username with a "\n" will make the
        # revision text contain two "\n\n" sequences -> corrupt
        # repository since read cannot unpack the revision.
        if not user:
            raise error.RevlogError(_("empty username"))
        if "\n" in user:
            raise error.RevlogError(_("username %r contains a newline")
                                    % pycompat.bytestr(user))

        desc = stripdesc(desc)

        if date:
            parseddate = "%d %d" % dateutil.parsedate(date)
        else:
            parseddate = "%d %d" % dateutil.makedate()
        if extra:
            branch = extra.get("branch")
            if branch in ("default", ""):
                del extra["branch"]
            elif branch in (".", "null", "tip"):
                raise error.RevlogError(_('the name \'%s\' is reserved')
                                        % branch)
        if extra:
            extra = encodeextra(extra)
            parseddate = "%s %s" % (parseddate, extra)
        l = [hex(manifest), user, parseddate] + sorted(files) + ["", desc]
        text = "\n".join(l)
        return self.addrevision(text, transaction, len(self), p1, p2)

    def branchinfo(self, rev):
        """return the branch name and open/close state of a revision

        This function exists because creating a changectx object
        just to access this is costly."""
        extra = self.read(rev)[5]
        return encoding.tolocal(extra.get("branch")), 'close' in extra

    def _addrevision(self, node, rawtext, transaction, *args, **kwargs):
        # overlay over the standard revlog._addrevision to track the new
        # revision on the transaction.
        rev = len(self)
        node = super(changelog, self)._addrevision(node, rawtext, transaction,
                                                   *args, **kwargs)
        revs = transaction.changes.get('revs')
        if revs is not None:
            if revs:
                assert revs[-1] + 1 == rev
                revs = pycompat.membershiprange(revs[0], rev + 1)
            else:
                revs = pycompat.membershiprange(rev, rev + 1)
            transaction.changes['revs'] = revs
        return node
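This _addrevision override is what the commit deletes; it was the only reason the changelog needed to hook _addrevision at all. Its assert documents the invariant that makes the new approach safe: revisions are only ever appended, so tr.changes['revs'] was always the contiguous range from the first added revision to the tip, and a contiguous range is fully determined by the repository length at transaction start. A trivial sketch of the equivalence:

    # invariant maintained by the removed code: the tracked revs are
    # always [origlen, origlen + 1, ..., len(repo) - 1]
    origlen = 10                          # len(repo) at transaction start
    newlen = 15                           # len(repo) after adding five revs
    added = list(range(origlen, newlen))  # [10, 11, 12, 13, 14]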
@@ -1,2435 +1,2435 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import os
import random
import sys
import time
import weakref

from .i18n import _
from .node import (
    hex,
    nullid,
    short,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    changelog,
    color,
    context,
    dirstate,
    dirstateguard,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    manifest,
    match as matchmod,
    merge as mergemod,
    mergeutil,
    namespaces,
    narrowspec,
    obsolete,
    pathutil,
    phases,
    pushkey,
    pycompat,
    repository,
    repoview,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store,
    subrepoutil,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
)
from .utils import (
    interfaceutil,
    procutil,
    stringutil,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()

class _basefilecache(scmutil.filecache):
    """All filecache usage on a repo is done for logic that should be
    unfiltered
    """
    def __get__(self, repo, type=None):
        if repo is None:
            return self
        return super(_basefilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(_basefilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(_basefilecache, self).__delete__(repo.unfiltered())

class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""
    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, 'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)

class storecache(_basefilecache):
    """filecache for files in the store"""
    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, ''))

    def join(self, obj, fname):
        return obj.sjoin(fname)

def isfilecached(repo, name):
    """check if a repo has already cached the "name" filecache-ed property

    This returns a (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True

class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to unfiltered repos only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered
    version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
              'unbundle'}
legacycaps = moderncaps.union({'changegroupsubset'})

@interfaceutil.implementer(repository.ipeercommandexecutor)
class localcommandexecutor(object):
    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        if self._sent:
            raise error.ProgrammingError('callcommand() cannot be used after '
                                         'sendcommands()')

        if self._closed:
            raise error.ProgrammingError('callcommand() cannot be used after '
                                         'close()')

        # We don't need to support anything fancy. Just call the named
        # method on the peer and return a resolved future.
        fn = getattr(self._peer, pycompat.sysstr(command))

        f = pycompat.futures.Future()

        try:
            result = fn(**pycompat.strkwargs(args))
        except Exception:
            pycompat.future_set_exception_info(f, sys.exc_info()[1:])
        else:
            f.set_result(result)

        return f

    def sendcommands(self):
        self._sent = True

    def close(self):
        self._closed = True

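localcommandexecutor resolves each command inline against the local peer, so the future returned by callcommand() is already completed by the time the caller gets it. A typical usage sketch (repository path assumed for illustration):

    from mercurial import hg, ui as uimod

    peer = hg.peer(uimod.ui.load(), {}, b'/path/to/repo')  # assumed path
    with peer.commandexecutor() as e:
        f = e.callcommand(b'heads', {})   # future is already resolved here
        heads = f.result()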
@interfaceutil.implementer(repository.ipeercommands)
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        return self._repo.tryread('clonebundles.manifest')

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps,
                                          **kwargs)[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler('01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_('cannot perform stream clone against local '
                            'peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'),
                                      stringutil.forcebytestr(exc))

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.

@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(self._repo, missingroots=nodes,
                                      missingheads=self._repo.heads())
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(self._repo, missingroots=bases,
                                      missingheads=heads)
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    # End of baselegacywirecommands interface.

# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'

# A repository with the sparserevlog feature will have delta chains that
# can spread over a larger span. Sparse reading cuts these large spans into
# pieces, so that each piece isn't too big.
# Without the sparserevlog capability, reading from the repository could use
# huge amounts of memory, because the whole span would be read at once,
# including all the intermediate revisions that aren't pertinent for the chain.
# This is why once a repository has enabled sparse-read, it becomes required.
SPARSEREVLOG_REQUIREMENT = 'sparserevlog'

366 # Functions receiving (ui, features) that extensions can register to impact
366 # Functions receiving (ui, features) that extensions can register to impact
367 # the ability to load repositories with custom requirements. Only
367 # the ability to load repositories with custom requirements. Only
368 # functions defined in loaded extensions are called.
368 # functions defined in loaded extensions are called.
369 #
369 #
370 # The function receives a set of requirement strings that the repository
370 # The function receives a set of requirement strings that the repository
371 # is capable of opening. Functions will typically add elements to the
371 # is capable of opening. Functions will typically add elements to the
372 # set to reflect that the extension knows how to handle that requirements.
372 # set to reflect that the extension knows how to handle that requirements.
373 featuresetupfuncs = set()
373 featuresetupfuncs = set()
374
374
375 @interfaceutil.implementer(repository.completelocalrepository)
375 @interfaceutil.implementer(repository.completelocalrepository)
376 class localrepository(object):
376 class localrepository(object):
377
377
378 # obsolete experimental requirements:
378 # obsolete experimental requirements:
379 # - manifestv2: An experimental new manifest format that allowed
379 # - manifestv2: An experimental new manifest format that allowed
380 # for stem compression of long paths. Experiment ended up not
380 # for stem compression of long paths. Experiment ended up not
381 # being successful (repository sizes went up due to worse delta
381 # being successful (repository sizes went up due to worse delta
382 # chains), and the code was deleted in 4.6.
382 # chains), and the code was deleted in 4.6.
383 supportedformats = {
383 supportedformats = {
384 'revlogv1',
384 'revlogv1',
385 'generaldelta',
385 'generaldelta',
386 'treemanifest',
386 'treemanifest',
387 REVLOGV2_REQUIREMENT,
387 REVLOGV2_REQUIREMENT,
388 SPARSEREVLOG_REQUIREMENT,
388 SPARSEREVLOG_REQUIREMENT,
389 }
389 }
390 _basesupported = supportedformats | {
390 _basesupported = supportedformats | {
391 'store',
391 'store',
392 'fncache',
392 'fncache',
393 'shared',
393 'shared',
394 'relshared',
394 'relshared',
395 'dotencode',
395 'dotencode',
396 'exp-sparse',
396 'exp-sparse',
397 'internal-phase'
397 'internal-phase'
398 }
398 }
399 openerreqs = {
399 openerreqs = {
400 'revlogv1',
400 'revlogv1',
401 'generaldelta',
401 'generaldelta',
402 'treemanifest',
402 'treemanifest',
403 }
403 }
404
404
    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed, so we keep them excluded for
        # now.
        'hgrc',
        'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        'cache/',
        # XXX shouldn't the dirstate be covered by the wlock?
        'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        'bisect.state',
    }

    def __init__(self, baseui, path, create=False, intents=None):
        self.requirements = set()
        self.filtername = None
        # wvfs: rooted at the repository root, used to access the working copy
        self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
        # vfs: rooted at .hg, used to access repo files outside of .hg/store
        self.vfs = None
        # svfs: usually rooted at .hg/store, used to access repository history
        # If this is a shared repository, this vfs may point to another
        # repository's .hg/store directory.
        self.svfs = None
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        self.auditor = pathutil.pathauditor(
            self.root, callback=self._checknested)
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        self.nofsauditor = pathutil.pathauditor(
            self.root, callback=self._checknested, realfs=False, cached=True)
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.vfs.join("hgrc"), self.root)
            self._loadextensions()
        except IOError:
            pass

        if featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported
        color.setup(self.ui)

        # Add compression engines.
        for name in util.compengines:
            engine = util.compengines[name]
            if engine.revlogheader():
                self.supported.add('exp-compression-%s' % name)

        if not self.vfs.isdir():
            if create:
                self.requirements = newreporequirements(self)

                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)

                if 'store' in self.requirements:
                    self.vfs.mkdir("store")

                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
            else:
                try:
                    self.vfs.stat()
                except OSError as inst:
                    if inst.errno != errno.ENOENT:
                        raise
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                self.requirements = scmutil.readrequires(
                    self.vfs, self.supported)
            except IOError as inst:
                if inst.errno != errno.ENOENT:
                    raise

        cachepath = self.vfs.join('cache')
        self.sharedpath = self.path
        try:
            sharedpath = self.vfs.read("sharedpath").rstrip('\n')
            if 'relshared' in self.requirements:
                sharedpath = self.vfs.join(sharedpath)
            vfs = vfsmod.vfs(sharedpath, realpath=True)
            cachepath = vfs.join('cache')
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

        if 'exp-sparse' in self.requirements and not sparse.enabled:
            raise error.RepoError(_('repository is using sparse feature but '
                                    'sparse is not enabled; enable the '
                                    '"sparse" extension to access'))

        self.store = store.store(
            self.requirements, self.sharedpath,
            lambda base: vfsmod.vfs(base, cacheaudited=True))
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
        self.cachevfs.createmode = self.store.createmode
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else: # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)
        self._applyopenerreqs()
        if create:
            self._writerequirements()

        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # holds sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)
        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (repo is None
                or not util.safehasattr(repo, '_wlockref')
                or not util.safehasattr(repo, '_lockref')):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1:]
            if path.startswith('cache/'):
                msg = 'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
            if path.startswith('journal.'):
                # journal is covered by 'lock'
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn('write with no lock: "%s"' % path,
                                      stacklevel=2, config='check-locks')
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn('write with no wlock: "%s"' % path,
                                  stacklevel=2, config='check-locks')
            return ret
        return checkvfs

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)
        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, '_lockref'):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1:]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn('write with no lock: "%s"' % path,
                                  stacklevel=3)
            return ret
        return checksvfs

    def close(self):
        self._writecaches()

    def _loadextensions(self):
        extensions.loadall(self.ui)

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool('experimental', 'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
                                                              role='client'))
            caps.add('bundle2=' + urlreq.quote(capsblob))
        return caps

    def _applyopenerreqs(self):
        self.svfs.options = dict((r, 1) for r in self.requirements
                                 if r in self.openerreqs)
        # experimental config: format.chunkcachesize
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.svfs.options['chunkcachesize'] = chunkcachesize
        # experimental config: format.maxchainlen
        maxchainlen = self.ui.configint('format', 'maxchainlen')
        if maxchainlen is not None:
            self.svfs.options['maxchainlen'] = maxchainlen
        # experimental config: format.manifestcachesize
        manifestcachesize = self.ui.configint('format', 'manifestcachesize')
        if manifestcachesize is not None:
            self.svfs.options['manifestcachesize'] = manifestcachesize
        deltabothparents = self.ui.configbool('storage',
            'revlog.optimize-delta-parent-choice')
        self.svfs.options['deltabothparents'] = deltabothparents
        self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
        chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
        if 0 <= chainspan:
            self.svfs.options['maxdeltachainspan'] = chainspan
        mmapindexthreshold = self.ui.configbytes('experimental',
                                                 'mmapindexthreshold')
        if mmapindexthreshold is not None:
            self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
        withsparseread = self.ui.configbool('experimental', 'sparse-read')
        srdensitythres = float(self.ui.config('experimental',
                                              'sparse-read.density-threshold'))
        srmingapsize = self.ui.configbytes('experimental',
                                           'sparse-read.min-gap-size')
        self.svfs.options['with-sparse-read'] = withsparseread
        self.svfs.options['sparse-read-density-threshold'] = srdensitythres
        self.svfs.options['sparse-read-min-gap-size'] = srmingapsize
        sparserevlog = SPARSEREVLOG_REQUIREMENT in self.requirements
        self.svfs.options['sparse-revlog'] = sparserevlog
        if sparserevlog:
            self.svfs.options['generaldelta'] = True

        for r in self.requirements:
            if r.startswith('exp-compression-'):
                self.svfs.options['compengine'] = r[len('exp-compression-'):]

        # TODO move "revlogv2" to openerreqs once finalized.
        if REVLOGV2_REQUIREMENT in self.requirements:
            self.svfs.options['revlogv2'] = True

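    # Illustrative hgrc snippet (a sketch with arbitrary example values)
    # exercising the experimental options read above:
    #
    #   [experimental]
    #   sparse-read = yes
    #   sparse-read.density-threshold = 0.50
    #   sparse-read.min-gap-size = 256KB
    #   maxdeltachainspan = 16MB
    #   mmapindexthreshold = 1MB
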
    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository"""
        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)

    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on the changelog. What we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in the filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache('00changelog.i')
    def changelog(self):
        return changelog.changelog(self.svfs,
                                   trypending=txnutil.mayhavepending(self.root))

    def _constructmanifest(self):
        # This is a temporary function while we migrate from manifest to
        # manifestlog. It allows bundlerepo and unionrepo to intercept the
        # manifest creation.
        return manifest.manifestrevlog(self.svfs)

    @storecache('00manifest.i')
    def manifestlog(self):
        return manifest.manifestlog(self.svfs, self)

    @repofilecache('dirstate')
    def dirstate(self):
        return self._makedirstate()

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate, sparsematchfn)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        source = self
        if self.shared():
            from . import hg
            source = hg.sharedreposource(self)
        return narrowspec.load(source)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always(self.root, '')
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    # TODO(martinvonz): make this property-like instead?
    def narrowmatch(self):
        return self._narrowmatch

    def setnarrowpats(self, newincludes, newexcludes):
        target = self
        if self.shared():
            from . import hg
            target = hg.sharedreposource(self)
        narrowspec.save(target, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [context.changectx(self, i)
                    for i in pycompat.xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        try:
            return context.changectx(self, changeid)
        except error.WdirUnsupported:
            return context.workingctx(self)
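
    # Illustrative lookups handled above (a sketch, not an exhaustive list):
    #   repo[None]    -> workingctx for the working directory
    #   repo[0]       -> changectx for revision 0
    #   repo['tip']   -> changectx resolved through symbol lookup
    #   repo[0:2]     -> list of changectxs, skipping filtered revisions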

    def __contains__(self, changeid):
        """True if the given changeid exists

        error.AmbiguousPrefixLookupError is raised if an ambiguous node
        is specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        expr = revsetlang.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)
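
    # Example usage (a sketch): formatspec escapes typed arguments, e.g.
    #   repo.revs('ancestors(%d)', rev)      # %d interpolates an integer
    #   repo.revs('%ld and merge()', revs)   # %ld interpolates an int list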

    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if user:
            m = revset.matchany(self.ui, specs,
                                lookup=revset.lookupfn(self),
                                localalias=localalias)
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)
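
    # Example usage (a sketch; 'mytag' is a hypothetical alias that a
    # localalias entry would override if the user defined one):
    #   repo.anyrevs(['tip', 'mytag()'], user=True,
    #                localalias={'mytag': 'tag()'})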

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tag-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = dict((tag, 'global') for tag in alltags)

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]
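
    # Example usage (a sketch): walking branch heads via the cache above:
    #   for branch, heads in repo.branchmap().iteritems():
    #       for head in heads:
    #           repo.ui.write('%s %s\n' % (branch, hex(head)))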

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        return scmutil.revsymbol(self, key).node()

    def lookupbranch(self, key):
        if key in self.branchmap():
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        nm = cl.nodemap
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)

    def setparents(self, p1, p2=nullid):
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records; the dirstate cannot do it, as it
                # requires access to the parents' manifests. Preserve them
                # only for entries added to the first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid,
                               changectx=changectx)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

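    # Illustrative hgrc snippet (a sketch) wiring the encode/decode filters
    # loaded above; the 'pipe:' prefix runs an external command over the data:
    #
    #   [encode]
    #   *.gz = pipe: gunzip
    #
    #   [decode]
    #   *.gz = pipe: gzip
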
    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose,
                            **kwargs)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        ha = hex(hashlib.sha1(idbase).digest())
        txnid = 'TXN:' + ha
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movements from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup application, but that fails
        # to cope with cases where a transaction exposes new heads without a
        # changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature so we disable it by
        # default. The flag
1289 # will be removed when we are happy with the performance impact.
1289 # will be removed when we are happy with the performance impact.
1290 #
1290 #
1291 # Once this feature is no longer experimental move the following
1291 # Once this feature is no longer experimental move the following
1292 # documentation to the appropriate help section:
1292 # documentation to the appropriate help section:
1293 #
1293 #
1294 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
1294 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
1295 # tags (new or changed or deleted tags). In addition the details of
1295 # tags (new or changed or deleted tags). In addition the details of
1296 # these changes are made available in a file at:
1296 # these changes are made available in a file at:
1297 # ``REPOROOT/.hg/changes/tags.changes``.
1297 # ``REPOROOT/.hg/changes/tags.changes``.
1298 # Make sure you check for HG_TAG_MOVED before reading that file as it
1298 # Make sure you check for HG_TAG_MOVED before reading that file as it
1299 # might exist from a previous transaction even if no tag were touched
1299 # might exist from a previous transaction even if no tag were touched
1300 # in this one. Changes are recorded in a line base format::
1300 # in this one. Changes are recorded in a line base format::
1301 #
1301 #
1302 # <action> <hex-node> <tag-name>\n
1302 # <action> <hex-node> <tag-name>\n
1303 #
1303 #
1304 # Actions are defined as follow:
1304 # Actions are defined as follow:
1305 # "-R": tag is removed,
1305 # "-R": tag is removed,
1306 # "+A": tag is added,
1306 # "+A": tag is added,
1307 # "-M": tag is moved (old value),
1307 # "-M": tag is moved (old value),
1308 # "+M": tag is moved (new value),
1308 # "+M": tag is moved (new value),
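        #
        # As an illustration only (a hypothetical hook, not shipped with
        # Mercurial), a Python ``txnclose`` hook could consume that file
        # roughly like this, assuming the transaction's hook arguments are
        # exposed to Python hooks as keyword arguments:
        #
        #   def reporttags(ui, repo, **kwargs):
        #       if not kwargs.get('tag_moved'):
        #           return
        #       with repo.vfs('changes/tags.changes') as fp:
        #           for line in fp:
        #               action, node, name = line.split(' ', 2)
        #               ui.write('%s %s\n' % (action, name.rstrip('\n')))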
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
        if desc != 'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()
            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # note: we compare lists here.
                # As we do it only once, building a set would not be cheaper.
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs['tag_moved'] = '1'
                    with repo.vfs('changes/tags.changes', 'w',
                                  atomictemp=True) as changesfile:
                        # note: we do not register the file to the
                        # transaction because we need it to still exist
                        # when the transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition, we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to
            # track other families of changes (bookmarks, phases,
            # obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            if repo.ui.configbool('experimental', 'single-head-per-branch'):
                scmutil.enforcesinglehead(repo, tr2, desc)
            if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
                for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook('pretxnclose-bookmark', throw=True,
                              txnname=desc,
                              **pycompat.strkwargs(args))
            if hook.hashook(repo.ui, 'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for rev, (old, new) in tr.changes['phases'].items():
                    args = tr.hookargs.copy()
                    node = hex(cl.node(rev))
                    args.update(phases.preparehookargs(node, old, new))
                    repo.hook('pretxnclose-phase', throw=True, txnname=desc,
                              **pycompat.strkwargs(args))

            repo.hook('pretxnclose', throw=True,
                      txnname=desc, **pycompat.strkwargs(tr.hookargs))
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because in-memory
                # changes aren't written out at transaction close if
                # tr.addfilegenerator (via dirstate.write or so) isn't
                # invoked while the transaction is running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, 'journal.narrowspec')
                repo.dirstate.restorebackup(None, 'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn,
                                     checkambigfiles=_cachedfiles,
                                     name=desc)
        tr.changes['origrepolen'] = len(self)
        tr.changes['obsmarkers'] = set()
        tr.changes['phases'] = {}
        tr.changes['bookmarks'] = {}

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clones, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc():
                repo = reporef()
                if hook.hashook(repo.ui, 'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes['bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook('txnclose-bookmark', throw=False,
                                  txnname=desc, **pycompat.strkwargs(args))

                if hook.hashook(repo.ui, 'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(tr.changes['phases'].items())
                    for rev, (old, new) in phasemv:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook('txnclose-phase', throw=False, txnname=desc,
                                  **pycompat.strkwargs(args))

                repo.hook('txnclose', throw=False, txnname=desc,
                          **pycompat.strkwargs(hookargs))
            reporef()._afterlock(hookfunc)
        tr.addfinalize('txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction
        # summary reports registered via scmutil.registersummarycallback()
        # whose names are 00-txnreport etc. That way, the caches will be warm
        # when the callbacks run.
        tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **pycompat.strkwargs(tr2.hookargs))
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if the transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr

    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
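
    # For instance, ``undoname('journal')`` yields 'undo' and
    # ``undoname('journal.dirstate')`` yields 'undo.dirstate', so each
    # journal file above has a matching undo file after a successful
    # transaction.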

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, 'journal.dirstate')
        narrowspec.savebackup(self, 'journal.narrowspec')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))
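
    # Example (illustrative): on a repository with 42 revisions, opening
    # ``repo.transaction('commit')`` leaves ``.hg/journal.desc`` containing:
    #
    #   42
    #   commit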

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn,
                                     checkambigfiles=_cachedfiles)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
                             checkambigfiles=_cachedfiles)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting the already restored one
            dsguard.close()

            narrowspec.restorebackup(self, 'undo.narrowspec')
            self.dirstate.restorebackup(None, 'undo.dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating caches

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)
        return updater
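
    # A sketch of how a third-party extension might augment this logic (the
    # ``_mycache`` attribute is hypothetical; the wrapping pattern assumes
    # ``extensions.wrapfunction``):
    #
    #   from mercurial import extensions, localrepo
    #
    #   def _wrapped(orig, repo, newtransaction):
    #       updater = orig(repo, newtransaction)
    #       def extupdater(tr):
    #           updater(tr)
    #           repo._mycache.update(tr)  # warm the extension's own cache
    #       return extupdater
    #
    #   extensions.wrapfunction(localrepo.localrepository,
    #                           '_buildcacheupdater', _wrapped)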

    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction closed, the transaction
        will be available in the 'tr' argument. This can be used to
        selectively update caches relevant to the changes in that transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data, even the ones usually loaded more lazily.
        """
        if tr is not None and tr.hookargs.get('source') == 'strip':
            # During strip, many caches are invalid but
            # a later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes['origrepolen'] < len(self):
            # updating the unfiltered branchmap should refresh all the others,
            self.ui.debug('updating the branch cache\n')
            branchmap.updatecache(self.filtered('served'))

        if full:
            rbc = self.revbranchcache()
            for r in self.changelog:
                rbc.branchinfo(r)
            rbc.write()

            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self['.'].parents():
                ctx.manifest() # accessing the manifest is enough

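    # For a full warm-up from the command line, ``hg debugupdatecaches``
    # effectively boils down to ``repo.updatecaches(full=True)`` (an
    # illustrative note, not part of the original source).
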
    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. an incomplete fncache causes unintentional failure, but
        a redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue
            if (k == 'changelog' and
                self.currenttransaction() and
                self.changelog._delayed):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == r'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)

        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint("ui", "timeout")
            warntimeout = self.ui.configint("ui", "timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool('ui', 'signal-safe-lock')

        l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
                            releasefn=releasefn,
                            acquirefn=acquirefn, desc=desc,
                            inheritchecker=inheritchecker,
                            parentlock=parentlock,
                            signalsafe=signalsafe)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()
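
    # e.g. ``repo._afterlock(lambda: repo.ui.status('fully unlocked\n'))``
    # runs the callback once the outermost lock is released, or immediately
    # if no lock is currently held (illustrative usage, not from the original
    # source).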

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l
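
    # The correct acquisition order, sketched (assuming an existing ``repo``
    # and that locks and transactions are used as context managers):
    #
    #   with repo.wlock():        # always take wlock first
    #       with repo.lock():     # then the store lock
    #           with repo.transaction('my-operation') as tr:
    #               pass          # mutate the store here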
1763
1763
1764 def _wlockchecktransaction(self):
1764 def _wlockchecktransaction(self):
1765 if self.currenttransaction() is not None:
1765 if self.currenttransaction() is not None:
1766 raise error.LockInheritanceContractViolation(
1766 raise error.LockInheritanceContractViolation(
1767 'wlock cannot be inherited in the middle of a transaction')
1767 'wlock cannot be inherited in the middle of a transaction')
1768
1768
1769 def wlock(self, wait=True):
1769 def wlock(self, wait=True):
1770 '''Lock the non-store parts of the repository (everything under
1770 '''Lock the non-store parts of the repository (everything under
1771 .hg except .hg/store) and return a weak reference to the lock.
1771 .hg except .hg/store) and return a weak reference to the lock.
1772
1772
1773 Use this before modifying files in .hg.
1773 Use this before modifying files in .hg.
1774
1774
1775 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1775 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1776 'wlock' first to avoid a dead-lock hazard.'''
1776 'wlock' first to avoid a dead-lock hazard.'''
1777 l = self._wlockref and self._wlockref()
1777 l = self._wlockref and self._wlockref()
1778 if l is not None and l.held:
1778 if l is not None and l.held:
1779 l.lock()
1779 l.lock()
1780 return l
1780 return l
1781
1781
1782 # We do not need to check for non-waiting lock acquisition. Such
1782 # We do not need to check for non-waiting lock acquisition. Such
1783 # acquisition would not cause dead-lock as they would just fail.
1783 # acquisition would not cause dead-lock as they would just fail.
1784 if wait and (self.ui.configbool('devel', 'all-warnings')
1784 if wait and (self.ui.configbool('devel', 'all-warnings')
1785 or self.ui.configbool('devel', 'check-locks')):
1785 or self.ui.configbool('devel', 'check-locks')):
1786 if self._currentlock(self._lockref) is not None:
1786 if self._currentlock(self._lockref) is not None:
1787 self.ui.develwarn('"wlock" acquired after "lock"')
1787 self.ui.develwarn('"wlock" acquired after "lock"')
1788
1788
1789 def unlock():
1789 def unlock():
1790 if self.dirstate.pendingparentchange():
1790 if self.dirstate.pendingparentchange():
1791 self.dirstate.invalidate()
1791 self.dirstate.invalidate()
1792 else:
1792 else:
1793 self.dirstate.write(None)
1793 self.dirstate.write(None)
1794
1794
1795 self._filecache['dirstate'].refresh()
1795 self._filecache['dirstate'].refresh()
1796
1796
1797 l = self._lock(self.vfs, "wlock", wait, unlock,
1797 l = self._lock(self.vfs, "wlock", wait, unlock,
1798 self.invalidatedirstate, _('working directory of %s') %
1798 self.invalidatedirstate, _('working directory of %s') %
1799 self.origroot,
1799 self.origroot,
1800 inheritchecker=self._wlockchecktransaction,
1800 inheritchecker=self._wlockchecktransaction,
1801 parentenvvar='HG_WLOCK_LOCKER')
1801 parentenvvar='HG_WLOCK_LOCKER')
1802 self._wlockref = weakref.ref(l)
1802 self._wlockref = weakref.ref(l)
1803 return l
1803 return l
1804
1804
1805 def _currentlock(self, lockref):
1805 def _currentlock(self, lockref):
1806 """Returns the lock if it's held, or None if it's not."""
1806 """Returns the lock if it's held, or None if it's not."""
1807 if lockref is None:
1807 if lockref is None:
1808 return None
1808 return None
1809 l = lockref()
1809 l = lockref()
1810 if l is None or not l.held:
1810 if l is None or not l.held:
1811 return None
1811 return None
1812 return l
1812 return l
1813
1813
1814 def currentwlock(self):
1814 def currentwlock(self):
1815 """Returns the wlock if it's held, or None if it's not."""
1815 """Returns the wlock if it's held, or None if it's not."""
1816 return self._currentlock(self._wlockref)
1816 return self._currentlock(self._wlockref)
1817
1817
1818 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1818 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1819 """
1819 """
1820 commit an individual file as part of a larger transaction
1820 commit an individual file as part of a larger transaction
1821 """
1821 """
1822
1822
1823 fname = fctx.path()
1823 fname = fctx.path()
1824 fparent1 = manifest1.get(fname, nullid)
1824 fparent1 = manifest1.get(fname, nullid)
1825 fparent2 = manifest2.get(fname, nullid)
1825 fparent2 = manifest2.get(fname, nullid)
1826 if isinstance(fctx, context.filectx):
1826 if isinstance(fctx, context.filectx):
1827 node = fctx.filenode()
1827 node = fctx.filenode()
1828 if node in [fparent1, fparent2]:
1828 if node in [fparent1, fparent2]:
1829 self.ui.debug('reusing %s filelog entry\n' % fname)
1829 self.ui.debug('reusing %s filelog entry\n' % fname)
1830 if manifest1.flags(fname) != fctx.flags():
1830 if manifest1.flags(fname) != fctx.flags():
1831 changelist.append(fname)
1831 changelist.append(fname)
1832 return node
1832 return node
1833
1833
1834 flog = self.file(fname)
1834 flog = self.file(fname)
1835 meta = {}
1835 meta = {}
1836 copy = fctx.renamed()
1836 copy = fctx.renamed()
1837 if copy and copy[0] != fname:
1837 if copy and copy[0] != fname:
1838 # Mark the new revision of this file as a copy of another
1838 # Mark the new revision of this file as a copy of another
1839 # file. This copy data will effectively act as a parent
1839 # file. This copy data will effectively act as a parent
1840 # of this new revision. If this is a merge, the first
1840 # of this new revision. If this is a merge, the first
1841 # parent will be the nullid (meaning "look up the copy data")
1841 # parent will be the nullid (meaning "look up the copy data")
1842 # and the second one will be the other parent. For example:
1842 # and the second one will be the other parent. For example:
1843 #
1843 #
1844 # 0 --- 1 --- 3 rev1 changes file foo
1844 # 0 --- 1 --- 3 rev1 changes file foo
1845 # \ / rev2 renames foo to bar and changes it
1845 # \ / rev2 renames foo to bar and changes it
1846 # \- 2 -/ rev3 should have bar with all changes and
1846 # \- 2 -/ rev3 should have bar with all changes and
1847 # should record that bar descends from
1847 # should record that bar descends from
1848 # bar in rev2 and foo in rev1
1848 # bar in rev2 and foo in rev1
1849 #
1849 #
1850 # this allows this merge to succeed:
1850 # this allows this merge to succeed:
1851 #
1851 #
1852 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1852 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1853 # \ / merging rev3 and rev4 should use bar@rev2
1853 # \ / merging rev3 and rev4 should use bar@rev2
1854 # \- 2 --- 4 as the merge base
1854 # \- 2 --- 4 as the merge base
1855 #
1855 #
1856
1856
1857 cfname = copy[0]
1857 cfname = copy[0]
1858 crev = manifest1.get(cfname)
1858 crev = manifest1.get(cfname)
1859 newfparent = fparent2
1859 newfparent = fparent2
1860
1860
1861 if manifest2: # branch merge
1861 if manifest2: # branch merge
1862 if fparent2 == nullid or crev is None: # copied on remote side
1862 if fparent2 == nullid or crev is None: # copied on remote side
1863 if cfname in manifest2:
1863 if cfname in manifest2:
1864 crev = manifest2[cfname]
1864 crev = manifest2[cfname]
1865 newfparent = fparent1
1865 newfparent = fparent1
1866
1866
1867 # Here, we used to search backwards through history to try to find
1867 # Here, we used to search backwards through history to try to find
1868 # where the file copy came from if the source of a copy was not in
1868 # where the file copy came from if the source of a copy was not in
1869 # the parent directory. However, this doesn't actually make sense to
1869 # the parent directory. However, this doesn't actually make sense to
1870 # do (what does a copy from something not in your working copy even
1870 # do (what does a copy from something not in your working copy even
1871 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1871 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1872 # the user that copy information was dropped, so if they didn't
1872 # the user that copy information was dropped, so if they didn't
1873 # expect this outcome it can be fixed, but this is the correct
1873 # expect this outcome it can be fixed, but this is the correct
1874 # behavior in this circumstance.
1874 # behavior in this circumstance.
1875
1875
1876 if crev:
1876 if crev:
1877 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1877 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1878 meta["copy"] = cfname
1878 meta["copy"] = cfname
1879 meta["copyrev"] = hex(crev)
1879 meta["copyrev"] = hex(crev)
1880 fparent1, fparent2 = nullid, newfparent
1880 fparent1, fparent2 = nullid, newfparent
1881 else:
1881 else:
1882 self.ui.warn(_("warning: can't find ancestor for '%s' "
1882 self.ui.warn(_("warning: can't find ancestor for '%s' "
1883 "copied from '%s'!\n") % (fname, cfname))
1883 "copied from '%s'!\n") % (fname, cfname))
1884
1884
1885 elif fparent1 == nullid:
1885 elif fparent1 == nullid:
1886 fparent1, fparent2 = fparent2, nullid
1886 fparent1, fparent2 = fparent2, nullid
1887 elif fparent2 != nullid:
1887 elif fparent2 != nullid:
1888 # is one parent an ancestor of the other?
1888 # is one parent an ancestor of the other?
1889 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1889 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1890 if fparent1 in fparentancestors:
1890 if fparent1 in fparentancestors:
1891 fparent1, fparent2 = fparent2, nullid
1891 fparent1, fparent2 = fparent2, nullid
1892 elif fparent2 in fparentancestors:
1892 elif fparent2 in fparentancestors:
1893 fparent2 = nullid
1893 fparent2 = nullid
1894
1894
1895 # is the file changed?
1895 # is the file changed?
1896 text = fctx.data()
1896 text = fctx.data()
1897 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1897 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1898 changelist.append(fname)
1898 changelist.append(fname)
1899 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1899 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1900 # are just the flags changed during merge?
1900 # are just the flags changed during merge?
1901 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1901 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1902 changelist.append(fname)
1902 changelist.append(fname)
1903
1903
1904 return fparent1
1904 return fparent1
1905
1905
1906 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1906 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1907 """check for commit arguments that aren't committable"""
1907 """check for commit arguments that aren't committable"""
1908 if match.isexact() or match.prefix():
1908 if match.isexact() or match.prefix():
1909 matched = set(status.modified + status.added + status.removed)
1909 matched = set(status.modified + status.added + status.removed)
1910
1910
1911 for f in match.files():
1911 for f in match.files():
1912 f = self.dirstate.normalize(f)
1912 f = self.dirstate.normalize(f)
1913 if f == '.' or f in matched or f in wctx.substate:
1913 if f == '.' or f in matched or f in wctx.substate:
1914 continue
1914 continue
1915 if f in status.deleted:
1915 if f in status.deleted:
1916 fail(f, _('file not found!'))
1916 fail(f, _('file not found!'))
1917 if f in vdirs: # visited directory
1917 if f in vdirs: # visited directory
1918 d = f + '/'
1918 d = f + '/'
1919 for mf in matched:
1919 for mf in matched:
1920 if mf.startswith(d):
1920 if mf.startswith(d):
1921 break
1921 break
1922 else:
1922 else:
1923 fail(f, _("no match under directory!"))
1923 fail(f, _("no match under directory!"))
1924 elif f not in self.dirstate:
1924 elif f not in self.dirstate:
1925 fail(f, _("file not tracked!"))
1925 fail(f, _("file not tracked!"))
1926
1926
1927 @unfilteredmethod
1927 @unfilteredmethod
1928 def commit(self, text="", user=None, date=None, match=None, force=False,
1928 def commit(self, text="", user=None, date=None, match=None, force=False,
1929 editor=False, extra=None):
1929 editor=False, extra=None):
1930 """Add a new revision to current repository.
1930 """Add a new revision to current repository.
1931
1931
1932 Revision information is gathered from the working directory,
1932 Revision information is gathered from the working directory,
1933 match can be used to filter the committed files. If editor is
1933 match can be used to filter the committed files. If editor is
1934 supplied, it is called to get a commit message.
1934 supplied, it is called to get a commit message.
1935 """
1935 """
1936 if extra is None:
1936 if extra is None:
1937 extra = {}
1937 extra = {}
1938
1938
1939 def fail(f, msg):
1939 def fail(f, msg):
1940 raise error.Abort('%s: %s' % (f, msg))
1940 raise error.Abort('%s: %s' % (f, msg))
1941
1941
1942 if not match:
1942 if not match:
1943 match = matchmod.always(self.root, '')
1943 match = matchmod.always(self.root, '')
1944
1944
1945 if not force:
1945 if not force:
1946 vdirs = []
1946 vdirs = []
1947 match.explicitdir = vdirs.append
1947 match.explicitdir = vdirs.append
1948 match.bad = fail
1948 match.bad = fail
1949
1949
1950 wlock = lock = tr = None
1950 wlock = lock = tr = None
1951 try:
1951 try:
1952 wlock = self.wlock()
1952 wlock = self.wlock()
1953 lock = self.lock() # for recent changelog (see issue4368)
1953 lock = self.lock() # for recent changelog (see issue4368)
1954
1954
1955 wctx = self[None]
1955 wctx = self[None]
1956 merge = len(wctx.parents()) > 1
1956 merge = len(wctx.parents()) > 1
1957
1957
1958 if not force and merge and not match.always():
1958 if not force and merge and not match.always():
1959 raise error.Abort(_('cannot partially commit a merge '
1959 raise error.Abort(_('cannot partially commit a merge '
1960 '(do not specify files or patterns)'))
1960 '(do not specify files or patterns)'))
1961
1961
1962 status = self.status(match=match, clean=force)
1962 status = self.status(match=match, clean=force)
1963 if force:
1963 if force:
1964 status.modified.extend(status.clean) # mq may commit clean files
1964 status.modified.extend(status.clean) # mq may commit clean files
1965
1965
1966 # check subrepos
1966 # check subrepos
1967 subs, commitsubs, newstate = subrepoutil.precommit(
1967 subs, commitsubs, newstate = subrepoutil.precommit(
1968 self.ui, wctx, status, match, force=force)
1968 self.ui, wctx, status, match, force=force)
1969
1969
1970 # make sure all explicit patterns are matched
1970 # make sure all explicit patterns are matched
1971 if not force:
1971 if not force:
1972 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1972 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1973
1973
1974 cctx = context.workingcommitctx(self, status,
1974 cctx = context.workingcommitctx(self, status,
1975 text, user, date, extra)
1975 text, user, date, extra)
1976
1976
1977 # internal config: ui.allowemptycommit
1977 # internal config: ui.allowemptycommit
1978 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1978 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1979 or extra.get('close') or merge or cctx.files()
1979 or extra.get('close') or merge or cctx.files()
1980 or self.ui.configbool('ui', 'allowemptycommit'))
1980 or self.ui.configbool('ui', 'allowemptycommit'))
1981 if not allowemptycommit:
1981 if not allowemptycommit:
1982 return None
1982 return None
1983
1983
1984 if merge and cctx.deleted():
1984 if merge and cctx.deleted():
1985 raise error.Abort(_("cannot commit merge with missing files"))
1985 raise error.Abort(_("cannot commit merge with missing files"))
1986
1986
1987 ms = mergemod.mergestate.read(self)
1987 ms = mergemod.mergestate.read(self)
1988 mergeutil.checkunresolved(ms)
1988 mergeutil.checkunresolved(ms)
1989
1989
1990 if editor:
1990 if editor:
1991 cctx._text = editor(self, cctx, subs)
1991 cctx._text = editor(self, cctx, subs)
1992 edited = (text != cctx._text)
1992 edited = (text != cctx._text)
1993
1993
1994 # Save commit message in case this transaction gets rolled back
1994 # Save commit message in case this transaction gets rolled back
1995 # (e.g. by a pretxncommit hook). Leave the content alone on
1995 # (e.g. by a pretxncommit hook). Leave the content alone on
1996 # the assumption that the user will use the same editor again.
1996 # the assumption that the user will use the same editor again.
1997 msgfn = self.savecommitmessage(cctx._text)
1997 msgfn = self.savecommitmessage(cctx._text)
1998
1998
1999 # commit subs and write new state
1999 # commit subs and write new state
2000 if subs:
2000 if subs:
2001 for s in sorted(commitsubs):
2001 for s in sorted(commitsubs):
2002 sub = wctx.sub(s)
2002 sub = wctx.sub(s)
2003 self.ui.status(_('committing subrepository %s\n') %
2003 self.ui.status(_('committing subrepository %s\n') %
2004 subrepoutil.subrelpath(sub))
2004 subrepoutil.subrelpath(sub))
2005 sr = sub.commit(cctx._text, user, date)
2005 sr = sub.commit(cctx._text, user, date)
2006 newstate[s] = (newstate[s][0], sr)
2006 newstate[s] = (newstate[s][0], sr)
2007 subrepoutil.writestate(self, newstate)
2007 subrepoutil.writestate(self, newstate)
2008
2008
2009 p1, p2 = self.dirstate.parents()
2009 p1, p2 = self.dirstate.parents()
2010 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
2010 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
2011 try:
2011 try:
2012 self.hook("precommit", throw=True, parent1=hookp1,
2012 self.hook("precommit", throw=True, parent1=hookp1,
2013 parent2=hookp2)
2013 parent2=hookp2)
2014 tr = self.transaction('commit')
2014 tr = self.transaction('commit')
2015 ret = self.commitctx(cctx, True)
2015 ret = self.commitctx(cctx, True)
2016 except: # re-raises
2016 except: # re-raises
2017 if edited:
2017 if edited:
2018 self.ui.write(
2018 self.ui.write(
2019 _('note: commit message saved in %s\n') % msgfn)
2019 _('note: commit message saved in %s\n') % msgfn)
2020 raise
2020 raise
2021 # update bookmarks, dirstate and mergestate
2021 # update bookmarks, dirstate and mergestate
2022 bookmarks.update(self, [p1, p2], ret)
2022 bookmarks.update(self, [p1, p2], ret)
2023 cctx.markcommitted(ret)
2023 cctx.markcommitted(ret)
2024 ms.reset()
2024 ms.reset()
2025 tr.close()
2025 tr.close()
2026
2026
2027 finally:
2027 finally:
2028 lockmod.release(tr, lock, wlock)
2028 lockmod.release(tr, lock, wlock)
2029
2029
2030 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
2030 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
2031 # hack for commands that use a temporary commit (e.g. histedit)
2031 # hack for commands that use a temporary commit (e.g. histedit)
2032 # the temporary commit may have been stripped before the hook runs
2032 # the temporary commit may have been stripped before the hook runs
2033 if self.changelog.hasnode(ret):
2033 if self.changelog.hasnode(ret):
2034 self.hook("commit", node=node, parent1=parent1,
2034 self.hook("commit", node=node, parent1=parent1,
2035 parent2=parent2)
2035 parent2=parent2)
2036 self._afterlock(commithook)
2036 self._afterlock(commithook)
2037 return ret
2037 return ret
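The commit path above fires three hooks in order: 'precommit' before the transaction opens, 'pretxncommit' inside commitctx while the transaction is still open, and 'commit' only after the lock is released (and only if the node survived, per the hasnode guard). A minimal sketch of an in-process pretxncommit hook that can still veto the transaction; the module path, hook name and file name are hypothetical:

# hgrc wiring (hypothetical names):
#   [hooks]
#   pretxncommit.nosecrets = python:myhooks.nosecrets

def nosecrets(ui, repo, node=None, **kwargs):
    # the pending changeset is already written inside the open transaction
    ctx = repo[node]
    if 'secrets.txt' in ctx.files():
        ui.warn('commit touches secrets.txt, rolling back\n')
        return True    # a truthy return aborts the transaction
    return False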
2038
2038
2039 @unfilteredmethod
2039 @unfilteredmethod
2040 def commitctx(self, ctx, error=False):
2040 def commitctx(self, ctx, error=False):
2041 """Add a new revision to current repository.
2041 """Add a new revision to current repository.
2042 Revision information is passed via the context argument.
2042 Revision information is passed via the context argument.
2043
2043
2044 ctx.files() should list all files involved in this commit, i.e.
2044 ctx.files() should list all files involved in this commit, i.e.
2045 modified/added/removed files. On merge, it may be wider than the
2045 modified/added/removed files. On merge, it may be wider than the
2046 ctx.files() actually committed, since any file nodes derived directly
2046 ctx.files() actually committed, since any file nodes derived directly
2047 from p1 or p2 are excluded from the committed ctx.files().
2047 from p1 or p2 are excluded from the committed ctx.files().
2048 """
2048 """
2049
2049
2050 tr = None
2050 tr = None
2051 p1, p2 = ctx.p1(), ctx.p2()
2051 p1, p2 = ctx.p1(), ctx.p2()
2052 user = ctx.user()
2052 user = ctx.user()
2053
2053
2054 lock = self.lock()
2054 lock = self.lock()
2055 try:
2055 try:
2056 tr = self.transaction("commit")
2056 tr = self.transaction("commit")
2057 trp = weakref.proxy(tr)
2057 trp = weakref.proxy(tr)
2058
2058
2059 if ctx.manifestnode():
2059 if ctx.manifestnode():
2060 # reuse an existing manifest revision
2060 # reuse an existing manifest revision
2061 self.ui.debug('reusing known manifest\n')
2061 self.ui.debug('reusing known manifest\n')
2062 mn = ctx.manifestnode()
2062 mn = ctx.manifestnode()
2063 files = ctx.files()
2063 files = ctx.files()
2064 elif ctx.files():
2064 elif ctx.files():
2065 m1ctx = p1.manifestctx()
2065 m1ctx = p1.manifestctx()
2066 m2ctx = p2.manifestctx()
2066 m2ctx = p2.manifestctx()
2067 mctx = m1ctx.copy()
2067 mctx = m1ctx.copy()
2068
2068
2069 m = mctx.read()
2069 m = mctx.read()
2070 m1 = m1ctx.read()
2070 m1 = m1ctx.read()
2071 m2 = m2ctx.read()
2071 m2 = m2ctx.read()
2072
2072
2073 # check in files
2073 # check in files
2074 added = []
2074 added = []
2075 changed = []
2075 changed = []
2076 removed = list(ctx.removed())
2076 removed = list(ctx.removed())
2077 linkrev = len(self)
2077 linkrev = len(self)
2078 self.ui.note(_("committing files:\n"))
2078 self.ui.note(_("committing files:\n"))
2079 for f in sorted(ctx.modified() + ctx.added()):
2079 for f in sorted(ctx.modified() + ctx.added()):
2080 self.ui.note(f + "\n")
2080 self.ui.note(f + "\n")
2081 try:
2081 try:
2082 fctx = ctx[f]
2082 fctx = ctx[f]
2083 if fctx is None:
2083 if fctx is None:
2084 removed.append(f)
2084 removed.append(f)
2085 else:
2085 else:
2086 added.append(f)
2086 added.append(f)
2087 m[f] = self._filecommit(fctx, m1, m2, linkrev,
2087 m[f] = self._filecommit(fctx, m1, m2, linkrev,
2088 trp, changed)
2088 trp, changed)
2089 m.setflag(f, fctx.flags())
2089 m.setflag(f, fctx.flags())
2090 except OSError as inst:
2090 except OSError as inst:
2091 self.ui.warn(_("trouble committing %s!\n") % f)
2091 self.ui.warn(_("trouble committing %s!\n") % f)
2092 raise
2092 raise
2093 except IOError as inst:
2093 except IOError as inst:
2094 errcode = getattr(inst, 'errno', errno.ENOENT)
2094 errcode = getattr(inst, 'errno', errno.ENOENT)
2095 if error or errcode and errcode != errno.ENOENT:
2095 if error or errcode and errcode != errno.ENOENT:
2096 self.ui.warn(_("trouble committing %s!\n") % f)
2096 self.ui.warn(_("trouble committing %s!\n") % f)
2097 raise
2097 raise
2098
2098
2099 # update manifest
2099 # update manifest
2100 removed = [f for f in sorted(removed) if f in m1 or f in m2]
2100 removed = [f for f in sorted(removed) if f in m1 or f in m2]
2101 drop = [f for f in removed if f in m]
2101 drop = [f for f in removed if f in m]
2102 for f in drop:
2102 for f in drop:
2103 del m[f]
2103 del m[f]
2104 files = changed + removed
2104 files = changed + removed
2105 md = None
2105 md = None
2106 if not files:
2106 if not files:
2107 # if no "files" actually changed in terms of the changelog,
2107 # if no "files" actually changed in terms of the changelog,
2108 # try hard to detect unmodified manifest entry so that the
2108 # try hard to detect unmodified manifest entry so that the
2109 # exact same commit can be reproduced later on convert.
2109 # exact same commit can be reproduced later on convert.
2110 md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
2110 md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
2111 if not files and md:
2111 if not files and md:
2112 self.ui.debug('not reusing manifest (no file change in '
2112 self.ui.debug('not reusing manifest (no file change in '
2113 'changelog, but manifest differs)\n')
2113 'changelog, but manifest differs)\n')
2114 if files or md:
2114 if files or md:
2115 self.ui.note(_("committing manifest\n"))
2115 self.ui.note(_("committing manifest\n"))
2116 mn = mctx.write(trp, linkrev,
2116 mn = mctx.write(trp, linkrev,
2117 p1.manifestnode(), p2.manifestnode(),
2117 p1.manifestnode(), p2.manifestnode(),
2118 added, drop)
2118 added, drop)
2119 else:
2119 else:
2120 self.ui.debug('reusing manifest from p1 (listed files '
2120 self.ui.debug('reusing manifest from p1 (listed files '
2121 'actually unchanged)\n')
2121 'actually unchanged)\n')
2122 mn = p1.manifestnode()
2122 mn = p1.manifestnode()
2123 else:
2123 else:
2124 self.ui.debug('reusing manifest from p1 (no file change)\n')
2124 self.ui.debug('reusing manifest from p1 (no file change)\n')
2125 mn = p1.manifestnode()
2125 mn = p1.manifestnode()
2126 files = []
2126 files = []
2127
2127
2128 # update changelog
2128 # update changelog
2129 self.ui.note(_("committing changelog\n"))
2129 self.ui.note(_("committing changelog\n"))
2130 self.changelog.delayupdate(tr)
2130 self.changelog.delayupdate(tr)
2131 n = self.changelog.add(mn, files, ctx.description(),
2131 n = self.changelog.add(mn, files, ctx.description(),
2132 trp, p1.node(), p2.node(),
2132 trp, p1.node(), p2.node(),
2133 user, ctx.date(), ctx.extra().copy())
2133 user, ctx.date(), ctx.extra().copy())
2134 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
2134 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
2135 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
2135 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
2136 parent2=xp2)
2136 parent2=xp2)
2137 # set the new commit in its proper phase
2137 # set the new commit in its proper phase
2138 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
2138 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
2139 if targetphase:
2139 if targetphase:
2140 # retract boundary does not alter parent changesets.
2140 # retract boundary does not alter parent changesets.
2141 # if a parent has a higher phase, the resulting phase will
2141 # if a parent has a higher phase, the resulting phase will
2142 # be compliant anyway
2142 # be compliant anyway
2143 #
2143 #
2144 # if minimal phase was 0 we don't need to retract anything
2144 # if minimal phase was 0 we don't need to retract anything
2145 phases.registernew(self, tr, targetphase, [n])
2145 phases.registernew(self, tr, targetphase, [n])
2146 tr.close()
2146 tr.close()
2147 return n
2147 return n
2148 finally:
2148 finally:
2149 if tr:
2149 if tr:
2150 tr.release()
2150 tr.release()
2151 lock.release()
2151 lock.release()
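The ordering above relies on revlogs being append-only: `linkrev = len(self)` is captured before anything is written, so file and manifest revisions can point at the changelog entry that does not exist yet. A toy model of that invariant (plain Python, not the real revlog API):

class ToyLog(object):
    """Append-only log: the index of the next entry is always len()."""
    def __init__(self):
        self.entries = []
    def add(self, data):
        self.entries.append(data)
        return len(self.entries) - 1

changelog, filelog = ToyLog(), ToyLog()
linkrev = len(changelog.entries)        # rev the new commit *will* get
filelog.add(('file data', linkrev))     # filelog points forward at it
assert changelog.add('commit metadata') == linkrev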
2152
2152
2153 @unfilteredmethod
2153 @unfilteredmethod
2154 def destroying(self):
2154 def destroying(self):
2155 '''Inform the repository that nodes are about to be destroyed.
2155 '''Inform the repository that nodes are about to be destroyed.
2156 Intended for use by strip and rollback, so there's a common
2156 Intended for use by strip and rollback, so there's a common
2157 place for anything that has to be done before destroying history.
2157 place for anything that has to be done before destroying history.
2158
2158
2159 This is mostly useful for saving state that is in memory and waiting
2159 This is mostly useful for saving state that is in memory and waiting
2160 to be flushed when the current lock is released. Because a call to
2160 to be flushed when the current lock is released. Because a call to
2161 destroyed is imminent, the repo will be invalidated causing those
2161 destroyed is imminent, the repo will be invalidated causing those
2162 changes to stay in memory (waiting for the next unlock), or vanish
2162 changes to stay in memory (waiting for the next unlock), or vanish
2163 completely.
2163 completely.
2164 '''
2164 '''
2165 # When using the same lock to commit and strip, the phasecache is left
2165 # When using the same lock to commit and strip, the phasecache is left
2166 # dirty after committing. Then when we strip, the repo is invalidated,
2166 # dirty after committing. Then when we strip, the repo is invalidated,
2167 # causing those changes to disappear.
2167 # causing those changes to disappear.
2168 if '_phasecache' in vars(self):
2168 if '_phasecache' in vars(self):
2169 self._phasecache.write()
2169 self._phasecache.write()
2170
2170
2171 @unfilteredmethod
2171 @unfilteredmethod
2172 def destroyed(self):
2172 def destroyed(self):
2173 '''Inform the repository that nodes have been destroyed.
2173 '''Inform the repository that nodes have been destroyed.
2174 Intended for use by strip and rollback, so there's a common
2174 Intended for use by strip and rollback, so there's a common
2175 place for anything that has to be done after destroying history.
2175 place for anything that has to be done after destroying history.
2176 '''
2176 '''
2177 # When one tries to:
2177 # When one tries to:
2178 # 1) destroy nodes thus calling this method (e.g. strip)
2178 # 1) destroy nodes thus calling this method (e.g. strip)
2179 # 2) use phasecache somewhere (e.g. commit)
2179 # 2) use phasecache somewhere (e.g. commit)
2180 #
2180 #
2181 # then 2) will fail because the phasecache contains nodes that were
2181 # then 2) will fail because the phasecache contains nodes that were
2182 # removed. We can either remove phasecache from the filecache,
2182 # removed. We can either remove phasecache from the filecache,
2183 # causing it to reload next time it is accessed, or simply filter
2183 # causing it to reload next time it is accessed, or simply filter
2184 # the removed nodes now and write the updated cache.
2184 # the removed nodes now and write the updated cache.
2185 self._phasecache.filterunknown(self)
2185 self._phasecache.filterunknown(self)
2186 self._phasecache.write()
2186 self._phasecache.write()
2187
2187
2188 # refresh all repository caches
2188 # refresh all repository caches
2189 self.updatecaches()
2189 self.updatecaches()
2190
2190
2191 # Ensure the persistent tag cache is updated. Doing it now
2191 # Ensure the persistent tag cache is updated. Doing it now
2192 # means that the tag cache only has to worry about destroyed
2192 # means that the tag cache only has to worry about destroyed
2193 # heads immediately after a strip/rollback. That in turn
2193 # heads immediately after a strip/rollback. That in turn
2194 # guarantees that "cachetip == currenttip" (comparing both rev
2194 # guarantees that "cachetip == currenttip" (comparing both rev
2195 # and node) always means no nodes have been added or destroyed.
2195 # and node) always means no nodes have been added or destroyed.
2196
2196
2197 # XXX this is suboptimal when qrefresh'ing: we strip the current
2197 # XXX this is suboptimal when qrefresh'ing: we strip the current
2198 # head, refresh the tag cache, then immediately add a new head.
2198 # head, refresh the tag cache, then immediately add a new head.
2199 # But I think doing it this way is necessary for the "instant
2199 # But I think doing it this way is necessary for the "instant
2200 # tag cache retrieval" case to work.
2200 # tag cache retrieval" case to work.
2201 self.invalidate()
2201 self.invalidate()
2202
2202
2203 def status(self, node1='.', node2=None, match=None,
2203 def status(self, node1='.', node2=None, match=None,
2204 ignored=False, clean=False, unknown=False,
2204 ignored=False, clean=False, unknown=False,
2205 listsubrepos=False):
2205 listsubrepos=False):
2206 '''a convenience method that calls node1.status(node2)'''
2206 '''a convenience method that calls node1.status(node2)'''
2207 return self[node1].status(node2, match, ignored, clean, unknown,
2207 return self[node1].status(node2, match, ignored, clean, unknown,
2208 listsubrepos)
2208 listsubrepos)
2209
2209
2210 def addpostdsstatus(self, ps):
2210 def addpostdsstatus(self, ps):
2211 """Add a callback to run within the wlock, at the point at which status
2211 """Add a callback to run within the wlock, at the point at which status
2212 fixups happen.
2212 fixups happen.
2213
2213
2214 On status completion, callback(wctx, status) will be called with the
2214 On status completion, callback(wctx, status) will be called with the
2215 wlock held, unless the dirstate has changed from underneath or the wlock
2215 wlock held, unless the dirstate has changed from underneath or the wlock
2216 couldn't be grabbed.
2216 couldn't be grabbed.
2217
2217
2218 Callbacks should not capture and use a cached copy of the dirstate --
2218 Callbacks should not capture and use a cached copy of the dirstate --
2219 it might change in the meanwhile. Instead, they should access the
2219 it might change in the meanwhile. Instead, they should access the
2220 dirstate via wctx.repo().dirstate.
2220 dirstate via wctx.repo().dirstate.
2221
2221
2222 This list is emptied out after each status run -- extensions should
2222 This list is emptied out after each status run -- extensions should
2223 make sure they add to this list each time dirstate.status is called.
2223 make sure they add to this list each time dirstate.status is called.
2224 Extensions should also make sure they don't call this for statuses
2224 Extensions should also make sure they don't call this for statuses
2225 that don't involve the dirstate.
2225 that don't involve the dirstate.
2226 """
2226 """
2227
2227
2228 # The list is located here for uniqueness reasons -- it is actually
2228 # The list is located here for uniqueness reasons -- it is actually
2229 # managed by the workingctx, but that isn't unique per-repo.
2229 # managed by the workingctx, but that isn't unique per-repo.
2230 self._postdsstatus.append(ps)
2230 self._postdsstatus.append(ps)
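A hedged sketch of such a callback, following the contract described in the docstring above (the function name is hypothetical):

def logdirty(wctx, status):
    # runs under wlock; access the dirstate via wctx.repo().dirstate,
    # never via a copy captured earlier
    wctx.repo().ui.debug('%d modified files after fixups\n'
                         % len(status.modified))

repo.addpostdsstatus(logdirty)  # must be re-added before each status run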
2231
2231
2232 def postdsstatus(self):
2232 def postdsstatus(self):
2233 """Used by workingctx to get the list of post-dirstate-status hooks."""
2233 """Used by workingctx to get the list of post-dirstate-status hooks."""
2234 return self._postdsstatus
2234 return self._postdsstatus
2235
2235
2236 def clearpostdsstatus(self):
2236 def clearpostdsstatus(self):
2237 """Used by workingctx to clear post-dirstate-status hooks."""
2237 """Used by workingctx to clear post-dirstate-status hooks."""
2238 del self._postdsstatus[:]
2238 del self._postdsstatus[:]
2239
2239
2240 def heads(self, start=None):
2240 def heads(self, start=None):
2241 if start is None:
2241 if start is None:
2242 cl = self.changelog
2242 cl = self.changelog
2243 headrevs = reversed(cl.headrevs())
2243 headrevs = reversed(cl.headrevs())
2244 return [cl.node(rev) for rev in headrevs]
2244 return [cl.node(rev) for rev in headrevs]
2245
2245
2246 heads = self.changelog.heads(start)
2246 heads = self.changelog.heads(start)
2247 # sort the output in rev descending order
2247 # sort the output in rev descending order
2248 return sorted(heads, key=self.changelog.rev, reverse=True)
2248 return sorted(heads, key=self.changelog.rev, reverse=True)
2249
2249
2250 def branchheads(self, branch=None, start=None, closed=False):
2250 def branchheads(self, branch=None, start=None, closed=False):
2251 '''return a (possibly filtered) list of heads for the given branch
2251 '''return a (possibly filtered) list of heads for the given branch
2252
2252
2253 Heads are returned in topological order, from newest to oldest.
2253 Heads are returned in topological order, from newest to oldest.
2254 If branch is None, use the dirstate branch.
2254 If branch is None, use the dirstate branch.
2255 If start is not None, return only heads reachable from start.
2255 If start is not None, return only heads reachable from start.
2256 If closed is True, return heads that are marked as closed as well.
2256 If closed is True, return heads that are marked as closed as well.
2257 '''
2257 '''
2258 if branch is None:
2258 if branch is None:
2259 branch = self[None].branch()
2259 branch = self[None].branch()
2260 branches = self.branchmap()
2260 branches = self.branchmap()
2261 if branch not in branches:
2261 if branch not in branches:
2262 return []
2262 return []
2263 # the cache returns heads ordered lowest to highest
2263 # the cache returns heads ordered lowest to highest
2264 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2264 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2265 if start is not None:
2265 if start is not None:
2266 # filter out the heads that cannot be reached from startrev
2266 # filter out the heads that cannot be reached from startrev
2267 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2267 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2268 bheads = [h for h in bheads if h in fbheads]
2268 bheads = [h for h in bheads if h in fbheads]
2269 return bheads
2269 return bheads
2270
2270
2271 def branches(self, nodes):
2271 def branches(self, nodes):
2272 if not nodes:
2272 if not nodes:
2273 nodes = [self.changelog.tip()]
2273 nodes = [self.changelog.tip()]
2274 b = []
2274 b = []
2275 for n in nodes:
2275 for n in nodes:
2276 t = n
2276 t = n
2277 while True:
2277 while True:
2278 p = self.changelog.parents(n)
2278 p = self.changelog.parents(n)
2279 if p[1] != nullid or p[0] == nullid:
2279 if p[1] != nullid or p[0] == nullid:
2280 b.append((t, n, p[0], p[1]))
2280 b.append((t, n, p[0], p[1]))
2281 break
2281 break
2282 n = p[0]
2282 n = p[0]
2283 return b
2283 return b
2284
2284
2285 def between(self, pairs):
2285 def between(self, pairs):
2286 r = []
2286 r = []
2287
2287
2288 for top, bottom in pairs:
2288 for top, bottom in pairs:
2289 n, l, i = top, [], 0
2289 n, l, i = top, [], 0
2290 f = 1
2290 f = 1
2291
2291
2292 while n != bottom and n != nullid:
2292 while n != bottom and n != nullid:
2293 p = self.changelog.parents(n)[0]
2293 p = self.changelog.parents(n)[0]
2294 if i == f:
2294 if i == f:
2295 l.append(n)
2295 l.append(n)
2296 f = f * 2
2296 f = f * 2
2297 n = p
2297 n = p
2298 i += 1
2298 i += 1
2299
2299
2300 r.append(l)
2300 r.append(l)
2301
2301
2302 return r
2302 return r
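The loop above samples the first-parent chain at exponentially growing distances (1, 2, 4, 8, ...) from `top`, which keeps the answer small on long histories. A self-contained toy with integer "nodes" (not the real API):

def toy_between(parents, top, bottom):
    n, kept, i, f = top, [], 0, 1
    while n != bottom and n is not None:
        p = parents.get(n)
        if i == f:          # distance from top is a power of two
            kept.append(n)
            f *= 2
        n = p
        i += 1
    return kept

parents = {k: k - 1 for k in range(1, 10)}   # linear history 9..0
print(toy_between(parents, 9, 0))            # [8, 7, 5, 1]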
2303
2303
2304 def checkpush(self, pushop):
2304 def checkpush(self, pushop):
2305 """Extensions can override this function if additional checks have
2305 """Extensions can override this function if additional checks have
2306 to be performed before pushing, or call it if they override push
2306 to be performed before pushing, or call it if they override push
2307 command.
2307 command.
2308 """
2308 """
2309
2309
2310 @unfilteredpropertycache
2310 @unfilteredpropertycache
2311 def prepushoutgoinghooks(self):
2311 def prepushoutgoinghooks(self):
2312 """Return util.hooks consists of a pushop with repo, remote, outgoing
2312 """Return util.hooks consists of a pushop with repo, remote, outgoing
2313 methods, which are called before pushing changesets.
2313 methods, which are called before pushing changesets.
2314 """
2314 """
2315 return util.hooks()
2315 return util.hooks()
2316
2316
2317 def pushkey(self, namespace, key, old, new):
2317 def pushkey(self, namespace, key, old, new):
2318 try:
2318 try:
2319 tr = self.currenttransaction()
2319 tr = self.currenttransaction()
2320 hookargs = {}
2320 hookargs = {}
2321 if tr is not None:
2321 if tr is not None:
2322 hookargs.update(tr.hookargs)
2322 hookargs.update(tr.hookargs)
2323 hookargs = pycompat.strkwargs(hookargs)
2323 hookargs = pycompat.strkwargs(hookargs)
2324 hookargs[r'namespace'] = namespace
2324 hookargs[r'namespace'] = namespace
2325 hookargs[r'key'] = key
2325 hookargs[r'key'] = key
2326 hookargs[r'old'] = old
2326 hookargs[r'old'] = old
2327 hookargs[r'new'] = new
2327 hookargs[r'new'] = new
2328 self.hook('prepushkey', throw=True, **hookargs)
2328 self.hook('prepushkey', throw=True, **hookargs)
2329 except error.HookAbort as exc:
2329 except error.HookAbort as exc:
2330 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2330 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2331 if exc.hint:
2331 if exc.hint:
2332 self.ui.write_err(_("(%s)\n") % exc.hint)
2332 self.ui.write_err(_("(%s)\n") % exc.hint)
2333 return False
2333 return False
2334 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2334 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2335 ret = pushkey.push(self, namespace, key, old, new)
2335 ret = pushkey.push(self, namespace, key, old, new)
2336 def runhook():
2336 def runhook():
2337 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2337 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2338 ret=ret)
2338 ret=ret)
2339 self._afterlock(runhook)
2339 self._afterlock(runhook)
2340 return ret
2340 return ret
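A hedged usage sketch of this method for the bookmarks namespace; the bookmark name is hypothetical, and old/new values are hex node strings ('' meaning the key does not exist yet):

ok = repo.pushkey('bookmarks', 'feature-bookmark',
                  '',                 # old value: bookmark absent
                  repo['tip'].hex())  # new value: target node as hex
# ok is the boolean result of pushkey.push(); the prepushkey and
# pushkey hooks fire around it as shown above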
2341
2341
2342 def listkeys(self, namespace):
2342 def listkeys(self, namespace):
2343 self.hook('prelistkeys', throw=True, namespace=namespace)
2343 self.hook('prelistkeys', throw=True, namespace=namespace)
2344 self.ui.debug('listing keys for "%s"\n' % namespace)
2344 self.ui.debug('listing keys for "%s"\n' % namespace)
2345 values = pushkey.list(self, namespace)
2345 values = pushkey.list(self, namespace)
2346 self.hook('listkeys', namespace=namespace, values=values)
2346 self.hook('listkeys', namespace=namespace, values=values)
2347 return values
2347 return values
2348
2348
2349 def debugwireargs(self, one, two, three=None, four=None, five=None):
2349 def debugwireargs(self, one, two, three=None, four=None, five=None):
2350 '''used to test argument passing over the wire'''
2350 '''used to test argument passing over the wire'''
2351 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
2351 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
2352 pycompat.bytestr(four),
2352 pycompat.bytestr(four),
2353 pycompat.bytestr(five))
2353 pycompat.bytestr(five))
2354
2354
2355 def savecommitmessage(self, text):
2355 def savecommitmessage(self, text):
2356 fp = self.vfs('last-message.txt', 'wb')
2356 fp = self.vfs('last-message.txt', 'wb')
2357 try:
2357 try:
2358 fp.write(text)
2358 fp.write(text)
2359 finally:
2359 finally:
2360 fp.close()
2360 fp.close()
2361 return self.pathto(fp.name[len(self.root) + 1:])
2361 return self.pathto(fp.name[len(self.root) + 1:])
2362
2362
2363 # used to avoid circular references so destructors work
2363 # used to avoid circular references so destructors work
2364 def aftertrans(files):
2364 def aftertrans(files):
2365 renamefiles = [tuple(t) for t in files]
2365 renamefiles = [tuple(t) for t in files]
2366 def a():
2366 def a():
2367 for vfs, src, dest in renamefiles:
2367 for vfs, src, dest in renamefiles:
2368 # if src and dest refer to a same file, vfs.rename is a no-op,
2368 # if src and dest refer to a same file, vfs.rename is a no-op,
2369 # leaving both src and dest on disk. delete dest to make sure
2369 # leaving both src and dest on disk. delete dest to make sure
2370 # the rename couldn't be such a no-op.
2370 # the rename couldn't be such a no-op.
2371 vfs.tryunlink(dest)
2371 vfs.tryunlink(dest)
2372 try:
2372 try:
2373 vfs.rename(src, dest)
2373 vfs.rename(src, dest)
2374 except OSError: # journal file does not yet exist
2374 except OSError: # journal file does not yet exist
2375 pass
2375 pass
2376 return a
2376 return a
2377
2377
2378 def undoname(fn):
2378 def undoname(fn):
2379 base, name = os.path.split(fn)
2379 base, name = os.path.split(fn)
2380 assert name.startswith('journal')
2380 assert name.startswith('journal')
2381 return os.path.join(base, name.replace('journal', 'undo', 1))
2381 return os.path.join(base, name.replace('journal', 'undo', 1))
2382
2382
2383 def instance(ui, path, create, intents=None):
2383 def instance(ui, path, create, intents=None):
2384 return localrepository(ui, util.urllocalpath(path), create,
2384 return localrepository(ui, util.urllocalpath(path), create,
2385 intents=intents)
2385 intents=intents)
2386
2386
2387 def islocal(path):
2387 def islocal(path):
2388 return True
2388 return True
2389
2389
2390 def newreporequirements(repo):
2390 def newreporequirements(repo):
2391 """Determine the set of requirements for a new local repository.
2391 """Determine the set of requirements for a new local repository.
2392
2392
2393 Extensions can wrap this function to specify custom requirements for
2393 Extensions can wrap this function to specify custom requirements for
2394 new repositories.
2394 new repositories.
2395 """
2395 """
2396 ui = repo.ui
2396 ui = repo.ui
2397 requirements = {'revlogv1'}
2397 requirements = {'revlogv1'}
2398 if ui.configbool('format', 'usestore'):
2398 if ui.configbool('format', 'usestore'):
2399 requirements.add('store')
2399 requirements.add('store')
2400 if ui.configbool('format', 'usefncache'):
2400 if ui.configbool('format', 'usefncache'):
2401 requirements.add('fncache')
2401 requirements.add('fncache')
2402 if ui.configbool('format', 'dotencode'):
2402 if ui.configbool('format', 'dotencode'):
2403 requirements.add('dotencode')
2403 requirements.add('dotencode')
2404
2404
2405 compengine = ui.config('experimental', 'format.compression')
2405 compengine = ui.config('experimental', 'format.compression')
2406 if compengine not in util.compengines:
2406 if compengine not in util.compengines:
2407 raise error.Abort(_('compression engine %s defined by '
2407 raise error.Abort(_('compression engine %s defined by '
2408 'experimental.format.compression not available') %
2408 'experimental.format.compression not available') %
2409 compengine,
2409 compengine,
2410 hint=_('run "hg debuginstall" to list available '
2410 hint=_('run "hg debuginstall" to list available '
2411 'compression engines'))
2411 'compression engines'))
2412
2412
2413 # zlib is the historical default and doesn't need an explicit requirement.
2413 # zlib is the historical default and doesn't need an explicit requirement.
2414 if compengine != 'zlib':
2414 if compengine != 'zlib':
2415 requirements.add('exp-compression-%s' % compengine)
2415 requirements.add('exp-compression-%s' % compengine)
2416
2416
2417 if scmutil.gdinitconfig(ui):
2417 if scmutil.gdinitconfig(ui):
2418 requirements.add('generaldelta')
2418 requirements.add('generaldelta')
2419 if ui.configbool('experimental', 'treemanifest'):
2419 if ui.configbool('experimental', 'treemanifest'):
2420 requirements.add('treemanifest')
2420 requirements.add('treemanifest')
2421 # experimental config: format.sparse-revlog
2421 # experimental config: format.sparse-revlog
2422 if ui.configbool('format', 'sparse-revlog'):
2422 if ui.configbool('format', 'sparse-revlog'):
2423 requirements.add(SPARSEREVLOG_REQUIREMENT)
2423 requirements.add(SPARSEREVLOG_REQUIREMENT)
2424
2424
2425 revlogv2 = ui.config('experimental', 'revlogv2')
2425 revlogv2 = ui.config('experimental', 'revlogv2')
2426 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2426 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2427 requirements.remove('revlogv1')
2427 requirements.remove('revlogv1')
2428 # generaldelta is implied by revlogv2.
2428 # generaldelta is implied by revlogv2.
2429 requirements.discard('generaldelta')
2429 requirements.discard('generaldelta')
2430 requirements.add(REVLOGV2_REQUIREMENT)
2430 requirements.add(REVLOGV2_REQUIREMENT)
2431 # experimental config: format.internal-phase
2431 # experimental config: format.internal-phase
2432 if repo.ui.configbool('format', 'internal-phase'):
2432 if repo.ui.configbool('format', 'internal-phase'):
2433 requirements.add('internal-phase')
2433 requirements.add('internal-phase')
2434
2434
2435 return requirements
2435 return requirements
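A sketch of the extension hook mentioned in the docstring above, using extensions.wrapfunction; the config knob and requirement name are hypothetical:

from mercurial import extensions, localrepo

def _newreporequirements(orig, repo):
    reqs = orig(repo)
    if repo.ui.configbool('myext', 'enabled'):   # hypothetical config knob
        reqs.add('exp-myext-feature')            # hypothetical requirement
    return reqs

def extsetup(ui):
    extensions.wrapfunction(localrepo, 'newreporequirements',
                            _newreporequirements)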
@@ -1,982 +1,982 b''
1 # obsutil.py - utility functions for obsolescence
1 # obsutil.py - utility functions for obsolescence
2 #
2 #
3 # Copyright 2017 Boris Feld <boris.feld@octobus.net>
3 # Copyright 2017 Boris Feld <boris.feld@octobus.net>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import re
10 import re
11
11
12 from .i18n import _
12 from .i18n import _
13 from . import (
13 from . import (
14 diffutil,
14 diffutil,
15 encoding,
15 encoding,
16 node as nodemod,
16 node as nodemod,
17 phases,
17 phases,
18 util,
18 util,
19 )
19 )
20 from .utils import (
20 from .utils import (
21 dateutil,
21 dateutil,
22 )
22 )
23
23
24 ### obsolescence marker flag
24 ### obsolescence marker flag
25
25
26 ## bumpedfix flag
26 ## bumpedfix flag
27 #
27 #
28 # When a changeset A' succeeds a changeset A which became public, we call A'
28 # When a changeset A' succeeds a changeset A which became public, we call A'
29 # "bumped" because it is a successor of a public changeset
29 # "bumped" because it is a successor of a public changeset
30 #
30 #
31 # o A' (bumped)
31 # o A' (bumped)
32 # |`:
32 # |`:
33 # | o A
33 # | o A
34 # |/
34 # |/
35 # o Z
35 # o Z
36 #
36 #
37 # The way to solve this situation is to create a new changeset Ad as a child
37 # The way to solve this situation is to create a new changeset Ad as a child
38 # of A. This changeset has the same content as A'. So the diff from A to A'
38 # of A. This changeset has the same content as A'. So the diff from A to A'
39 # is the same as the diff from A to Ad. Ad is marked as a successor of A'
39 # is the same as the diff from A to Ad. Ad is marked as a successor of A'
40 #
40 #
41 # o Ad
41 # o Ad
42 # |`:
42 # |`:
43 # | x A'
43 # | x A'
44 # |'|
44 # |'|
45 # o | A
45 # o | A
46 # |/
46 # |/
47 # o Z
47 # o Z
48 #
48 #
49 # But by transitivity Ad is also a successor of A. To avoid having Ad marked
49 # But by transitivity Ad is also a successor of A. To avoid having Ad marked
50 # as bumped too, we add the `bumpedfix` flag to the marker. <A', (Ad,)>.
50 # as bumped too, we add the `bumpedfix` flag to the marker. <A', (Ad,)>.
51 # This flag means that the successor expresses the changes between the public and
51 # This flag means that the successor expresses the changes between the public and
52 # bumped version and fixes the situation, breaking the transitivity of
52 # bumped version and fixes the situation, breaking the transitivity of
53 # "bumped" here.
53 # "bumped" here.
54 bumpedfix = 1
54 bumpedfix = 1
55 usingsha256 = 2
55 usingsha256 = 2
56
56
57 class marker(object):
57 class marker(object):
58 """Wrap obsolete marker raw data"""
58 """Wrap obsolete marker raw data"""
59
59
60 def __init__(self, repo, data):
60 def __init__(self, repo, data):
61 # the repo argument will be used to create changectx in later version
61 # the repo argument will be used to create changectx in later version
62 self._repo = repo
62 self._repo = repo
63 self._data = data
63 self._data = data
64 self._decodedmeta = None
64 self._decodedmeta = None
65
65
66 def __hash__(self):
66 def __hash__(self):
67 return hash(self._data)
67 return hash(self._data)
68
68
69 def __eq__(self, other):
69 def __eq__(self, other):
70 if type(other) != type(self):
70 if type(other) != type(self):
71 return False
71 return False
72 return self._data == other._data
72 return self._data == other._data
73
73
74 def prednode(self):
74 def prednode(self):
75 """Predecessor changeset node identifier"""
75 """Predecessor changeset node identifier"""
76 return self._data[0]
76 return self._data[0]
77
77
78 def succnodes(self):
78 def succnodes(self):
79 """List of successor changesets node identifiers"""
79 """List of successor changesets node identifiers"""
80 return self._data[1]
80 return self._data[1]
81
81
82 def parentnodes(self):
82 def parentnodes(self):
83 """Parents of the predecessors (None if not recorded)"""
83 """Parents of the predecessors (None if not recorded)"""
84 return self._data[5]
84 return self._data[5]
85
85
86 def metadata(self):
86 def metadata(self):
87 """Decoded metadata dictionary"""
87 """Decoded metadata dictionary"""
88 return dict(self._data[3])
88 return dict(self._data[3])
89
89
90 def date(self):
90 def date(self):
91 """Creation date as (unixtime, offset)"""
91 """Creation date as (unixtime, offset)"""
92 return self._data[4]
92 return self._data[4]
93
93
94 def flags(self):
94 def flags(self):
95 """The flags field of the marker"""
95 """The flags field of the marker"""
96 return self._data[2]
96 return self._data[2]
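The accessors above imply the layout of the raw marker tuple; a minimal sketch with made-up values (the indices come from the methods, everything else is hypothetical):

raw = (b'\x01' * 20,             # [0] predecessor node id
       (b'\x02' * 20,),          # [1] successor node ids
       0,                        # [2] flags (e.g. bumpedfix)
       ((b'user', b'alice'),),   # [3] metadata as key/value pairs
       (0.0, 0),                 # [4] date: (unixtime, offset)
       None)                     # [5] parents of the predecessor
m = marker(repo=None, data=raw)  # repo unused until changectx creation
assert m.prednode() == raw[0] and m.metadata() == {b'user': b'alice'}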
97
97
98 def getmarkers(repo, nodes=None, exclusive=False):
98 def getmarkers(repo, nodes=None, exclusive=False):
99 """returns markers known in a repository
99 """returns markers known in a repository
100
100
101 If <nodes> is specified, only markers "relevant" to those nodes are
101 If <nodes> is specified, only markers "relevant" to those nodes are
102 returned"""
102 returned"""
103 if nodes is None:
103 if nodes is None:
104 rawmarkers = repo.obsstore
104 rawmarkers = repo.obsstore
105 elif exclusive:
105 elif exclusive:
106 rawmarkers = exclusivemarkers(repo, nodes)
106 rawmarkers = exclusivemarkers(repo, nodes)
107 else:
107 else:
108 rawmarkers = repo.obsstore.relevantmarkers(nodes)
108 rawmarkers = repo.obsstore.relevantmarkers(nodes)
109
109
110 for markerdata in rawmarkers:
110 for markerdata in rawmarkers:
111 yield marker(repo, markerdata)
111 yield marker(repo, markerdata)
112
112
113 def closestpredecessors(repo, nodeid):
113 def closestpredecessors(repo, nodeid):
114 """yield the list of next predecessors pointing on visible changectx nodes
114 """yield the list of next predecessors pointing on visible changectx nodes
115
115
116 This function respects the repoview filtering; filtered revisions will be
116 This function respects the repoview filtering; filtered revisions will be
117 considered missing.
117 considered missing.
118 """
118 """
119
119
120 precursors = repo.obsstore.predecessors
120 precursors = repo.obsstore.predecessors
121 stack = [nodeid]
121 stack = [nodeid]
122 seen = set(stack)
122 seen = set(stack)
123
123
124 while stack:
124 while stack:
125 current = stack.pop()
125 current = stack.pop()
126 currentpreccs = precursors.get(current, ())
126 currentpreccs = precursors.get(current, ())
127
127
128 for prec in currentpreccs:
128 for prec in currentpreccs:
129 precnodeid = prec[0]
129 precnodeid = prec[0]
130
130
131 # Basic cycle protection
131 # Basic cycle protection
132 if precnodeid in seen:
132 if precnodeid in seen:
133 continue
133 continue
134 seen.add(precnodeid)
134 seen.add(precnodeid)
135
135
136 if precnodeid in repo:
136 if precnodeid in repo:
137 yield precnodeid
137 yield precnodeid
138 else:
138 else:
139 stack.append(precnodeid)
139 stack.append(precnodeid)
140
140
141 def allpredecessors(obsstore, nodes, ignoreflags=0):
141 def allpredecessors(obsstore, nodes, ignoreflags=0):
142 """Yield node for every precursors of <nodes>.
142 """Yield node for every precursors of <nodes>.
143
143
144 Some precursors may be unknown locally.
144 Some precursors may be unknown locally.
145
145
146 This is a linear yield unsuited to detecting folded changesets. It includes
146 This is a linear yield unsuited to detecting folded changesets. It includes
147 initial nodes too."""
147 initial nodes too."""
148
148
149 remaining = set(nodes)
149 remaining = set(nodes)
150 seen = set(remaining)
150 seen = set(remaining)
151 while remaining:
151 while remaining:
152 current = remaining.pop()
152 current = remaining.pop()
153 yield current
153 yield current
154 for mark in obsstore.predecessors.get(current, ()):
154 for mark in obsstore.predecessors.get(current, ()):
155 # ignore marker flagged with specified flag
155 # ignore marker flagged with specified flag
156 if mark[2] & ignoreflags:
156 if mark[2] & ignoreflags:
157 continue
157 continue
158 suc = mark[0]
158 suc = mark[0]
159 if suc not in seen:
159 if suc not in seen:
160 seen.add(suc)
160 seen.add(suc)
161 remaining.add(suc)
161 remaining.add(suc)
162
162
163 def allsuccessors(obsstore, nodes, ignoreflags=0):
163 def allsuccessors(obsstore, nodes, ignoreflags=0):
164 """Yield node for every successor of <nodes>.
164 """Yield node for every successor of <nodes>.
165
165
166 Some successors may be unknown locally.
166 Some successors may be unknown locally.
167
167
168 This is a linear yield unsuited to detecting split changesets. It includes
168 This is a linear yield unsuited to detecting split changesets. It includes
169 initial nodes too."""
169 initial nodes too."""
170 remaining = set(nodes)
170 remaining = set(nodes)
171 seen = set(remaining)
171 seen = set(remaining)
172 while remaining:
172 while remaining:
173 current = remaining.pop()
173 current = remaining.pop()
174 yield current
174 yield current
175 for mark in obsstore.successors.get(current, ()):
175 for mark in obsstore.successors.get(current, ()):
176 # ignore marker flagged with specified flag
176 # ignore marker flagged with specified flag
177 if mark[2] & ignoreflags:
177 if mark[2] & ignoreflags:
178 continue
178 continue
179 for suc in mark[1]:
179 for suc in mark[1]:
180 if suc not in seen:
180 if suc not in seen:
181 seen.add(suc)
181 seen.add(suc)
182 remaining.add(suc)
182 remaining.add(suc)
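A self-contained toy run of the traversal above, with one-byte "nodes" and a fake store (markers reduced to (precursor, successors, flags)):

class ToyStore(object):
    successors = {
        b'A': [(b'A', (b'B',), 0)],       # A rewritten as B
        b'B': [(b'B', (b'C', b'D'), 0)],  # B split into C and D
    }

print(sorted(allsuccessors(ToyStore(), [b'A'])))
# [b'A', b'B', b'C', b'D'] -- initial node included, then all successors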
183
183
184 def _filterprunes(markers):
184 def _filterprunes(markers):
185 """return a set with no prune markers"""
185 """return a set with no prune markers"""
186 return set(m for m in markers if m[1])
186 return set(m for m in markers if m[1])
187
187
188 def exclusivemarkers(repo, nodes):
188 def exclusivemarkers(repo, nodes):
189 """set of markers relevant to "nodes" but no other locally-known nodes
189 """set of markers relevant to "nodes" but no other locally-known nodes
190
190
191 This function computes the set of markers "exclusive" to a locally-known
191 This function computes the set of markers "exclusive" to a locally-known
192 node. This means we walk the markers starting from <nodes> until we reach
192 node. This means we walk the markers starting from <nodes> until we reach
193 a locally-known precursor outside of <nodes>. Elements of <nodes> with
193 a locally-known precursor outside of <nodes>. Elements of <nodes> with
194 locally-known successors outside of <nodes> are ignored (since their
194 locally-known successors outside of <nodes> are ignored (since their
195 precursor markers are also relevant to those successors).
195 precursor markers are also relevant to those successors).
196
196
197 For example:
197 For example:
198
198
199 # (A0 rewritten as A1)
199 # (A0 rewritten as A1)
200 #
200 #
201 # A0 <-1- A1 # Marker "1" is exclusive to A1
201 # A0 <-1- A1 # Marker "1" is exclusive to A1
202
202
203 or
203 or
204
204
205 # (A0 rewritten as AX; AX rewritten as A1; AX is unknown locally)
205 # (A0 rewritten as AX; AX rewritten as A1; AX is unknown locally)
206 #
206 #
207 # <-1- A0 <-2- AX <-3- A1 # Marker "2,3" are exclusive to A1
207 # <-1- A0 <-2- AX <-3- A1 # Marker "2,3" are exclusive to A1
208
208
209 or
209 or
210
210
211 # (A0 has unknown precursors, A0 rewritten as A1 and A2 (divergence))
211 # (A0 has unknown precursors, A0 rewritten as A1 and A2 (divergence))
212 #
212 #
213 # <-2- A1 # Marker "2" is exclusive to A0,A1
213 # <-2- A1 # Marker "2" is exclusive to A0,A1
214 # /
214 # /
215 # <-1- A0
215 # <-1- A0
216 # \
216 # \
217 # <-3- A2 # Marker "3" is exclusive to A0,A2
217 # <-3- A2 # Marker "3" is exclusive to A0,A2
218 #
218 #
219 # in addition:
219 # in addition:
220 #
220 #
221 # Markers "2,3" are exclusive to A1,A2
221 # Markers "2,3" are exclusive to A1,A2
222 # Markers "1,2,3" are exclusive to A0,A1,A2
222 # Markers "1,2,3" are exclusive to A0,A1,A2
223
223
224 See test/test-obsolete-bundle-strip.t for more examples.
224 See test/test-obsolete-bundle-strip.t for more examples.
225
225
226 An example usage is strip. When stripping a changeset, we also want to
226 An example usage is strip. When stripping a changeset, we also want to
227 strip the markers exclusive to this changeset. Otherwise we would have
227 strip the markers exclusive to this changeset. Otherwise we would have
228 "dangling"" obsolescence markers from its precursors: Obsolescence markers
228 "dangling"" obsolescence markers from its precursors: Obsolescence markers
229 marking a node as obsolete without any successors available locally.
229 marking a node as obsolete without any successors available locally.
230
230
231 As for relevant markers, the prune markers for children will be followed.
231 As for relevant markers, the prune markers for children will be followed.
232 Of course, they will only be followed if the pruned child is
232 Of course, they will only be followed if the pruned child is
233 locally-known, since the prune markers are relevant to the pruned node.
233 locally-known, since the prune markers are relevant to the pruned node.
234 However, while prune markers are considered relevant to the parent of the
234 However, while prune markers are considered relevant to the parent of the
235 pruned changesets, prune markers for a locally-known changeset (with no
235 pruned changesets, prune markers for a locally-known changeset (with no
236 successors) are considered exclusive to the pruned nodes. This allows
236 successors) are considered exclusive to the pruned nodes. This allows
237 stripping the prune markers (with the rest of the exclusive chain) alongside
237 stripping the prune markers (with the rest of the exclusive chain) alongside
238 the pruned changesets.
238 the pruned changesets.
239 """
239 """
240 # running on a filtered repository would be dangerous as markers could be
240 # running on a filtered repository would be dangerous as markers could be
241 # reported as exclusive when they are relevant for other filtered nodes.
241 # reported as exclusive when they are relevant for other filtered nodes.
242 unfi = repo.unfiltered()
242 unfi = repo.unfiltered()
243
243
244 # shortcut to various useful item
244 # shortcut to various useful item
245 nm = unfi.changelog.nodemap
245 nm = unfi.changelog.nodemap
246 precursorsmarkers = unfi.obsstore.predecessors
246 precursorsmarkers = unfi.obsstore.predecessors
247 successormarkers = unfi.obsstore.successors
247 successormarkers = unfi.obsstore.successors
248 childrenmarkers = unfi.obsstore.children
248 childrenmarkers = unfi.obsstore.children
249
249
250 # exclusive markers (return of the function)
250 # exclusive markers (return of the function)
251 exclmarkers = set()
251 exclmarkers = set()
252 # we need fast membership testing
252 # we need fast membership testing
253 nodes = set(nodes)
253 nodes = set(nodes)
254 # looking for head in the obshistory
254 # looking for head in the obshistory
255 #
255 #
256 # XXX we are ignoring all issues regarding cycles for now.
256 # XXX we are ignoring all issues regarding cycles for now.
257 stack = [n for n in nodes if not _filterprunes(successormarkers.get(n, ()))]
257 stack = [n for n in nodes if not _filterprunes(successormarkers.get(n, ()))]
258 stack.sort()
258 stack.sort()
259 # nodes already stacked
259 # nodes already stacked
260 seennodes = set(stack)
260 seennodes = set(stack)
261 while stack:
261 while stack:
262 current = stack.pop()
262 current = stack.pop()
263 # fetch precursors markers
263 # fetch precursors markers
264 markers = list(precursorsmarkers.get(current, ()))
264 markers = list(precursorsmarkers.get(current, ()))
265 # extend the list with prune markers
265 # extend the list with prune markers
266 for mark in successormarkers.get(current, ()):
266 for mark in successormarkers.get(current, ()):
267 if not mark[1]:
267 if not mark[1]:
268 markers.append(mark)
268 markers.append(mark)
269 # and markers from children (looking for prune)
269 # and markers from children (looking for prune)
270 for mark in childrenmarkers.get(current, ()):
270 for mark in childrenmarkers.get(current, ()):
271 if not mark[1]:
271 if not mark[1]:
272 markers.append(mark)
272 markers.append(mark)
273 # traverse the markers
273 # traverse the markers
274 for mark in markers:
274 for mark in markers:
275 if mark in exclmarkers:
275 if mark in exclmarkers:
276 # markers already selected
276 # markers already selected
277 continue
277 continue
278
278
279 # If the marker is about the current node, select it
279 # If the marker is about the current node, select it
280 #
280 #
281 # (this delays the addition of markers from children)
281 # (this delays the addition of markers from children)
282 if mark[1] or mark[0] == current:
282 if mark[1] or mark[0] == current:
283 exclmarkers.add(mark)
283 exclmarkers.add(mark)
284
284
285 # should we keep traversing through the precursors?
285 # should we keep traversing through the precursors?
286 prec = mark[0]
286 prec = mark[0]
287
287
288 # nodes in the stack or already processed
288 # nodes in the stack or already processed
289 if prec in seennodes:
289 if prec in seennodes:
290 continue
290 continue
291
291
292 # is this a locally known node ?
292 # is this a locally known node ?
293 known = prec in nm
293 known = prec in nm
294 # if locally-known and not in the <nodes> set, the traversal
294 # if locally-known and not in the <nodes> set, the traversal
295 # stops here.
295 # stops here.
296 if known and prec not in nodes:
296 if known and prec not in nodes:
297 continue
297 continue
298
298
299 # do not keep going if there are unselected markers pointing to this
299 # do not keep going if there are unselected markers pointing to this
300 # node. If we end up traversing these unselected markers later, the
300 # node. If we end up traversing these unselected markers later, the
301 # node will be taken care of at that point.
301 # node will be taken care of at that point.
302 precmarkers = _filterprunes(successormarkers.get(prec))
302 precmarkers = _filterprunes(successormarkers.get(prec))
303 if precmarkers.issubset(exclmarkers):
303 if precmarkers.issubset(exclmarkers):
304 seennodes.add(prec)
304 seennodes.add(prec)
305 stack.append(prec)
305 stack.append(prec)
306
306
307 return exclmarkers
307 return exclmarkers
308
308
309 def foreground(repo, nodes):
309 def foreground(repo, nodes):
310 """return all nodes in the "foreground" of other node
310 """return all nodes in the "foreground" of other node
311
311
312 The foreground of a revision is anything reachable using parent -> children
312 The foreground of a revision is anything reachable using parent -> children
313 or precursor -> successor relations. It is very similar to "descendant" but
313 or precursor -> successor relations. It is very similar to "descendant" but
314 augmented with obsolescence information.
314 augmented with obsolescence information.
315
315
316 Beware that obsolescence cycles may arise in complex situations.
316 Beware that obsolescence cycles may arise in complex situations.
317 """
317 """
318 repo = repo.unfiltered()
318 repo = repo.unfiltered()
319 foreground = set(repo.set('%ln::', nodes))
319 foreground = set(repo.set('%ln::', nodes))
320 if repo.obsstore:
320 if repo.obsstore:
321 # We only need this complicated logic if there is obsolescence
321 # We only need this complicated logic if there is obsolescence
322 # XXX will probably deserve an optimised revset.
322 # XXX will probably deserve an optimised revset.
323 nm = repo.changelog.nodemap
323 nm = repo.changelog.nodemap
324 plen = -1
324 plen = -1
325 # compute the whole set of successors or descendants
325 # compute the whole set of successors or descendants
326 while len(foreground) != plen:
326 while len(foreground) != plen:
327 plen = len(foreground)
327 plen = len(foreground)
328 succs = set(c.node() for c in foreground)
328 succs = set(c.node() for c in foreground)
329 mutable = [c.node() for c in foreground if c.mutable()]
329 mutable = [c.node() for c in foreground if c.mutable()]
330 succs.update(allsuccessors(repo.obsstore, mutable))
330 succs.update(allsuccessors(repo.obsstore, mutable))
331 known = (n for n in succs if n in nm)
331 known = (n for n in succs if n in nm)
332 foreground = set(repo.set('%ln::', known))
332 foreground = set(repo.set('%ln::', known))
333 return set(c.node() for c in foreground)
333 return set(c.node() for c in foreground)
334
334
335 # effectflag field
335 # effectflag field
336 #
336 #
337 # Effect-flag is a 1-byte bit field used to store what changed between a
337 # Effect-flag is a 1-byte bit field used to store what changed between a
338 # changeset and its successor(s).
338 # changeset and its successor(s).
339 #
339 #
340 # The effect flag is stored in obs-markers metadata while we iterate on the
340 # The effect flag is stored in obs-markers metadata while we iterate on the
341 # information design. That's why we have the EFFECTFLAGFIELD. If we come up
341 # information design. That's why we have the EFFECTFLAGFIELD. If we come up
342 # with an incompatible design for effect flag, we can store a new design under
342 # with an incompatible design for effect flag, we can store a new design under
343 # another field name so we don't break readers. We plan to extend the existing
343 # another field name so we don't break readers. We plan to extend the existing
344 # obsmarkers bit-field when the effect flag design will be stabilized.
344 # obsmarkers bit-field when the effect flag design will be stabilized.
345 #
345 #
346 # The effect-flag is placed behind an experimental flag
346 # The effect-flag is placed behind an experimental flag
347 # `effect-flags` set to off by default.
347 # `effect-flags` set to off by default.
348 #
348 #
349
349
350 EFFECTFLAGFIELD = "ef1"
350 EFFECTFLAGFIELD = "ef1"
351
351
352 DESCCHANGED = 1 << 0 # action changed the description
352 DESCCHANGED = 1 << 0 # action changed the description
353 METACHANGED = 1 << 1 # action changed the meta
353 METACHANGED = 1 << 1 # action changed the meta
354 DIFFCHANGED = 1 << 3 # action changed the diff introduced by the changeset
354 DIFFCHANGED = 1 << 3 # action changed the diff introduced by the changeset
355 PARENTCHANGED = 1 << 2 # action changed the parent
355 PARENTCHANGED = 1 << 2 # action changed the parent
356 USERCHANGED = 1 << 4 # the user changed
356 USERCHANGED = 1 << 4 # the user changed
357 DATECHANGED = 1 << 5 # the date changed
357 DATECHANGED = 1 << 5 # the date changed
358 BRANCHCHANGED = 1 << 6 # the branch changed
358 BRANCHCHANGED = 1 << 6 # the branch changed
359
359
360 METABLACKLIST = [
360 METABLACKLIST = [
361 re.compile('^branch$'),
361 re.compile('^branch$'),
362 re.compile('^.*-source$'),
362 re.compile('^.*-source$'),
363 re.compile('^.*_source$'),
363 re.compile('^.*_source$'),
364 re.compile('^source$'),
364 re.compile('^source$'),
365 ]
365 ]
366
366
367 def metanotblacklisted(metaitem):
367 def metanotblacklisted(metaitem):
368 """ Check that the key of a meta item (extrakey, extravalue) does not
368 """ Check that the key of a meta item (extrakey, extravalue) does not
369 match any of the blacklist patterns
369 match any of the blacklist patterns
370 """
370 """
371 metakey = metaitem[0]
371 metakey = metaitem[0]
372
372
373 return not any(pattern.match(metakey) for pattern in METABLACKLIST)
373 return not any(pattern.match(metakey) for pattern in METABLACKLIST)
374
374
375 def _prepare_hunk(hunk):
375 def _prepare_hunk(hunk):
376 """Drop all information but the username and patch"""
376 """Drop all information but the username and patch"""
377 cleanhunk = []
377 cleanhunk = []
378 for line in hunk.splitlines():
378 for line in hunk.splitlines():
379 if line.startswith(b'# User') or not line.startswith(b'#'):
379 if line.startswith(b'# User') or not line.startswith(b'#'):
380 if line.startswith(b'@@'):
380 if line.startswith(b'@@'):
381 line = b'@@\n'
381 line = b'@@\n'
382 cleanhunk.append(line)
382 cleanhunk.append(line)
383 return cleanhunk
383 return cleanhunk
384
384
385 def _getdifflines(iterdiff):
385 def _getdifflines(iterdiff):
386 """return a cleaned up lines"""
386 """return a cleaned up lines"""
387 lines = next(iterdiff, None)
387 lines = next(iterdiff, None)
388
388
389 if lines is None:
389 if lines is None:
390 return lines
390 return lines
391
391
392 return _prepare_hunk(lines)
392 return _prepare_hunk(lines)
393
393
394 def _cmpdiff(leftctx, rightctx):
394 def _cmpdiff(leftctx, rightctx):
395 """return True if both ctx introduce the "same diff"
395 """return True if both ctx introduce the "same diff"
396
396
397 This is a first and basic implementation, with many shortcomings.
397 This is a first and basic implementation, with many shortcomings.
398 """
398 """
399 diffopts = diffutil.diffallopts(leftctx.repo().ui, {'git': True})
399 diffopts = diffutil.diffallopts(leftctx.repo().ui, {'git': True})
400 # Leftctx or right ctx might be filtered, so we need to use the contexts
400 # Leftctx or right ctx might be filtered, so we need to use the contexts
401 # with an unfiltered repository to safely compute the diff
401 # with an unfiltered repository to safely compute the diff
402 leftunfi = leftctx._repo.unfiltered()[leftctx.rev()]
402 leftunfi = leftctx._repo.unfiltered()[leftctx.rev()]
403 leftdiff = leftunfi.diff(opts=diffopts)
403 leftdiff = leftunfi.diff(opts=diffopts)
404 rightunfi = rightctx._repo.unfiltered()[rightctx.rev()]
404 rightunfi = rightctx._repo.unfiltered()[rightctx.rev()]
405 rightdiff = rightunfi.diff(opts=diffopts)
405 rightdiff = rightunfi.diff(opts=diffopts)
406
406
407 left, right = (0, 0)
407 left, right = (0, 0)
408 while None not in (left, right):
408 while None not in (left, right):
409 left = _getdifflines(leftdiff)
409 left = _getdifflines(leftdiff)
410 right = _getdifflines(rightdiff)
410 right = _getdifflines(rightdiff)
411
411
412 if left != right:
412 if left != right:
413 return False
413 return False
414 return True
414 return True
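The while loop above is a lockstep comparison: pull one cleaned hunk from each side until either stream ends (next() returning None). The same pattern in isolation:

def same_stream(a, b):
    left = right = 0
    while None not in (left, right):
        left, right = next(a, None), next(b, None)
        if left != right:
            return False
    return True

assert same_stream(iter([1, 2]), iter([1, 2]))
assert not same_stream(iter([1, 2]), iter([1, 2, 3]))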
415
415
416 def geteffectflag(relation):
416 def geteffectflag(relation):
417 """ From an obs-marker relation, compute what changed between the
417 """ From an obs-marker relation, compute what changed between the
418 predecessor and the successor.
418 predecessor and the successor.
419 """
419 """
420 effects = 0
420 effects = 0
421
421
422 source = relation[0]
422 source = relation[0]
423
423
424 for changectx in relation[1]:
424 for changectx in relation[1]:
425 # Check if description has changed
425 # Check if description has changed
426 if changectx.description() != source.description():
426 if changectx.description() != source.description():
427 effects |= DESCCHANGED
427 effects |= DESCCHANGED
428
428
429 # Check if user has changed
429 # Check if user has changed
430 if changectx.user() != source.user():
430 if changectx.user() != source.user():
431 effects |= USERCHANGED
431 effects |= USERCHANGED
432
432
433 # Check if date has changed
433 # Check if date has changed
434 if changectx.date() != source.date():
434 if changectx.date() != source.date():
435 effects |= DATECHANGED
435 effects |= DATECHANGED
436
436
437 # Check if branch has changed
437 # Check if branch has changed
438 if changectx.branch() != source.branch():
438 if changectx.branch() != source.branch():
439 effects |= BRANCHCHANGED
439 effects |= BRANCHCHANGED
440
440
441 # Check if at least one of the parents has changed
441 # Check if at least one of the parents has changed
442 if changectx.parents() != source.parents():
442 if changectx.parents() != source.parents():
443 effects |= PARENTCHANGED
443 effects |= PARENTCHANGED
444
444
445 # Check if other meta has changed
445 # Check if other meta has changed
446 changeextra = changectx.extra().items()
446 changeextra = changectx.extra().items()
447 ctxmeta = list(filter(metanotblacklisted, changeextra))
447 ctxmeta = list(filter(metanotblacklisted, changeextra))
448
448
449 sourceextra = source.extra().items()
449 sourceextra = source.extra().items()
450 srcmeta = list(filter(metanotblacklisted, sourceextra))
450 srcmeta = list(filter(metanotblacklisted, sourceextra))
451
451
452 if ctxmeta != srcmeta:
452 if ctxmeta != srcmeta:
453 effects |= METACHANGED
453 effects |= METACHANGED
454
454
455 # Check if the diff has changed
455 # Check if the diff has changed
456 if not _cmpdiff(source, changectx):
456 if not _cmpdiff(source, changectx):
457 effects |= DIFFCHANGED
457 effects |= DIFFCHANGED
458
458
459 return effects
459 return effects
460
460
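Each *CHANGED name above is a distinct bit, so the returned value is a bitmask that callers can test per effect. A small sketch of that pattern with hypothetical flag values (the real constants are defined earlier in this module and may differ):

# hypothetical flag values for illustration only
DESCCHANGED = 1 << 0
USERCHANGED = 1 << 1
DATECHANGED = 1 << 2

effects = 0
effects |= DESCCHANGED  # description differed
effects |= DATECHANGED  # date differed

assert effects & DESCCHANGED        # this effect is recorded
assert not (effects & USERCHANGED)  # this one is not
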
461 def getobsoleted(repo, tr):
461 def getobsoleted(repo, tr):
462 """return the set of pre-existing revisions obsoleted by a transaction"""
462 """return the set of pre-existing revisions obsoleted by a transaction"""
463 torev = repo.unfiltered().changelog.nodemap.get
463 torev = repo.unfiltered().changelog.nodemap.get
464 phase = repo._phasecache.phase
464 phase = repo._phasecache.phase
465 succsmarkers = repo.obsstore.successors.get
465 succsmarkers = repo.obsstore.successors.get
466 public = phases.public
466 public = phases.public
467 addedmarkers = tr.changes.get('obsmarkers')
467 addedmarkers = tr.changes.get('obsmarkers')
468 addedrevs = tr.changes['revs']
468 origrepolen = tr.changes['origrepolen']
469 seenrevs = set()
469 seenrevs = set()
470 obsoleted = set()
470 obsoleted = set()
471 for mark in addedmarkers:
471 for mark in addedmarkers:
472 node = mark[0]
472 node = mark[0]
473 rev = torev(node)
473 rev = torev(node)
474 if rev is None or rev in seenrevs or rev in addedrevs:
474 if rev is None or rev in seenrevs or rev >= origrepolen:
475 continue
475 continue
476 seenrevs.add(rev)
476 seenrevs.add(rev)
477 if phase(repo, rev) == public:
477 if phase(repo, rev) == public:
478 continue
478 continue
479 if set(succsmarkers(node) or []).issubset(addedmarkers):
479 if set(succsmarkers(node) or []).issubset(addedmarkers):
480 obsoleted.add(rev)
480 obsoleted.add(rev)
481 return obsoleted
481 return obsoleted
482
482
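The 'origrepolen' comparison replaces the old membership test against the set of added revs: revision numbers are assigned sequentially, so any rev at or beyond the repository length recorded when the transaction opened was added by that transaction and is not "pre-existing". A sketch of the invariant with made-up numbers:

origrepolen = 5              # hypothetical len(repo) at transaction start
added_during_tr = [5, 6, 7]  # revs appended by the transaction

assert all(rev >= origrepolen for rev in added_during_tr)
assert all(rev < origrepolen for rev in [0, 3, 4])  # pre-existing revs
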
483 class _succs(list):
483 class _succs(list):
484 """small class to represent a successors with some metadata about it"""
484 """small class to represent a successors with some metadata about it"""
485
485
486 def __init__(self, *args, **kwargs):
486 def __init__(self, *args, **kwargs):
487 super(_succs, self).__init__(*args, **kwargs)
487 super(_succs, self).__init__(*args, **kwargs)
488 self.markers = set()
488 self.markers = set()
489
489
490 def copy(self):
490 def copy(self):
491 new = _succs(self)
491 new = _succs(self)
492 new.markers = self.markers.copy()
492 new.markers = self.markers.copy()
493 return new
493 return new
494
494
495 @util.propertycache
495 @util.propertycache
496 def _set(self):
496 def _set(self):
497 # immutable
497 # immutable
498 return set(self)
498 return set(self)
499
499
500 def canmerge(self, other):
500 def canmerge(self, other):
501 return self._set.issubset(other._set)
501 return self._set.issubset(other._set)
502
502
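canmerge() drives the subset elimination at the end of successorssets(): a candidate whose node set is contained in an already-kept set is merged into it rather than reported separately. The same test with plain sets, letters standing in for nodes:

small = {'a'}
large = {'a', 'b'}
assert small.issubset(large)      # small can be merged into large
assert not large.issubset(small)  # the reverse merge is not allowed
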
503 def successorssets(repo, initialnode, closest=False, cache=None):
503 def successorssets(repo, initialnode, closest=False, cache=None):
504 """Return set of all latest successors of initial nodes
504 """Return set of all latest successors of initial nodes
505
505
506 The successors set of a changeset A are the group of revisions that succeed
506 The successors set of a changeset A are the group of revisions that succeed
507 A. It succeeds A as a consistent whole, each revision being only a partial
507 A. It succeeds A as a consistent whole, each revision being only a partial
508 replacement. By default, the successors set contains non-obsolete
508 replacement. By default, the successors set contains non-obsolete
509 changesets only, walking the obsolescence graph until reaching a leaf. If
509 changesets only, walking the obsolescence graph until reaching a leaf. If
510 'closest' is set to True, the closest successors sets are returned (the
510 'closest' is set to True, the closest successors sets are returned (the
511 obsolescence walk stops on known changesets).
511 obsolescence walk stops on known changesets).
512
512
513 This function returns the full list of successor sets which is why it
513 This function returns the full list of successor sets which is why it
514 returns a list of tuples and not just a single tuple. Each tuple is a valid
514 returns a list of tuples and not just a single tuple. Each tuple is a valid
515 successors set. Note that (A,) may be a valid successors set for changeset A
515 successors set. Note that (A,) may be a valid successors set for changeset A
516 (see below).
516 (see below).
517
517
518 In most cases, a changeset A will have a single element (e.g. the changeset
518 In most cases, a changeset A will have a single element (e.g. the changeset
519 A is replaced by A') in its successors set. Though, it is also common for a
519 A is replaced by A') in its successors set. Though, it is also common for a
520 changeset A to have no elements in its successor set (e.g. the changeset
520 changeset A to have no elements in its successor set (e.g. the changeset
521 has been pruned). Therefore, the returned list of successors sets will be
521 has been pruned). Therefore, the returned list of successors sets will be
522 [(A',)] or [], respectively.
522 [(A',)] or [], respectively.
523
523
524 When a changeset A is split into A' and B', however, it will result in a
524 When a changeset A is split into A' and B', however, it will result in a
525 successors set containing more than a single element, i.e. [(A',B')].
525 successors set containing more than a single element, i.e. [(A',B')].
526 Divergent changesets will result in multiple successors sets, i.e. [(A',),
526 Divergent changesets will result in multiple successors sets, i.e. [(A',),
527 (A'')].
527 (A'')].
528
528
529 If a changeset A is not obsolete, then it will conceptually have no
529 If a changeset A is not obsolete, then it will conceptually have no
530 successors set. To distinguish this from a pruned changeset, the successor
530 successors set. To distinguish this from a pruned changeset, the successor
531 set will contain itself only, i.e. [(A,)].
531 set will contain itself only, i.e. [(A,)].
532
532
533 Finally, final successors unknown locally are considered to be pruned
533 Finally, final successors unknown locally are considered to be pruned
534 (pruned: obsoleted without any successors). (Final: successors not affected
534 (pruned: obsoleted without any successors). (Final: successors not affected
535 by markers).
535 by markers).
536
536
537 The 'closest' mode respects the repoview filtering. For example, without a
537 The 'closest' mode respects the repoview filtering. For example, without a
538 filter it will stop at the first locally known changeset; with the 'visible'
538 filter it will stop at the first locally known changeset; with the 'visible'
539 filter it will stop on visible changesets.
539 filter it will stop on visible changesets.
540
540
541 The optional `cache` parameter is a dictionary that may contain
541 The optional `cache` parameter is a dictionary that may contain
542 precomputed successors sets. It is meant to reuse the computation of a
542 precomputed successors sets. It is meant to reuse the computation of a
543 previous call to `successorssets` when multiple calls are made at the same
543 previous call to `successorssets` when multiple calls are made at the same
544 time. The cache dictionary is updated in place. The caller is responsible
544 time. The cache dictionary is updated in place. The caller is responsible
545 for its life span. Code that makes multiple calls to `successorssets`
545 for its life span. Code that makes multiple calls to `successorssets`
546 *should* use this cache mechanism or risk a performance hit.
546 *should* use this cache mechanism or risk a performance hit.
547
547
548 Since the results differ depending on the 'closest' mode, the same cache
548 Since the results differ depending on the 'closest' mode, the same cache
549 cannot be reused for both modes.
549 cannot be reused for both modes.
550 """
550 """
551
551
552 succmarkers = repo.obsstore.successors
552 succmarkers = repo.obsstore.successors
553
553
554 # Stack of nodes we search successors sets for
554 # Stack of nodes we search successors sets for
555 toproceed = [initialnode]
555 toproceed = [initialnode]
556 # set version of above list for fast loop detection
556 # set version of above list for fast loop detection
557 # element added to "toproceed" must be added here
557 # element added to "toproceed" must be added here
558 stackedset = set(toproceed)
558 stackedset = set(toproceed)
559 if cache is None:
559 if cache is None:
560 cache = {}
560 cache = {}
561
561
562 # This while loop is the flattened version of a recursive search for
562 # This while loop is the flattened version of a recursive search for
563 # successors sets
563 # successors sets
564 #
564 #
565 # def successorssets(x):
565 # def successorssets(x):
566 # successors = directsuccessors(x)
566 # successors = directsuccessors(x)
567 # ss = [[]]
567 # ss = [[]]
568 # for succ in directsuccessors(x):
568 # for succ in directsuccessors(x):
569 # # product as in itertools cartesian product
569 # # product as in itertools cartesian product
570 # ss = product(ss, successorssets(succ))
570 # ss = product(ss, successorssets(succ))
571 # return ss
571 # return ss
572 #
572 #
573 # But we can not use plain recursive calls here:
573 # But we can not use plain recursive calls here:
574 # - that would blow the python call stack
574 # - that would blow the python call stack
575 # - obsolescence markers may have cycles, we need to handle them.
575 # - obsolescence markers may have cycles, we need to handle them.
576 #
576 #
577 # The `toproceed` list acts as our call stack. Every node we search
577 # The `toproceed` list acts as our call stack. Every node we search
578 # successors sets for is stacked there.
578 # successors sets for is stacked there.
579 #
579 #
580 # The `stackedset` is a set version of this stack used to check if a node is
580 # The `stackedset` is a set version of this stack used to check if a node is
581 # already stacked. This check is used to detect cycles and prevent infinite
581 # already stacked. This check is used to detect cycles and prevent infinite
582 # loops.
582 # loops.
583 #
583 #
584 # The successors sets of all nodes are stored in the `cache` dictionary.
584 # The successors sets of all nodes are stored in the `cache` dictionary.
585 #
585 #
586 # After this while loop ends we use the cache to return the successors sets
586 # After this while loop ends we use the cache to return the successors sets
587 # for the node requested by the caller.
587 # for the node requested by the caller.
588 while toproceed:
588 while toproceed:
589 # Every iteration tries to compute the successors sets of the topmost
589 # Every iteration tries to compute the successors sets of the topmost
590 # node of the stack: CURRENT.
590 # node of the stack: CURRENT.
591 #
591 #
592 # There are four possible outcomes:
592 # There are four possible outcomes:
593 #
593 #
594 # 1) We already know the successors sets of CURRENT:
594 # 1) We already know the successors sets of CURRENT:
595 # -> mission accomplished, pop it from the stack.
595 # -> mission accomplished, pop it from the stack.
596 # 2) Stop the walk:
596 # 2) Stop the walk:
597 # default case: Node is not obsolete
597 # default case: Node is not obsolete
598 # closest case: Node is known at this repo filter level
598 # closest case: Node is known at this repo filter level
599 # -> the node is its own successors sets. Add it to the cache.
599 # -> the node is its own successors sets. Add it to the cache.
600 # 3) We do not know successors set of direct successors of CURRENT:
600 # 3) We do not know successors set of direct successors of CURRENT:
601 # -> We add those successors to the stack.
601 # -> We add those successors to the stack.
602 # 4) We know successors sets of all direct successors of CURRENT:
602 # 4) We know successors sets of all direct successors of CURRENT:
603 # -> We can compute CURRENT successors set and add it to the
603 # -> We can compute CURRENT successors set and add it to the
604 # cache.
604 # cache.
605 #
605 #
606 current = toproceed[-1]
606 current = toproceed[-1]
607
607
608 # case 2 condition is a bit hairy because of closest,
608 # case 2 condition is a bit hairy because of closest,
609 # we compute it on its own
609 # we compute it on its own
610 case2condition = ((current not in succmarkers)
610 case2condition = ((current not in succmarkers)
611 or (closest and current != initialnode
611 or (closest and current != initialnode
612 and current in repo))
612 and current in repo))
613
613
614 if current in cache:
614 if current in cache:
615 # case (1): We already know the successors sets
615 # case (1): We already know the successors sets
616 stackedset.remove(toproceed.pop())
616 stackedset.remove(toproceed.pop())
617 elif case2condition:
617 elif case2condition:
618 # case (2): end of walk.
618 # case (2): end of walk.
619 if current in repo:
619 if current in repo:
620 # We have a valid successors.
620 # We have a valid successors.
621 cache[current] = [_succs((current,))]
621 cache[current] = [_succs((current,))]
622 else:
622 else:
623 # Final obsolete version is unknown locally.
623 # Final obsolete version is unknown locally.
624 # Do not count that as a valid successors
624 # Do not count that as a valid successors
625 cache[current] = []
625 cache[current] = []
626 else:
626 else:
627 # cases (3) and (4)
627 # cases (3) and (4)
628 #
628 #
629 # We proceed in two phases. Phase 1 aims to distinguish case (3)
629 # We proceed in two phases. Phase 1 aims to distinguish case (3)
630 # from case (4):
630 # from case (4):
631 #
631 #
632 # For each direct successor of CURRENT, we check whether its
632 # For each direct successor of CURRENT, we check whether its
633 # successors sets are known. If they are not, we stack the
633 # successors sets are known. If they are not, we stack the
634 # unknown node and proceed to the next iteration of the while
634 # unknown node and proceed to the next iteration of the while
635 # loop. (case 3)
635 # loop. (case 3)
636 #
636 #
637 # During this step, we may detect obsolescence cycles: a node
637 # During this step, we may detect obsolescence cycles: a node
638 # with unknown successors sets but already in the call stack.
638 # with unknown successors sets but already in the call stack.
639 # In such a situation, we arbitrarily set the successors sets of
639 # In such a situation, we arbitrarily set the successors sets of
640 # the node to nothing (node pruned) to break the cycle.
640 # the node to nothing (node pruned) to break the cycle.
641 #
641 #
642 # If no break was encountered we proceed to phase 2.
642 # If no break was encountered we proceed to phase 2.
643 #
643 #
644 # Phase 2 computes successors sets of CURRENT (case 4); see details
644 # Phase 2 computes successors sets of CURRENT (case 4); see details
645 # in phase 2 itself.
645 # in phase 2 itself.
646 #
646 #
647 # Note the two levels of iteration in each phase.
647 # Note the two levels of iteration in each phase.
648 # - The first one handles obsolescence markers using CURRENT as
648 # - The first one handles obsolescence markers using CURRENT as
649 # precursor (successors markers of CURRENT).
649 # precursor (successors markers of CURRENT).
650 #
650 #
651 # Having multiple entries here means divergence.
651 # Having multiple entries here means divergence.
652 #
652 #
653 # - The second one handles successors defined in each marker.
653 # - The second one handles successors defined in each marker.
654 #
654 #
655 # Having none means a pruned node, multiple successors means a split,
655 # Having none means a pruned node, multiple successors means a split,
656 # and a single successor is a standard replacement.
656 # and a single successor is a standard replacement.
657 #
657 #
658 for mark in sorted(succmarkers[current]):
658 for mark in sorted(succmarkers[current]):
659 for suc in mark[1]:
659 for suc in mark[1]:
660 if suc not in cache:
660 if suc not in cache:
661 if suc in stackedset:
661 if suc in stackedset:
662 # cycle breaking
662 # cycle breaking
663 cache[suc] = []
663 cache[suc] = []
664 else:
664 else:
665 # case (3) If we have not computed successors sets
665 # case (3) If we have not computed successors sets
666 # of one of those successors we add it to the
666 # of one of those successors we add it to the
667 # `toproceed` stack and stop all work for this
667 # `toproceed` stack and stop all work for this
668 # iteration.
668 # iteration.
669 toproceed.append(suc)
669 toproceed.append(suc)
670 stackedset.add(suc)
670 stackedset.add(suc)
671 break
671 break
672 else:
672 else:
673 continue
673 continue
674 break
674 break
675 else:
675 else:
676 # case (4): we know all successors sets of all direct
676 # case (4): we know all successors sets of all direct
677 # successors
677 # successors
678 #
678 #
679 # Successors set contributed by each marker depends on the
679 # Successors set contributed by each marker depends on the
680 # successors sets of all its "successors" nodes.
680 # successors sets of all its "successors" nodes.
681 #
681 #
682 # Each different marker is a divergence in the obsolescence
682 # Each different marker is a divergence in the obsolescence
683 # history. It contributes successors sets distinct from other
683 # history. It contributes successors sets distinct from other
684 # markers.
684 # markers.
685 #
685 #
686 # Within a marker, a successor may have divergent successors
686 # Within a marker, a successor may have divergent successors
687 # sets. In such a case, the marker will contribute multiple
687 # sets. In such a case, the marker will contribute multiple
688 # divergent successors sets. If multiple successors have
688 # divergent successors sets. If multiple successors have
689 # divergent successors sets, a Cartesian product is used.
689 # divergent successors sets, a Cartesian product is used.
690 #
690 #
691 # At the end we post-process successors sets to remove
691 # At the end we post-process successors sets to remove
692 # duplicated entry and successors set that are strict subset of
692 # duplicated entry and successors set that are strict subset of
693 # another one.
693 # another one.
694 succssets = []
694 succssets = []
695 for mark in sorted(succmarkers[current]):
695 for mark in sorted(succmarkers[current]):
696 # successors sets contributed by this marker
696 # successors sets contributed by this marker
697 base = _succs()
697 base = _succs()
698 base.markers.add(mark)
698 base.markers.add(mark)
699 markss = [base]
699 markss = [base]
700 for suc in mark[1]:
700 for suc in mark[1]:
701 # cartesian product with previous successors
701 # cartesian product with previous successors
702 productresult = []
702 productresult = []
703 for prefix in markss:
703 for prefix in markss:
704 for suffix in cache[suc]:
704 for suffix in cache[suc]:
705 newss = prefix.copy()
705 newss = prefix.copy()
706 newss.markers.update(suffix.markers)
706 newss.markers.update(suffix.markers)
707 for part in suffix:
707 for part in suffix:
708 # do not duplicate entries in a successors set;
708 # do not duplicate entries in a successors set;
709 # the first entry wins.
709 # the first entry wins.
710 if part not in newss:
710 if part not in newss:
711 newss.append(part)
711 newss.append(part)
712 productresult.append(newss)
712 productresult.append(newss)
713 markss = productresult
713 markss = productresult
714 succssets.extend(markss)
714 succssets.extend(markss)
715 # remove duplicated and subset
715 # remove duplicated and subset
716 seen = []
716 seen = []
717 final = []
717 final = []
718 candidates = sorted((s for s in succssets if s),
718 candidates = sorted((s for s in succssets if s),
719 key=len, reverse=True)
719 key=len, reverse=True)
720 for cand in candidates:
720 for cand in candidates:
721 for seensuccs in seen:
721 for seensuccs in seen:
722 if cand.canmerge(seensuccs):
722 if cand.canmerge(seensuccs):
723 seensuccs.markers.update(cand.markers)
723 seensuccs.markers.update(cand.markers)
724 break
724 break
725 else:
725 else:
726 final.append(cand)
726 final.append(cand)
727 seen.append(cand)
727 seen.append(cand)
728 final.reverse() # put small successors set first
728 final.reverse() # put small successors set first
729 cache[current] = final
729 cache[current] = final
730 return cache[initialnode]
730 return cache[initialnode]
731
731
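Phase 2 above combines the successors sets of each marker's successors with a cartesian product, skipping nodes already present in a prefix. A compact standalone sketch of that product step, using plain lists of placeholder node names and ignoring markers:

def product(prefixes, suffixes):
    result = []
    for prefix in prefixes:
        for suffix in suffixes:
            newss = list(prefix)
            for part in suffix:
                if part not in newss:  # first entry wins
                    newss.append(part)
            result.append(newss)
    return result

# a marker rewrites X as (B, C); B itself diverged into B1 or B2, C into C1
markss = [[]]
markss = product(markss, [['B1'], ['B2']])
markss = product(markss, [['C1']])
assert markss == [['B1', 'C1'], ['B2', 'C1']]
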
732 def successorsandmarkers(repo, ctx):
732 def successorsandmarkers(repo, ctx):
733 """compute the raw data needed for computing obsfate
733 """compute the raw data needed for computing obsfate
734 Returns a list of dicts, one dict per successors set
734 Returns a list of dicts, one dict per successors set
735 """
735 """
736 if not ctx.obsolete():
736 if not ctx.obsolete():
737 return None
737 return None
738
738
739 ssets = successorssets(repo, ctx.node(), closest=True)
739 ssets = successorssets(repo, ctx.node(), closest=True)
740
740
741 # successorssets returns an empty list for pruned revisions; remap it
741 # successorssets returns an empty list for pruned revisions; remap it
742 # into a list containing an empty list for future processing
742 # into a list containing an empty list for future processing
743 if ssets == []:
743 if ssets == []:
744 ssets = [[]]
744 ssets = [[]]
745
745
746 # Try to recover pruned markers
746 # Try to recover pruned markers
747 succsmap = repo.obsstore.successors
747 succsmap = repo.obsstore.successors
748 fullsuccessorsets = [] # successor set + markers
748 fullsuccessorsets = [] # successor set + markers
749 for sset in ssets:
749 for sset in ssets:
750 if sset:
750 if sset:
751 fullsuccessorsets.append(sset)
751 fullsuccessorsets.append(sset)
752 else:
752 else:
753 # successorssets returns an empty set() when ctx or one of its
753 # successorssets returns an empty set() when ctx or one of its
754 # successors is pruned.
754 # successors is pruned.
755 # In this case, walk the obs-markers tree again starting with ctx
755 # In this case, walk the obs-markers tree again starting with ctx
756 # and find the relevant pruning obs-markers, the ones without
756 # and find the relevant pruning obs-markers, the ones without
757 # successors.
757 # successors.
758 # Having these markers allows us to compute some information about
758 # Having these markers allows us to compute some information about
759 # its fate, like who pruned this changeset and when.
759 # its fate, like who pruned this changeset and when.
760
760
761 # XXX we do not catch all prune markers (eg rewritten then pruned)
761 # XXX we do not catch all prune markers (eg rewritten then pruned)
762 # (fix me later)
762 # (fix me later)
763 foundany = False
763 foundany = False
764 for mark in succsmap.get(ctx.node(), ()):
764 for mark in succsmap.get(ctx.node(), ()):
765 if not mark[1]:
765 if not mark[1]:
766 foundany = True
766 foundany = True
767 sset = _succs()
767 sset = _succs()
768 sset.markers.add(mark)
768 sset.markers.add(mark)
769 fullsuccessorsets.append(sset)
769 fullsuccessorsets.append(sset)
770 if not foundany:
770 if not foundany:
771 fullsuccessorsets.append(_succs())
771 fullsuccessorsets.append(_succs())
772
772
773 values = []
773 values = []
774 for sset in fullsuccessorsets:
774 for sset in fullsuccessorsets:
775 values.append({'successors': sset, 'markers': sset.markers})
775 values.append({'successors': sset, 'markers': sset.markers})
776
776
777 return values
777 return values
778
778
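The returned structure is one dict per successors set; a pruned changeset yields an entry whose successors list is empty but which still carries the pruning markers. A hypothetical shape, with placeholder node ids and marker objects:

# split into two successors, recorded by a single (placeholder) marker
split = [{'successors': ['aaa', 'bbb'], 'markers': {'mark1'}}]

# pruned: no successors, but the prune marker is still reported
pruned = [{'successors': [], 'markers': {'prunemark'}}]
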
779 def _getobsfate(successorssets):
779 def _getobsfate(successorssets):
780 """ Compute a changeset obsolescence fate based on its successorssets.
780 """ Compute a changeset obsolescence fate based on its successorssets.
781 Successors can be the tipmost ones or the immediate ones. This function's
781 Successors can be the tipmost ones or the immediate ones. This function's
782 return values are not meant to be shown directly to users; they are meant to
782 return values are not meant to be shown directly to users; they are meant to
783 be used by internal functions only.
783 be used by internal functions only.
784 Returns one fate from the following values:
784 Returns one fate from the following values:
785 - pruned
785 - pruned
786 - diverged
786 - diverged
787 - superseded
787 - superseded
788 - superseded_split
788 - superseded_split
789 """
789 """
790
790
791 if len(successorssets) == 0:
791 if len(successorssets) == 0:
792 # The commit has been pruned
792 # The commit has been pruned
793 return 'pruned'
793 return 'pruned'
794 elif len(successorssets) > 1:
794 elif len(successorssets) > 1:
795 return 'diverged'
795 return 'diverged'
796 else:
796 else:
797 # No divergence, only one set of successors
797 # No divergence, only one set of successors
798 successors = successorssets[0]
798 successors = successorssets[0]
799
799
800 if len(successors) == 1:
800 if len(successors) == 1:
801 return 'superseded'
801 return 'superseded'
802 else:
802 else:
803 return 'superseded_split'
803 return 'superseded_split'
804
804
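The decision tree is mechanical and easy to restate on plain lists (letters are placeholder nodes); this mirrors the branches above:

def fate(ssets):
    if not ssets:
        return 'pruned'
    if len(ssets) > 1:
        return 'diverged'
    return 'superseded' if len(ssets[0]) == 1 else 'superseded_split'

assert fate([]) == 'pruned'
assert fate([['A1'], ['A2']]) == 'diverged'
assert fate([['A1']]) == 'superseded'
assert fate([['A1', 'A2']]) == 'superseded_split'
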
805 def obsfateverb(successorset, markers):
805 def obsfateverb(successorset, markers):
806 """ Return the verb summarizing the successorset and potentially using
806 """ Return the verb summarizing the successorset and potentially using
807 information from the markers
807 information from the markers
808 """
808 """
809 if not successorset:
809 if not successorset:
810 verb = 'pruned'
810 verb = 'pruned'
811 elif len(successorset) == 1:
811 elif len(successorset) == 1:
812 verb = 'rewritten'
812 verb = 'rewritten'
813 else:
813 else:
814 verb = 'split'
814 verb = 'split'
815 return verb
815 return verb
816
816
817 def markersdates(markers):
817 def markersdates(markers):
818 """returns the list of dates for a list of markers
818 """returns the list of dates for a list of markers
819 """
819 """
820 return [m[4] for m in markers]
820 return [m[4] for m in markers]
821
821
822 def markersusers(markers):
822 def markersusers(markers):
823 """ Returns a sorted list of markers users without duplicates
823 """ Returns a sorted list of markers users without duplicates
824 """
824 """
825 markersmeta = [dict(m[3]) for m in markers]
825 markersmeta = [dict(m[3]) for m in markers]
826 users = set(encoding.tolocal(meta['user']) for meta in markersmeta
826 users = set(encoding.tolocal(meta['user']) for meta in markersmeta
827 if meta.get('user'))
827 if meta.get('user'))
828
828
829 return sorted(users)
829 return sorted(users)
830
830
831 def markersoperations(markers):
831 def markersoperations(markers):
832 """ Returns a sorted list of markers operations without duplicates
832 """ Returns a sorted list of markers operations without duplicates
833 """
833 """
834 markersmeta = [dict(m[3]) for m in markers]
834 markersmeta = [dict(m[3]) for m in markers]
835 operations = set(meta.get('operation') for meta in markersmeta
835 operations = set(meta.get('operation') for meta in markersmeta
836 if meta.get('operation'))
836 if meta.get('operation'))
837
837
838 return sorted(operations)
838 return sorted(operations)
839
839
840 def obsfateprinter(ui, repo, successors, markers, formatctx):
840 def obsfateprinter(ui, repo, successors, markers, formatctx):
841 """ Build a obsfate string for a single successorset using all obsfate
841 """ Build a obsfate string for a single successorset using all obsfate
842 related function defined in obsutil
842 related function defined in obsutil
843 """
843 """
844 quiet = ui.quiet
844 quiet = ui.quiet
845 verbose = ui.verbose
845 verbose = ui.verbose
846 normal = not verbose and not quiet
846 normal = not verbose and not quiet
847
847
848 line = []
848 line = []
849
849
850 # Verb
850 # Verb
851 line.append(obsfateverb(successors, markers))
851 line.append(obsfateverb(successors, markers))
852
852
853 # Operations
853 # Operations
854 operations = markersoperations(markers)
854 operations = markersoperations(markers)
855 if operations:
855 if operations:
856 line.append(" using %s" % ", ".join(operations))
856 line.append(" using %s" % ", ".join(operations))
857
857
858 # Successors
858 # Successors
859 if successors:
859 if successors:
860 fmtsuccessors = [formatctx(repo[succ]) for succ in successors]
860 fmtsuccessors = [formatctx(repo[succ]) for succ in successors]
861 line.append(" as %s" % ", ".join(fmtsuccessors))
861 line.append(" as %s" % ", ".join(fmtsuccessors))
862
862
863 # Users
863 # Users
864 users = markersusers(markers)
864 users = markersusers(markers)
865 # Filter out the current user in non-verbose mode to reduce the amount
865 # Filter out the current user in non-verbose mode to reduce the amount
866 # of information
866 # of information
867 if not verbose:
867 if not verbose:
868 currentuser = ui.username(acceptempty=True)
868 currentuser = ui.username(acceptempty=True)
869 if len(users) == 1 and currentuser in users:
869 if len(users) == 1 and currentuser in users:
870 users = None
870 users = None
871
871
872 if (verbose or normal) and users:
872 if (verbose or normal) and users:
873 line.append(" by %s" % ", ".join(users))
873 line.append(" by %s" % ", ".join(users))
874
874
875 # Date
875 # Date
876 dates = markersdates(markers)
876 dates = markersdates(markers)
877
877
878 if dates and verbose:
878 if dates and verbose:
879 min_date = min(dates)
879 min_date = min(dates)
880 max_date = max(dates)
880 max_date = max(dates)
881
881
882 if min_date == max_date:
882 if min_date == max_date:
883 fmtmin_date = dateutil.datestr(min_date, '%Y-%m-%d %H:%M %1%2')
883 fmtmin_date = dateutil.datestr(min_date, '%Y-%m-%d %H:%M %1%2')
884 line.append(" (at %s)" % fmtmin_date)
884 line.append(" (at %s)" % fmtmin_date)
885 else:
885 else:
886 fmtmin_date = dateutil.datestr(min_date, '%Y-%m-%d %H:%M %1%2')
886 fmtmin_date = dateutil.datestr(min_date, '%Y-%m-%d %H:%M %1%2')
887 fmtmax_date = dateutil.datestr(max_date, '%Y-%m-%d %H:%M %1%2')
887 fmtmax_date = dateutil.datestr(max_date, '%Y-%m-%d %H:%M %1%2')
888 line.append(" (between %s and %s)" % (fmtmin_date, fmtmax_date))
888 line.append(" (between %s and %s)" % (fmtmin_date, fmtmax_date))
889
889
890 return "".join(line)
890 return "".join(line)
891
891
892
892
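Assembled in order (verb, operations, successors, users, date), the printer produces lines such as the following hypothetical examples; real hashes, users, and dates depend on the repository and on verbosity:

rewritten using amend as 1234567890ab by alice (at 2018-08-25 12:00 +0200)
split as 1111111111aa, 2222222222bb by bob
pruned
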
893 filteredmsgtable = {
893 filteredmsgtable = {
894 "pruned": _("hidden revision '%s' is pruned"),
894 "pruned": _("hidden revision '%s' is pruned"),
895 "diverged": _("hidden revision '%s' has diverged"),
895 "diverged": _("hidden revision '%s' has diverged"),
896 "superseded": _("hidden revision '%s' was rewritten as: %s"),
896 "superseded": _("hidden revision '%s' was rewritten as: %s"),
897 "superseded_split": _("hidden revision '%s' was split as: %s"),
897 "superseded_split": _("hidden revision '%s' was split as: %s"),
898 "superseded_split_several": _("hidden revision '%s' was split as: %s and "
898 "superseded_split_several": _("hidden revision '%s' was split as: %s and "
899 "%d more"),
899 "%d more"),
900 }
900 }
901
901
902 def _getfilteredreason(repo, changeid, ctx):
902 def _getfilteredreason(repo, changeid, ctx):
903 """return a human-friendly string on why a obsolete changeset is hidden
903 """return a human-friendly string on why a obsolete changeset is hidden
904 """
904 """
905 successors = successorssets(repo, ctx.node())
905 successors = successorssets(repo, ctx.node())
906 fate = _getobsfate(successors)
906 fate = _getobsfate(successors)
907
907
908 # Be more precise in case the revision is superseded
908 # Be more precise in case the revision is superseded
909 if fate == 'pruned':
909 if fate == 'pruned':
910 return filteredmsgtable['pruned'] % changeid
910 return filteredmsgtable['pruned'] % changeid
911 elif fate == 'diverged':
911 elif fate == 'diverged':
912 return filteredmsgtable['diverged'] % changeid
912 return filteredmsgtable['diverged'] % changeid
913 elif fate == 'superseded':
913 elif fate == 'superseded':
914 single_successor = nodemod.short(successors[0][0])
914 single_successor = nodemod.short(successors[0][0])
915 return filteredmsgtable['superseded'] % (changeid, single_successor)
915 return filteredmsgtable['superseded'] % (changeid, single_successor)
916 elif fate == 'superseded_split':
916 elif fate == 'superseded_split':
917
917
918 succs = []
918 succs = []
919 for node_id in successors[0]:
919 for node_id in successors[0]:
920 succs.append(nodemod.short(node_id))
920 succs.append(nodemod.short(node_id))
921
921
922 if len(succs) <= 2:
922 if len(succs) <= 2:
923 fmtsuccs = ', '.join(succs)
923 fmtsuccs = ', '.join(succs)
924 return filteredmsgtable['superseded_split'] % (changeid, fmtsuccs)
924 return filteredmsgtable['superseded_split'] % (changeid, fmtsuccs)
925 else:
925 else:
926 firstsuccessors = ', '.join(succs[:2])
926 firstsuccessors = ', '.join(succs[:2])
927 remainingnumber = len(succs) - 2
927 remainingnumber = len(succs) - 2
928
928
929 args = (changeid, firstsuccessors, remainingnumber)
929 args = (changeid, firstsuccessors, remainingnumber)
930 return filteredmsgtable['superseded_split_several'] % args
930 return filteredmsgtable['superseded_split_several'] % args
931
931
932 def divergentsets(repo, ctx):
932 def divergentsets(repo, ctx):
933 """Compute sets of commits divergent with a given one"""
933 """Compute sets of commits divergent with a given one"""
934 cache = {}
934 cache = {}
935 base = {}
935 base = {}
936 for n in allpredecessors(repo.obsstore, [ctx.node()]):
936 for n in allpredecessors(repo.obsstore, [ctx.node()]):
937 if n == ctx.node():
937 if n == ctx.node():
938 # a node can't be a base for divergence with itself
938 # a node can't be a base for divergence with itself
939 continue
939 continue
940 nsuccsets = successorssets(repo, n, cache=cache)
940 nsuccsets = successorssets(repo, n, cache=cache)
941 for nsuccset in nsuccsets:
941 for nsuccset in nsuccsets:
942 if ctx.node() in nsuccset:
942 if ctx.node() in nsuccset:
943 # we are only interested in *other* successor sets
943 # we are only interested in *other* successor sets
944 continue
944 continue
945 if tuple(nsuccset) in base:
945 if tuple(nsuccset) in base:
946 # we already know the latest base for this divergence
946 # we already know the latest base for this divergence
947 continue
947 continue
948 base[tuple(nsuccset)] = n
948 base[tuple(nsuccset)] = n
949 return [{'divergentnodes': divset, 'commonpredecessor': b}
949 return [{'divergentnodes': divset, 'commonpredecessor': b}
950 for divset, b in base.iteritems()]
950 for divset, b in base.iteritems()]
951
951
952 def whyunstable(repo, ctx):
952 def whyunstable(repo, ctx):
953 result = []
953 result = []
954 if ctx.orphan():
954 if ctx.orphan():
955 for parent in ctx.parents():
955 for parent in ctx.parents():
956 kind = None
956 kind = None
957 if parent.orphan():
957 if parent.orphan():
958 kind = 'orphan'
958 kind = 'orphan'
959 elif parent.obsolete():
959 elif parent.obsolete():
960 kind = 'obsolete'
960 kind = 'obsolete'
961 if kind is not None:
961 if kind is not None:
962 result.append({'instability': 'orphan',
962 result.append({'instability': 'orphan',
963 'reason': '%s parent' % kind,
963 'reason': '%s parent' % kind,
964 'node': parent.hex()})
964 'node': parent.hex()})
965 if ctx.phasedivergent():
965 if ctx.phasedivergent():
966 predecessors = allpredecessors(repo.obsstore, [ctx.node()],
966 predecessors = allpredecessors(repo.obsstore, [ctx.node()],
967 ignoreflags=bumpedfix)
967 ignoreflags=bumpedfix)
968 immutable = [repo[p] for p in predecessors
968 immutable = [repo[p] for p in predecessors
969 if p in repo and not repo[p].mutable()]
969 if p in repo and not repo[p].mutable()]
970 for predecessor in immutable:
970 for predecessor in immutable:
971 result.append({'instability': 'phase-divergent',
971 result.append({'instability': 'phase-divergent',
972 'reason': 'immutable predecessor',
972 'reason': 'immutable predecessor',
973 'node': predecessor.hex()})
973 'node': predecessor.hex()})
974 if ctx.contentdivergent():
974 if ctx.contentdivergent():
975 dsets = divergentsets(repo, ctx)
975 dsets = divergentsets(repo, ctx)
976 for dset in dsets:
976 for dset in dsets:
977 divnodes = [repo[n] for n in dset['divergentnodes']]
977 divnodes = [repo[n] for n in dset['divergentnodes']]
978 result.append({'instability': 'content-divergent',
978 result.append({'instability': 'content-divergent',
979 'divergentnodes': divnodes,
979 'divergentnodes': divnodes,
980 'reason': 'predecessor',
980 'reason': 'predecessor',
981 'node': nodemod.hex(dset['commonpredecessor'])})
981 'node': nodemod.hex(dset['commonpredecessor'])})
982 return result
982 return result
@@ -1,1776 +1,1776 b''
1 # scmutil.py - Mercurial core utility functions
1 # scmutil.py - Mercurial core utility functions
2 #
2 #
3 # Copyright Matt Mackall <mpm@selenic.com>
3 # Copyright Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import glob
11 import glob
12 import hashlib
12 import hashlib
13 import os
13 import os
14 import re
14 import re
15 import socket
15 import socket
16 import subprocess
16 import subprocess
17 import weakref
17 import weakref
18
18
19 from .i18n import _
19 from .i18n import _
20 from .node import (
20 from .node import (
21 bin,
21 bin,
22 hex,
22 hex,
23 nullid,
23 nullid,
24 short,
24 short,
25 wdirid,
25 wdirid,
26 wdirrev,
26 wdirrev,
27 )
27 )
28
28
29 from . import (
29 from . import (
30 encoding,
30 encoding,
31 error,
31 error,
32 match as matchmod,
32 match as matchmod,
33 obsolete,
33 obsolete,
34 obsutil,
34 obsutil,
35 pathutil,
35 pathutil,
36 phases,
36 phases,
37 policy,
37 policy,
38 pycompat,
38 pycompat,
39 revsetlang,
39 revsetlang,
40 similar,
40 similar,
41 url,
41 url,
42 util,
42 util,
43 vfs,
43 vfs,
44 )
44 )
45
45
46 from .utils import (
46 from .utils import (
47 procutil,
47 procutil,
48 stringutil,
48 stringutil,
49 )
49 )
50
50
51 if pycompat.iswindows:
51 if pycompat.iswindows:
52 from . import scmwindows as scmplatform
52 from . import scmwindows as scmplatform
53 else:
53 else:
54 from . import scmposix as scmplatform
54 from . import scmposix as scmplatform
55
55
56 parsers = policy.importmod(r'parsers')
56 parsers = policy.importmod(r'parsers')
57
57
58 termsize = scmplatform.termsize
58 termsize = scmplatform.termsize
59
59
60 class status(tuple):
60 class status(tuple):
61 '''Named tuple with a list of files per status. The 'deleted', 'unknown'
61 '''Named tuple with a list of files per status. The 'deleted', 'unknown'
62 and 'ignored' properties are only relevant to the working copy.
62 and 'ignored' properties are only relevant to the working copy.
63 '''
63 '''
64
64
65 __slots__ = ()
65 __slots__ = ()
66
66
67 def __new__(cls, modified, added, removed, deleted, unknown, ignored,
67 def __new__(cls, modified, added, removed, deleted, unknown, ignored,
68 clean):
68 clean):
69 return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
69 return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
70 ignored, clean))
70 ignored, clean))
71
71
72 @property
72 @property
73 def modified(self):
73 def modified(self):
74 '''files that have been modified'''
74 '''files that have been modified'''
75 return self[0]
75 return self[0]
76
76
77 @property
77 @property
78 def added(self):
78 def added(self):
79 '''files that have been added'''
79 '''files that have been added'''
80 return self[1]
80 return self[1]
81
81
82 @property
82 @property
83 def removed(self):
83 def removed(self):
84 '''files that have been removed'''
84 '''files that have been removed'''
85 return self[2]
85 return self[2]
86
86
87 @property
87 @property
88 def deleted(self):
88 def deleted(self):
89 '''files that are in the dirstate, but have been deleted from the
89 '''files that are in the dirstate, but have been deleted from the
90 working copy (aka "missing")
90 working copy (aka "missing")
91 '''
91 '''
92 return self[3]
92 return self[3]
93
93
94 @property
94 @property
95 def unknown(self):
95 def unknown(self):
96 '''files not in the dirstate that are not ignored'''
96 '''files not in the dirstate that are not ignored'''
97 return self[4]
97 return self[4]
98
98
99 @property
99 @property
100 def ignored(self):
100 def ignored(self):
101 '''files not in the dirstate that are ignored (by _dirignore())'''
101 '''files not in the dirstate that are ignored (by _dirignore())'''
102 return self[5]
102 return self[5]
103
103
104 @property
104 @property
105 def clean(self):
105 def clean(self):
106 '''files that have not been modified'''
106 '''files that have not been modified'''
107 return self[6]
107 return self[6]
108
108
109 def __repr__(self, *args, **kwargs):
109 def __repr__(self, *args, **kwargs):
110 return ((r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
110 return ((r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
111 r'unknown=%s, ignored=%s, clean=%s>') %
111 r'unknown=%s, ignored=%s, clean=%s>') %
112 tuple(pycompat.sysstr(stringutil.pprint(v)) for v in self))
112 tuple(pycompat.sysstr(stringutil.pprint(v)) for v in self))
113
113
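Because status subclasses tuple, callers can either unpack it positionally or use the named properties; a quick sketch with mostly empty placeholder lists:

st = status([], ['added.txt'], [], [], [], [], [])
assert st.added == ['added.txt']
modified, added, removed, deleted, unknown, ignored, clean = st
assert added == st[1]
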
114 def itersubrepos(ctx1, ctx2):
114 def itersubrepos(ctx1, ctx2):
115 """find subrepos in ctx1 or ctx2"""
115 """find subrepos in ctx1 or ctx2"""
116 # Create a (subpath, ctx) mapping where we prefer subpaths from
116 # Create a (subpath, ctx) mapping where we prefer subpaths from
117 # ctx1. The subpaths from ctx2 are important when the .hgsub file
117 # ctx1. The subpaths from ctx2 are important when the .hgsub file
118 # has been modified (in ctx2) but not yet committed (in ctx1).
118 # has been modified (in ctx2) but not yet committed (in ctx1).
119 subpaths = dict.fromkeys(ctx2.substate, ctx2)
119 subpaths = dict.fromkeys(ctx2.substate, ctx2)
120 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
120 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
121
121
122 missing = set()
122 missing = set()
123
123
124 for subpath in ctx2.substate:
124 for subpath in ctx2.substate:
125 if subpath not in ctx1.substate:
125 if subpath not in ctx1.substate:
126 del subpaths[subpath]
126 del subpaths[subpath]
127 missing.add(subpath)
127 missing.add(subpath)
128
128
129 for subpath, ctx in sorted(subpaths.iteritems()):
129 for subpath, ctx in sorted(subpaths.iteritems()):
130 yield subpath, ctx.sub(subpath)
130 yield subpath, ctx.sub(subpath)
131
131
132 # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
132 # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
133 # status and diff will have an accurate result when it does
133 # status and diff will have an accurate result when it does
134 # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
134 # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
135 # against itself.
135 # against itself.
136 for subpath in missing:
136 for subpath in missing:
137 yield subpath, ctx2.nullsub(subpath, ctx1)
137 yield subpath, ctx2.nullsub(subpath, ctx1)
138
138
139 def nochangesfound(ui, repo, excluded=None):
139 def nochangesfound(ui, repo, excluded=None):
140 '''Report no changes for push/pull, excluded is None or a list of
140 '''Report no changes for push/pull, excluded is None or a list of
141 nodes excluded from the push/pull.
141 nodes excluded from the push/pull.
142 '''
142 '''
143 secretlist = []
143 secretlist = []
144 if excluded:
144 if excluded:
145 for n in excluded:
145 for n in excluded:
146 ctx = repo[n]
146 ctx = repo[n]
147 if ctx.phase() >= phases.secret and not ctx.extinct():
147 if ctx.phase() >= phases.secret and not ctx.extinct():
148 secretlist.append(n)
148 secretlist.append(n)
149
149
150 if secretlist:
150 if secretlist:
151 ui.status(_("no changes found (ignored %d secret changesets)\n")
151 ui.status(_("no changes found (ignored %d secret changesets)\n")
152 % len(secretlist))
152 % len(secretlist))
153 else:
153 else:
154 ui.status(_("no changes found\n"))
154 ui.status(_("no changes found\n"))
155
155
156 def callcatch(ui, func):
156 def callcatch(ui, func):
157 """call func() with global exception handling
157 """call func() with global exception handling
158
158
159 return func() if no exception happens. Otherwise do some error handling
159 return func() if no exception happens. Otherwise do some error handling
160 and return an exit code accordingly. Does not handle all exceptions.
160 and return an exit code accordingly. Does not handle all exceptions.
161 """
161 """
162 try:
162 try:
163 try:
163 try:
164 return func()
164 return func()
165 except: # re-raises
165 except: # re-raises
166 ui.traceback()
166 ui.traceback()
167 raise
167 raise
168 # Global exception handling, alphabetically
168 # Global exception handling, alphabetically
169 # Mercurial-specific first, followed by built-in and library exceptions
169 # Mercurial-specific first, followed by built-in and library exceptions
170 except error.LockHeld as inst:
170 except error.LockHeld as inst:
171 if inst.errno == errno.ETIMEDOUT:
171 if inst.errno == errno.ETIMEDOUT:
172 reason = _('timed out waiting for lock held by %r') % inst.locker
172 reason = _('timed out waiting for lock held by %r') % inst.locker
173 else:
173 else:
174 reason = _('lock held by %r') % inst.locker
174 reason = _('lock held by %r') % inst.locker
175 ui.error(_("abort: %s: %s\n") % (
175 ui.error(_("abort: %s: %s\n") % (
176 inst.desc or stringutil.forcebytestr(inst.filename), reason))
176 inst.desc or stringutil.forcebytestr(inst.filename), reason))
177 if not inst.locker:
177 if not inst.locker:
178 ui.error(_("(lock might be very busy)\n"))
178 ui.error(_("(lock might be very busy)\n"))
179 except error.LockUnavailable as inst:
179 except error.LockUnavailable as inst:
180 ui.error(_("abort: could not lock %s: %s\n") %
180 ui.error(_("abort: could not lock %s: %s\n") %
181 (inst.desc or stringutil.forcebytestr(inst.filename),
181 (inst.desc or stringutil.forcebytestr(inst.filename),
182 encoding.strtolocal(inst.strerror)))
182 encoding.strtolocal(inst.strerror)))
183 except error.OutOfBandError as inst:
183 except error.OutOfBandError as inst:
184 if inst.args:
184 if inst.args:
185 msg = _("abort: remote error:\n")
185 msg = _("abort: remote error:\n")
186 else:
186 else:
187 msg = _("abort: remote error\n")
187 msg = _("abort: remote error\n")
188 ui.error(msg)
188 ui.error(msg)
189 if inst.args:
189 if inst.args:
190 ui.error(''.join(inst.args))
190 ui.error(''.join(inst.args))
191 if inst.hint:
191 if inst.hint:
192 ui.error('(%s)\n' % inst.hint)
192 ui.error('(%s)\n' % inst.hint)
193 except error.RepoError as inst:
193 except error.RepoError as inst:
194 ui.error(_("abort: %s!\n") % inst)
194 ui.error(_("abort: %s!\n") % inst)
195 if inst.hint:
195 if inst.hint:
196 ui.error(_("(%s)\n") % inst.hint)
196 ui.error(_("(%s)\n") % inst.hint)
197 except error.ResponseError as inst:
197 except error.ResponseError as inst:
198 ui.error(_("abort: %s") % inst.args[0])
198 ui.error(_("abort: %s") % inst.args[0])
199 msg = inst.args[1]
199 msg = inst.args[1]
200 if isinstance(msg, type(u'')):
200 if isinstance(msg, type(u'')):
201 msg = pycompat.sysbytes(msg)
201 msg = pycompat.sysbytes(msg)
202 if not isinstance(msg, bytes):
202 if not isinstance(msg, bytes):
203 ui.error(" %r\n" % (msg,))
203 ui.error(" %r\n" % (msg,))
204 elif not msg:
204 elif not msg:
205 ui.error(_(" empty string\n"))
205 ui.error(_(" empty string\n"))
206 else:
206 else:
207 ui.error("\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
207 ui.error("\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
208 except error.CensoredNodeError as inst:
208 except error.CensoredNodeError as inst:
209 ui.error(_("abort: file censored %s!\n") % inst)
209 ui.error(_("abort: file censored %s!\n") % inst)
210 except error.RevlogError as inst:
210 except error.RevlogError as inst:
211 ui.error(_("abort: %s!\n") % inst)
211 ui.error(_("abort: %s!\n") % inst)
212 except error.InterventionRequired as inst:
212 except error.InterventionRequired as inst:
213 ui.error("%s\n" % inst)
213 ui.error("%s\n" % inst)
214 if inst.hint:
214 if inst.hint:
215 ui.error(_("(%s)\n") % inst.hint)
215 ui.error(_("(%s)\n") % inst.hint)
216 return 1
216 return 1
217 except error.WdirUnsupported:
217 except error.WdirUnsupported:
218 ui.error(_("abort: working directory revision cannot be specified\n"))
218 ui.error(_("abort: working directory revision cannot be specified\n"))
219 except error.Abort as inst:
219 except error.Abort as inst:
220 ui.error(_("abort: %s\n") % inst)
220 ui.error(_("abort: %s\n") % inst)
221 if inst.hint:
221 if inst.hint:
222 ui.error(_("(%s)\n") % inst.hint)
222 ui.error(_("(%s)\n") % inst.hint)
223 except ImportError as inst:
223 except ImportError as inst:
224 ui.error(_("abort: %s!\n") % stringutil.forcebytestr(inst))
224 ui.error(_("abort: %s!\n") % stringutil.forcebytestr(inst))
225 m = stringutil.forcebytestr(inst).split()[-1]
225 m = stringutil.forcebytestr(inst).split()[-1]
226 if m in "mpatch bdiff".split():
226 if m in "mpatch bdiff".split():
227 ui.error(_("(did you forget to compile extensions?)\n"))
227 ui.error(_("(did you forget to compile extensions?)\n"))
228 elif m in "zlib".split():
228 elif m in "zlib".split():
229 ui.error(_("(is your Python install correct?)\n"))
229 ui.error(_("(is your Python install correct?)\n"))
230 except IOError as inst:
230 except IOError as inst:
231 if util.safehasattr(inst, "code"):
231 if util.safehasattr(inst, "code"):
232 ui.error(_("abort: %s\n") % stringutil.forcebytestr(inst))
232 ui.error(_("abort: %s\n") % stringutil.forcebytestr(inst))
233 elif util.safehasattr(inst, "reason"):
233 elif util.safehasattr(inst, "reason"):
234 try: # usually it is in the form (errno, strerror)
234 try: # usually it is in the form (errno, strerror)
235 reason = inst.reason.args[1]
235 reason = inst.reason.args[1]
236 except (AttributeError, IndexError):
236 except (AttributeError, IndexError):
237 # it might be anything, for example a string
237 # it might be anything, for example a string
238 reason = inst.reason
238 reason = inst.reason
239 if isinstance(reason, pycompat.unicode):
239 if isinstance(reason, pycompat.unicode):
240 # SSLError of Python 2.7.9 contains a unicode
240 # SSLError of Python 2.7.9 contains a unicode
241 reason = encoding.unitolocal(reason)
241 reason = encoding.unitolocal(reason)
242 ui.error(_("abort: error: %s\n") % reason)
242 ui.error(_("abort: error: %s\n") % reason)
243 elif (util.safehasattr(inst, "args")
243 elif (util.safehasattr(inst, "args")
244 and inst.args and inst.args[0] == errno.EPIPE):
244 and inst.args and inst.args[0] == errno.EPIPE):
245 pass
245 pass
246 elif getattr(inst, "strerror", None):
246 elif getattr(inst, "strerror", None):
247 if getattr(inst, "filename", None):
247 if getattr(inst, "filename", None):
248 ui.error(_("abort: %s: %s\n") % (
248 ui.error(_("abort: %s: %s\n") % (
249 encoding.strtolocal(inst.strerror),
249 encoding.strtolocal(inst.strerror),
250 stringutil.forcebytestr(inst.filename)))
250 stringutil.forcebytestr(inst.filename)))
251 else:
251 else:
252 ui.error(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
252 ui.error(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
253 else:
253 else:
254 raise
254 raise
255 except OSError as inst:
255 except OSError as inst:
256 if getattr(inst, "filename", None) is not None:
256 if getattr(inst, "filename", None) is not None:
257 ui.error(_("abort: %s: '%s'\n") % (
257 ui.error(_("abort: %s: '%s'\n") % (
258 encoding.strtolocal(inst.strerror),
258 encoding.strtolocal(inst.strerror),
259 stringutil.forcebytestr(inst.filename)))
259 stringutil.forcebytestr(inst.filename)))
260 else:
260 else:
261 ui.error(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
261 ui.error(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
262 except MemoryError:
262 except MemoryError:
263 ui.error(_("abort: out of memory\n"))
263 ui.error(_("abort: out of memory\n"))
264 except SystemExit as inst:
264 except SystemExit as inst:
265 # Commands shouldn't sys.exit directly, but give a return code.
265 # Commands shouldn't sys.exit directly, but give a return code.
266 # Just in case, catch this and pass the exit code to the caller.
266 # Just in case, catch this and pass the exit code to the caller.
267 return inst.code
267 return inst.code
268 except socket.error as inst:
268 except socket.error as inst:
269 ui.error(_("abort: %s\n") % stringutil.forcebytestr(inst.args[-1]))
269 ui.error(_("abort: %s\n") % stringutil.forcebytestr(inst.args[-1]))
270
270
271 return -1
271 return -1
272
272
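A hypothetical use of callcatch: wrap an arbitrary zero-argument command body so that Mercurial-style error reporting and exit codes apply ('runcmd' and the surrounding 'ui' object are made-up stand-ins, not part of this module):

def runcmd():
    raise error.Abort(_('demo failure'))  # reported as "abort: demo failure"

ret = callcatch(ui, runcmd)  # ui must be a real ui instance; ret becomes -1
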
273 def checknewlabel(repo, lbl, kind):
273 def checknewlabel(repo, lbl, kind):
274 # Do not use the "kind" parameter in ui output.
274 # Do not use the "kind" parameter in ui output.
275 # It makes strings difficult to translate.
275 # It makes strings difficult to translate.
276 if lbl in ['tip', '.', 'null']:
276 if lbl in ['tip', '.', 'null']:
277 raise error.Abort(_("the name '%s' is reserved") % lbl)
277 raise error.Abort(_("the name '%s' is reserved") % lbl)
278 for c in (':', '\0', '\n', '\r'):
278 for c in (':', '\0', '\n', '\r'):
279 if c in lbl:
279 if c in lbl:
280 raise error.Abort(
280 raise error.Abort(
281 _("%r cannot be used in a name") % pycompat.bytestr(c))
281 _("%r cannot be used in a name") % pycompat.bytestr(c))
282 try:
282 try:
283 int(lbl)
283 int(lbl)
284 raise error.Abort(_("cannot use an integer as a name"))
284 raise error.Abort(_("cannot use an integer as a name"))
285 except ValueError:
285 except ValueError:
286 pass
286 pass
287 if lbl.strip() != lbl:
287 if lbl.strip() != lbl:
288 raise error.Abort(_("leading or trailing whitespace in name %r") % lbl)
288 raise error.Abort(_("leading or trailing whitespace in name %r") % lbl)
289
289
290 def checkfilename(f):
290 def checkfilename(f):
291 '''Check that the filename f is an acceptable filename for a tracked file'''
291 '''Check that the filename f is an acceptable filename for a tracked file'''
292 if '\r' in f or '\n' in f:
292 if '\r' in f or '\n' in f:
293 raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r")
293 raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r")
294 % pycompat.bytestr(f))
294 % pycompat.bytestr(f))
295
295
296 def checkportable(ui, f):
296 def checkportable(ui, f):
297 '''Check if filename f is portable and warn or abort depending on config'''
297 '''Check if filename f is portable and warn or abort depending on config'''
298 checkfilename(f)
298 checkfilename(f)
299 abort, warn = checkportabilityalert(ui)
299 abort, warn = checkportabilityalert(ui)
300 if abort or warn:
300 if abort or warn:
301 msg = util.checkwinfilename(f)
301 msg = util.checkwinfilename(f)
302 if msg:
302 if msg:
303 msg = "%s: %s" % (msg, procutil.shellquote(f))
303 msg = "%s: %s" % (msg, procutil.shellquote(f))
304 if abort:
304 if abort:
305 raise error.Abort(msg)
305 raise error.Abort(msg)
306 ui.warn(_("warning: %s\n") % msg)
306 ui.warn(_("warning: %s\n") % msg)
307
307
def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames'''
    val = ui.config('ui', 'portablefilenames')
    lval = val.lower()
    bval = stringutil.parsebool(val)
    abort = pycompat.iswindows or lval == 'abort'
    warn = bval or lval == 'warn'
    if bval is None and not (warn or abort or lval == 'ignore'):
        raise error.ConfigError(
            _("ui.portablefilenames value is invalid ('%s')") % val)
    return abort, warn

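# Example configuration (illustrative): checkportabilityalert() maps the
# ui.portablefilenames setting to an (abort, warn) pair, e.g. an hgrc of
#
#   [ui]
#   portablefilenames = warn      # or: ignore, abort, or a boolean
#
# yields (False, True) on POSIX; on Windows 'abort' is always True.
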
class casecollisionauditor(object):
    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        self._abort = abort
        allfiles = '\0'.join(dirstate._map)
        self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
        self._dirstate = dirstate
        # The purpose of _newfiles is so that we don't complain about
        # case collisions if someone were to call this object with the
        # same filename twice.
        self._newfiles = set()

    def __call__(self, f):
        if f in self._newfiles:
            return
        fl = encoding.lower(f)
        if fl in self._loweredfiles and f not in self._dirstate:
            msg = _('possible case-folding collision for %s') % f
            if self._abort:
                raise error.Abort(msg)
            self._ui.warn(_("warning: %s\n") % msg)
        self._loweredfiles.add(fl)
        self._newfiles.add(f)

def filteredhash(repo, maxrev):
    """build hash of filtered revisions in the current repoview.

    Multiple caches perform up-to-date validation by checking that the
    tiprev and tipnode stored in the cache file match the current repository.
    However, this is not sufficient for validating repoviews because the set
    of revisions in the view may change without the repository tiprev and
    tipnode changing.

    This function hashes all the revs filtered from the view and returns
    that SHA-1 digest.
    """
    cl = repo.changelog
    if not cl.filteredrevs:
        return None
    key = None
    revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
    if revs:
        s = hashlib.sha1()
        for rev in revs:
            s.update('%d;' % rev)
        key = s.digest()
    return key

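# Illustrative usage sketch (not from the original source): a cache
# validator would store this digest next to tiprev/tipnode and compare it
# on load, e.g.:
#
#   if cachedkey != filteredhash(repo, cachedtiprev):
#       rebuildcache(repo)    # hypothetical helper
#
# A None result (nothing filtered at or below maxrev) still compares
# correctly against a stored None.
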
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs'''
    def errhandler(err):
        if err.filename == path:
            raise err
    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:
        def adddir(dirlst, dirname):
            dirstat = os.stat(dirname)
            match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
            if not match:
                dirlst.append(dirstat)
            return not match
    else:
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if '.hg' in dirs:
            yield root # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = [] # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs

def binnode(ctx):
    """Return binary node id for a given basectx"""
    node = ctx.node()
    if node is None:
        return wdirid
    return node

def intrev(ctx):
    """Return integer for a given basectx that can be used in comparison or
    arithmetic operation"""
    rev = ctx.rev()
    if rev is None:
        return wdirrev
    return rev

def formatchangeid(ctx):
    """Format changectx as '{rev}:{node|formatnode}', which is the default
    template provided by logcmdutil.changesettemplater"""
    repo = ctx.repo()
    return formatrevnode(repo.ui, intrev(ctx), binnode(ctx))

def formatrevnode(ui, rev, node):
    """Format given revision and node depending on the current verbosity"""
    if ui.debugflag:
        hexfunc = hex
    else:
        hexfunc = short
    return '%d:%s' % (rev, hexfunc(node))

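# Illustrative output sketch (assuming a SHA-1 node, for which short()
# yields 12 hex digits; the hash shown is made up):
#
#   formatrevnode(ui, 5, node)   # -> '5:9b2a99adc05e'           normally
#   formatrevnode(ui, 5, node)   # -> '5:' + full 40-digit hash  with --debug
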
def resolvehexnodeidprefix(repo, prefix):
    if (prefix.startswith('x') and
        repo.ui.configbool('experimental', 'revisions.prefixhexnode')):
        prefix = prefix[1:]
    try:
        # Uses unfiltered repo because it's faster when prefix is ambiguous.
        # This matches the shortesthexnodeidprefix() function below.
        node = repo.unfiltered().changelog._partialmatch(prefix)
    except error.AmbiguousPrefixLookupError:
        revset = repo.ui.config('experimental', 'revisions.disambiguatewithin')
        if revset:
            # Clear config to avoid infinite recursion
            configoverrides = {('experimental',
                                'revisions.disambiguatewithin'): None}
            with repo.ui.configoverride(configoverrides):
                revs = repo.anyrevs([revset], user=True)
                matches = []
                for rev in revs:
                    node = repo.changelog.node(rev)
                    if hex(node).startswith(prefix):
                        matches.append(node)
                if len(matches) == 1:
                    return matches[0]
        raise
    if node is None:
        return
    repo.changelog.rev(node)  # make sure node isn't filtered
    return node

def mayberevnum(repo, prefix):
    """Checks if the given prefix may be mistaken for a revision number"""
    try:
        i = int(prefix)
        # a pure int starting with zero is never parsed as a rev; nor,
        # obviously, is an int larger than the value of the tip rev
        if prefix[0:1] == b'0' or i > len(repo):
            return False
        return True
    except ValueError:
        return False

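# Illustrative sketch: in a repository whose tip rev is 500,
#
#   mayberevnum(repo, '42')     # True  - could name rev 42
#   mayberevnum(repo, '042')    # False - a leading zero is never a rev
#   mayberevnum(repo, '9999')   # False - larger than the tip rev
#   mayberevnum(repo, 'abc')    # False - not an integer at all
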
def shortesthexnodeidprefix(repo, node, minlength=1, cache=None):
    """Find the shortest unambiguous prefix that matches the hex of node.

    If "cache" is not None, it must be a dictionary that can be used for
    caching between calls to this method.
    """
    # _partialmatch() of filtered changelog could take O(len(repo)) time,
    # which would be unacceptably slow. So we look for hash collisions in
    # unfiltered space, which means some hashes may be slightly longer.

    def disambiguate(prefix):
        """Disambiguate against revnums."""
        if repo.ui.configbool('experimental', 'revisions.prefixhexnode'):
            if mayberevnum(repo, prefix):
                return 'x' + prefix
            else:
                return prefix

        hexnode = hex(node)
        for length in range(len(prefix), len(hexnode) + 1):
            prefix = hexnode[:length]
            if not mayberevnum(repo, prefix):
                return prefix

    cl = repo.unfiltered().changelog
    revset = repo.ui.config('experimental', 'revisions.disambiguatewithin')
    if revset:
        revs = None
        if cache is not None:
            revs = cache.get('disambiguationrevset')
        if revs is None:
            revs = repo.anyrevs([revset], user=True)
            if cache is not None:
                cache['disambiguationrevset'] = revs
        if cl.rev(node) in revs:
            hexnode = hex(node)
            nodetree = None
            if cache is not None:
                nodetree = cache.get('disambiguationnodetree')
            if not nodetree:
                try:
                    nodetree = parsers.nodetree(cl.index, len(revs))
                except AttributeError:
                    # no native nodetree
                    pass
                else:
                    for r in revs:
                        nodetree.insert(r)
                    if cache is not None:
                        cache['disambiguationnodetree'] = nodetree
            if nodetree is not None:
                length = max(nodetree.shortest(node), minlength)
                prefix = hexnode[:length]
                return disambiguate(prefix)
            for length in range(minlength, len(hexnode) + 1):
                matches = []
                prefix = hexnode[:length]
                for rev in revs:
                    otherhexnode = repo[rev].hex()
                    if prefix == otherhexnode[:length]:
                        matches.append(otherhexnode)
                if len(matches) == 1:
                    return disambiguate(prefix)

    try:
        return disambiguate(cl.shortest(node, minlength))
    except error.LookupError:
        raise error.RepoLookupError()

def isrevsymbol(repo, symbol):
    """Checks if a symbol exists in the repo.

    See revsymbol() for details. Raises error.AmbiguousPrefixLookupError if
    the symbol is an ambiguous nodeid prefix.
    """
    try:
        revsymbol(repo, symbol)
        return True
    except error.RepoLookupError:
        return False

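# Illustrative sketch: isrevsymbol() is the boolean companion to
# revsymbol(), handy for validating user input first:
#
#   if isrevsymbol(repo, 'my-bookmark'):
#       ctx = revsymbol(repo, 'my-bookmark')
#
# Note that an ambiguous nodeid prefix still raises instead of
# returning False.
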
def revsymbol(repo, symbol):
    """Returns a context given a single revision symbol (as string).

    This is similar to revsingle(), but accepts only a single revision symbol,
    i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but
    not "max(public())".
    """
    if not isinstance(symbol, bytes):
        msg = ("symbol (%s of type %s) was not a string, did you mean "
               "repo[symbol]?" % (symbol, type(symbol)))
        raise error.ProgrammingError(msg)
    try:
        if symbol in ('.', 'tip', 'null'):
            return repo[symbol]

        try:
            r = int(symbol)
            if '%d' % r != symbol:
                raise ValueError
            l = len(repo.changelog)
            if r < 0:
                r += l
            if r < 0 or r >= l and r != wdirrev:
                raise ValueError
            return repo[r]
        except error.FilteredIndexError:
            raise
        except (ValueError, OverflowError, IndexError):
            pass

        if len(symbol) == 40:
            try:
                node = bin(symbol)
                rev = repo.changelog.rev(node)
                return repo[rev]
            except error.FilteredLookupError:
                raise
            except (TypeError, LookupError):
                pass

        # look up bookmarks through the name interface
        try:
            node = repo.names.singlenode(repo, symbol)
            rev = repo.changelog.rev(node)
            return repo[rev]
        except KeyError:
            pass

        node = resolvehexnodeidprefix(repo, symbol)
        if node is not None:
            rev = repo.changelog.rev(node)
            return repo[rev]

        raise error.RepoLookupError(_("unknown revision '%s'") % symbol)

    except error.WdirUnsupported:
        return repo[None]
    except (error.FilteredIndexError, error.FilteredLookupError,
            error.FilteredRepoLookupError):
        raise _filterederror(repo, symbol)

def _filterederror(repo, changeid):
    """build an exception to be raised about a filtered changeid

    This is extracted in a function to help extensions (eg: evolve) to
    experiment with various message variants."""
    if repo.filtername.startswith('visible'):

        # Check if the changeset is obsolete
        unfilteredrepo = repo.unfiltered()
        ctx = revsymbol(unfilteredrepo, changeid)

        # If the changeset is obsolete, enrich the message with the reason
        # that made this changeset not visible
        if ctx.obsolete():
            msg = obsutil._getfilteredreason(repo, changeid, ctx)
        else:
            msg = _("hidden revision '%s'") % changeid

        hint = _('use --hidden to access hidden revisions')

        return error.FilteredRepoLookupError(msg, hint=hint)
    msg = _("filtered revision '%s' (not in '%s' subset)")
    msg %= (changeid, repo.filtername)
    return error.FilteredRepoLookupError(msg)

def revsingle(repo, revspec, default='.', localalias=None):
    if not revspec and revspec != 0:
        return repo[default]

    l = revrange(repo, [revspec], localalias=localalias)
    if not l:
        raise error.Abort(_('empty revision set'))
    return repo[l.last()]

def _pairspec(revspec):
    tree = revsetlang.parse(revspec)
    return tree and tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')

def revpair(repo, revs):
    if not revs:
        return repo['.'], repo[None]

    l = revrange(repo, revs)

    if not l:
        first = second = None
    elif l.isascending():
        first = l.min()
        second = l.max()
    elif l.isdescending():
        first = l.max()
        second = l.min()
    else:
        first = l.first()
        second = l.last()

    if first is None:
        raise error.Abort(_('empty revision range'))
    if (first == second and len(revs) >= 2
        and not all(revrange(repo, [r]) for r in revs)):
        raise error.Abort(_('empty revision on one side of range'))

    # if top-level is range expression, the result must always be a pair
    if first == second and len(revs) == 1 and not _pairspec(revs[0]):
        return repo[first], repo[None]

    return repo[first], repo[second]

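# Illustrative sketch of revpair()'s contract (hypothetical values):
#
#   revpair(repo, [])           # -> (repo['.'], repo[None])
#   revpair(repo, ['1234'])     # -> (repo[1234], repo[None])
#   revpair(repo, ['10::20'])   # -> (repo[10], repo[20]); a range spec is
#                               #    always returned as a real pair
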
def revrange(repo, specs, localalias=None):
    """Execute 1 to many revsets and return the union.

    This is the preferred mechanism for executing revsets using user-specified
    config options, such as revset aliases.

    The revsets specified by ``specs`` will be executed via a chained ``OR``
    expression. If ``specs`` is empty, an empty result is returned.

    ``specs`` can contain integers, in which case they are assumed to be
    revision numbers.

    It is assumed the revsets are already formatted. If you have arguments
    that need to be expanded in the revset, call ``revsetlang.formatspec()``
    and pass the result as an element of ``specs``.

    Specifying a single revset is allowed.

    Returns a ``revset.abstractsmartset`` which is a list-like interface over
    integer revisions.
    """
    allspecs = []
    for spec in specs:
        if isinstance(spec, int):
            spec = revsetlang.formatspec('rev(%d)', spec)
        allspecs.append(spec)
    return repo.anyrevs(allspecs, user=True, localalias=localalias)

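# Illustrative usage sketch: callers pre-format user arguments with
# revsetlang.formatspec() and then union the results, e.g.:
#
#   specs = [revsetlang.formatspec('branch(%s)', branchname), 'tip']
#   for rev in revrange(repo, specs):
#       ...
#
# where 'branchname' is a hypothetical user-supplied value.
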
def meaningfulparents(repo, ctx):
    """Return list of meaningful (or all if debug) parentrevs for rev.

    For merges (two non-nullrev revisions) both parents are meaningful.
    Otherwise the first parent revision is considered meaningful if it
    is not the preceding revision.
    """
    parents = ctx.parents()
    if len(parents) > 1:
        return parents
    if repo.ui.debugflag:
        return [parents[0], repo['null']]
    if parents[0].rev() >= intrev(ctx) - 1:
        return []
    return parents

def expandpats(pats):
    '''Expand bare globs when running on windows.
    On posix we assume it has already been done by the shell.'''
    if not util.expandglobs:
        return list(pats)
    ret = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is None:
            try:
                globbed = glob.glob(pat)
            except re.error:
                globbed = [pat]
            if globbed:
                ret.extend(globbed)
                continue
        ret.append(kindpat)
    return ret

def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
                 badfn=None):
    '''Return a matcher and the patterns that were used.
    The matcher will warn about bad matches, unless an alternate badfn
    callback is provided.'''
    if pats == ("",):
        pats = []
    if opts is None:
        opts = {}
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])

    def bad(f, msg):
        ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))

    if badfn is None:
        badfn = bad

    m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                  default, listsubrepos=opts.get('subrepos'), badfn=badfn)

    if m.always():
        pats = []
    return m, pats

def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
          badfn=None):
    '''Return a matcher that will warn about bad matches.'''
    return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]

def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    return matchmod.always(repo.root, repo.getcwd())

def matchfiles(repo, files, badfn=None):
    '''Return a matcher that will efficiently match exactly these files.'''
    return matchmod.exact(repo.root, repo.getcwd(), files, badfn=badfn)

def parsefollowlinespattern(repo, rev, pat, msg):
    """Return a file name from `pat` pattern suitable for usage in followlines
    logic.
    """
    if not matchmod.patkind(pat):
        return pathutil.canonpath(repo.root, repo.getcwd(), pat)
    else:
        ctx = repo[rev]
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
        files = [f for f in ctx if m(f)]
        if len(files) != 1:
            raise error.ParseError(msg)
        return files[0]

def origpath(ui, repo, filepath):
    '''customize where .orig files are created

    Fetch the user-defined path from the config file: [ui] origbackuppath = <path>
    Fall back to the default (filepath with a .orig suffix) if not specified.
    '''
    origbackuppath = ui.config('ui', 'origbackuppath')
    if not origbackuppath:
        return filepath + ".orig"

    # Convert filepath from an absolute path into a path inside the repo.
    filepathfromroot = util.normpath(os.path.relpath(filepath,
                                                     start=repo.root))

    origvfs = vfs.vfs(repo.wjoin(origbackuppath))
    origbackupdir = origvfs.dirname(filepathfromroot)
    if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
        ui.note(_('creating directory: %s\n') % origvfs.join(origbackupdir))

        # Remove any files that conflict with the backup file's path
        for f in reversed(list(util.finddirs(filepathfromroot))):
            if origvfs.isfileorlink(f):
                ui.note(_('removing conflicting file: %s\n')
                        % origvfs.join(f))
                origvfs.unlink(f)
                break

        origvfs.makedirs(origbackupdir)

    if origvfs.isdir(filepathfromroot) and not origvfs.islink(filepathfromroot):
        ui.note(_('removing conflicting directory: %s\n')
                % origvfs.join(filepathfromroot))
        origvfs.rmtree(filepathfromroot, forcibly=True)

    return origvfs.join(filepathfromroot)

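# Example configuration (illustrative): redirecting backups out of the
# working directory with
#
#   [ui]
#   origbackuppath = .hg/origbackups
#
# makes origpath() return a path under .hg/origbackups/ mirroring the
# file's location in the repo, instead of appending a .orig suffix next
# to the file.
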
class _containsnode(object):
    """proxy __contains__(node) to container.__contains__ which accepts revs"""

    def __init__(self, repo, revcontainer):
        self._torev = repo.changelog.rev
        self._revcontains = revcontainer.__contains__

    def __contains__(self, node):
        return self._revcontains(self._torev(node))

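# Illustrative sketch: _containsnode lets node-keyed code test membership
# in a rev container without converting every node by hand, e.g.:
#
#   drafts = _containsnode(repo, repo.revs('draft()'))
#   if somenode in drafts:    # 'somenode' is a hypothetical binary node id
#       ...
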
def cleanupnodes(repo, replacements, operation, moves=None, metadata=None,
                 fixphase=False, targetphase=None, backup=True):
    """do common cleanups when old nodes are replaced by new nodes

    That includes writing obsmarkers or stripping nodes, and moving bookmarks.
    (we might also want to move working directory parent in the future)

    By default, bookmark moves are calculated automatically from 'replacements',
    but 'moves' can be used to override that. Also, 'moves' may include
    additional bookmark moves that should not have associated obsmarkers.

    replacements is {oldnode: [newnode]} or an iterable of nodes if they do not
    have replacements. operation is a string, like "rebase".

    metadata is a dictionary containing metadata to be stored in obsmarkers if
    obsolescence is enabled.
    """
    assert fixphase or targetphase is None
    if not replacements and not moves:
        return

    # translate mapping's other forms
    if not util.safehasattr(replacements, 'items'):
        replacements = {n: () for n in replacements}

    # Calculate bookmark movements
    if moves is None:
        moves = {}
    # Unfiltered repo is needed since nodes in replacements might be hidden.
    unfi = repo.unfiltered()
    for oldnode, newnodes in replacements.items():
        if oldnode in moves:
            continue
        if len(newnodes) > 1:
            # usually a split, take the one with the biggest rev number
            newnode = next(unfi.set('max(%ln)', newnodes)).node()
        elif len(newnodes) == 0:
            # move bookmark backwards
            roots = list(unfi.set('max((::%n) - %ln)', oldnode,
                                  list(replacements)))
            if roots:
                newnode = roots[0].node()
            else:
                newnode = nullid
        else:
            newnode = newnodes[0]
        moves[oldnode] = newnode

    allnewnodes = [n for ns in replacements.values() for n in ns]
    toretract = {}
    toadvance = {}
    if fixphase:
        precursors = {}
        for oldnode, newnodes in replacements.items():
            for newnode in newnodes:
                precursors.setdefault(newnode, []).append(oldnode)

        allnewnodes.sort(key=lambda n: unfi[n].rev())
        newphases = {}
        def phase(ctx):
            return newphases.get(ctx.node(), ctx.phase())
        for newnode in allnewnodes:
            ctx = unfi[newnode]
            parentphase = max(phase(p) for p in ctx.parents())
            if targetphase is None:
                oldphase = max(unfi[oldnode].phase()
                               for oldnode in precursors[newnode])
                newphase = max(oldphase, parentphase)
            else:
                newphase = max(targetphase, parentphase)
            newphases[newnode] = newphase
            if newphase > ctx.phase():
                toretract.setdefault(newphase, []).append(newnode)
            elif newphase < ctx.phase():
                toadvance.setdefault(newphase, []).append(newnode)

    with repo.transaction('cleanup') as tr:
        # Move bookmarks
        bmarks = repo._bookmarks
        bmarkchanges = []
        for oldnode, newnode in moves.items():
            oldbmarks = repo.nodebookmarks(oldnode)
            if not oldbmarks:
                continue
            from . import bookmarks # avoid import cycle
            repo.ui.debug('moving bookmarks %r from %s to %s\n' %
                          (pycompat.rapply(pycompat.maybebytestr, oldbmarks),
                           hex(oldnode), hex(newnode)))
            # Delete divergent bookmarks being parents of related newnodes
            deleterevs = repo.revs('parents(roots(%ln & (::%n))) - parents(%n)',
                                   allnewnodes, newnode, oldnode)
            deletenodes = _containsnode(repo, deleterevs)
            for name in oldbmarks:
                bmarkchanges.append((name, newnode))
                for b in bookmarks.divergent2delete(repo, deletenodes, name):
                    bmarkchanges.append((b, None))

        if bmarkchanges:
            bmarks.applychanges(repo, tr, bmarkchanges)

        for phase, nodes in toretract.items():
            phases.retractboundary(repo, tr, phase, nodes)
        for phase, nodes in toadvance.items():
            phases.advanceboundary(repo, tr, phase, nodes)

        # Obsolete or strip nodes
        if obsolete.isenabled(repo, obsolete.createmarkersopt):
            # If a node is already obsoleted, and we want to obsolete it
            # without a successor, skip that obsolete request since it's
            # unnecessary. That's the "if s or not isobs(n)" check below.
            # Also sort the nodes in topological order, which might be
            # useful for some obsstore logic.
            # NOTE: the filtering and sorting might belong to createmarkers.
            isobs = unfi.obsstore.successors.__contains__
            torev = unfi.changelog.rev
            sortfunc = lambda ns: torev(ns[0])
            rels = [(unfi[n], tuple(unfi[m] for m in s))
                    for n, s in sorted(replacements.items(), key=sortfunc)
                    if s or not isobs(n)]
            if rels:
                obsolete.createmarkers(repo, rels, operation=operation,
                                       metadata=metadata)
        else:
            from . import repair # avoid import cycle
            tostrip = list(replacements)
            if tostrip:
                repair.delayedstrip(repo.ui, repo, tostrip, operation,
                                    backup=backup)

def addremove(repo, matcher, prefix, opts=None):
    if opts is None:
        opts = {}
    m = matcher
    dry_run = opts.get('dry_run')
    try:
        similarity = float(opts.get('similarity') or 0)
    except ValueError:
        raise error.Abort(_('similarity must be a number'))
    if similarity < 0 or similarity > 100:
        raise error.Abort(_('similarity must be between 0 and 100'))
    similarity /= 100.0

    ret = 0
    join = lambda f: os.path.join(prefix, f)

    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        if opts.get('subrepos') or m.exact(subpath) or any(submatch.files()):
            sub = wctx.sub(subpath)
            try:
                if sub.addremove(submatch, prefix, opts):
                    ret = 1
            except error.LookupError:
                repo.ui.status(_("skipping missing subrepository: %s\n")
                               % join(subpath))

    rejected = []
    def badfn(f, msg):
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
                                                                    badmatch)

    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _('adding %s\n') % m.uipath(abs)
                label = 'addremove.added'
            else:
                status = _('removing %s\n') % m.uipath(abs)
                label = 'addremove.removed'
            repo.ui.status(status, label=label)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return ret

def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative
    to the repo root.'''
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
    rejected = []

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _('adding %s\n') % abs
            else:
                status = _('removing %s\n') % abs
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return 0

def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.'''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root, cached=True)

    ctx = repo[None]
    dirstate = repo.dirstate
    walkresults = dirstate.walk(matcher, subrepos=sorted(ctx.substate),
                                unknown=True, ignored=False, full=False)
    for abs, st in walkresults.iteritems():
        dstate = dirstate[abs]
        if dstate == '?' and audit_path.check(abs):
            unknown.append(abs)
        elif dstate != 'r' and not st:
            deleted.append(abs)
        elif dstate == 'r' and st:
            forgotten.append(abs)
        # for finding renames
        elif dstate == 'r' and not st:
            removed.append(abs)
        elif dstate == 'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten

def _findrenames(repo, matcher, added, removed, similarity):
    '''Find renames from removed files to added ones.'''
    renames = {}
    if similarity > 0:
        for old, new, score in similar.findrenames(repo, added, removed,
                                                   similarity):
            if (repo.ui.verbose or not matcher.exact(old)
                or not matcher.exact(new)):
                repo.ui.status(_('recording removal of %s as rename to %s '
                                 '(%d%% similar)\n') %
                               (matcher.rel(old), matcher.rel(new),
                                score * 100))
            renames[new] = old
    return renames

def _markchanges(repo, unknown, deleted, renames):
    '''Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied.'''
    wctx = repo[None]
    with repo.wlock():
        wctx.forget(deleted)
        wctx.add(unknown)
        for new, old in renames.iteritems():
            wctx.copy(old, new)

def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from
    src.
    """
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc: # copying back a copy?
        if repo.dirstate[dst] not in 'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
    else:
        if repo.dirstate[origsrc] == 'a' and origsrc == src:
            if not ui.quiet:
                ui.warn(_("%s has not been committed yet, so no copy "
                          "data will be stored for %s.\n")
                        % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
            if repo.dirstate[dst] in '?r' and not dryrun:
                wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)

1148 def readrequires(opener, supported):
1148 def readrequires(opener, supported):
1149 '''Reads and parses .hg/requires and checks if all entries found
1149 '''Reads and parses .hg/requires and checks if all entries found
1150 are in the list of supported features.'''
1150 are in the list of supported features.'''
1151 requirements = set(opener.read("requires").splitlines())
1151 requirements = set(opener.read("requires").splitlines())
1152 missings = []
1152 missings = []
1153 for r in requirements:
1153 for r in requirements:
1154 if r not in supported:
1154 if r not in supported:
1155 if not r or not r[0:1].isalnum():
1155 if not r or not r[0:1].isalnum():
1156 raise error.RequirementError(_(".hg/requires file is corrupt"))
1156 raise error.RequirementError(_(".hg/requires file is corrupt"))
1157 missings.append(r)
1157 missings.append(r)
1158 missings.sort()
1158 missings.sort()
1159 if missings:
1159 if missings:
1160 raise error.RequirementError(
1160 raise error.RequirementError(
1161 _("repository requires features unknown to this Mercurial: %s")
1161 _("repository requires features unknown to this Mercurial: %s")
1162 % " ".join(missings),
1162 % " ".join(missings),
1163 hint=_("see https://mercurial-scm.org/wiki/MissingRequirement"
1163 hint=_("see https://mercurial-scm.org/wiki/MissingRequirement"
1164 " for more information"))
1164 " for more information"))
1165 return requirements
1165 return requirements
1166
1166
1167 def writerequires(opener, requirements):
1167 def writerequires(opener, requirements):
1168 with opener('requires', 'w') as fp:
1168 with opener('requires', 'w') as fp:
1169 for r in sorted(requirements):
1169 for r in sorted(requirements):
1170 fp.write("%s\n" % r)
1170 fp.write("%s\n" % r)
1171
1171
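Taken together, readrequires and writerequires round-trip a plain newline-separated feature list. A minimal self-contained sketch of the same check, using ordinary file I/O in place of Mercurial's vfs opener (the supported set shown is an invented example, not the real feature list):

def checkrequires(path, supported):
    # plain-file stand-in for opener.read("requires").splitlines()
    with open(path, 'rb') as fp:
        requirements = set(fp.read().splitlines())
    missing = sorted(r for r in requirements if r not in supported)
    if missing:
        raise RuntimeError('unsupported features: %s'
                           % ' '.join(m.decode() for m in missing))
    return requirements

# invented supported set, purely for illustration
# checkrequires('.hg/requires', {b'revlogv1', b'store', b'fncache'})
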
class filecachesubentry(object):
    def __init__(self, path, stat):
        self.path = path
        self.cachestat = None
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)

            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()
            else:
                # None means we don't know yet
                self._cacheable = None

    def refresh(self):
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        if self._cacheable is not None:
            return self._cacheable

        # we don't know yet, assume it is for now
        return True

    def changed(self):
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

        # check again
        if not self._cacheable:
            return True

        if self.cachestat != newstat:
            self.cachestat = newstat
            return True
        else:
            return False

    @staticmethod
    def stat(path):
        try:
            return util.cachestat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

class filecacheentry(object):
    def __init__(self, paths, stat=True):
        self._entries = []
        for path in paths:
            self._entries.append(filecachesubentry(path, stat))

    def changed(self):
        '''true if any entry has changed'''
        for entry in self._entries:
            if entry.changed():
                return True
        return False

    def refresh(self):
        for entry in self._entries:
            entry.refresh()

class filecache(object):
    """A property-like decorator that tracks files under .hg/ for updates.

    On first access, the files defined as arguments are stat()ed and the
    results cached. The decorated function is called. The results are stashed
    away in a ``_filecache`` dict on the object whose method is decorated.

    On subsequent access, the cached result is returned.

    On external property set operations, stat() calls are performed and the new
    value is cached.

    On property delete operations, cached data is removed.

    When using the property API, cached data is always returned, if available:
    no stat() is performed to check if the file has changed and if the function
    needs to be called to reflect file changes.

    Others can muck about with the state of the ``_filecache`` dict. e.g. they
    can populate an entry before the property's getter is called. In this case,
    entries in ``_filecache`` will be used during property operations,
    if available. If the underlying file changes, it is up to external callers
    to reflect this by e.g. calling ``delattr(obj, attr)`` to remove the cached
    method result as well as possibly calling ``del obj._filecache[attr]`` to
    remove the ``filecacheentry``.
    """

    def __init__(self, *paths):
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class whose member function was decorated).
        """
        raise NotImplementedError

    def __call__(self, func):
        self.func = func
        self.sname = func.__name__
        self.name = pycompat.sysbytes(self.sname)
        return self

    def __get__(self, obj, type=None):
        # if accessed on the class, return the descriptor itself.
        if obj is None:
            return self
        # do we need to check if the file changed?
        if self.sname in obj.__dict__:
            assert self.name in obj._filecache, self.name
            return obj.__dict__[self.sname]

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        obj.__dict__[self.sname] = entry.obj
        return entry.obj

    def __set__(self, obj, value):
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, path) for path in self.paths]
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value # update cached copy
        obj.__dict__[self.sname] = value # update copy returned by obj.x

    def __delete__(self, obj):
        try:
            del obj.__dict__[self.sname]
        except KeyError:
            raise AttributeError(self.sname)

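For context, here is a minimal sketch of the access pattern this descriptor implements: recompute a value only when the backing file's stat data changes. Everything in it (the statcachedproperty class, the demo.txt path) is illustrative, not Mercurial's actual API:

import os

class statcachedproperty(object):
    """Sketch: recompute a value only when the backing file's stat changes."""

    def __init__(self, path):
        self.path = path

    def __call__(self, func):
        self.func = func
        self.name = func.__name__
        return self

    def __get__(self, obj, objtype=None):
        if obj is None:
            return self
        try:
            # stat before computing, mirroring the ordering noted in
            # filecache.__get__ above
            st = os.stat(self.path)
            key = (st.st_mtime, st.st_size)
        except OSError:
            key = None
        cache = obj.__dict__.setdefault('_statcache', {})
        hit = cache.get(self.name)
        if hit is None or hit[0] != key:
            hit = (key, self.func(obj))  # recompute on change
            cache[self.name] = hit
        return hit[1]

class demo(object):
    @statcachedproperty('demo.txt')  # 'demo.txt' is a made-up path
    def contents(self):
        with open('demo.txt') as fp:
            return fp.read()

if __name__ == '__main__':
    with open('demo.txt', 'w') as fp:
        fp.write('one\n')
    d = demo()
    print(d.contents)  # computed
    print(d.contents)  # served from the stat-keyed cache
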
def extdatasource(repo, source):
    """Gather a map of rev -> value dict from the specified source

    A source spec is treated as a URL, with a special case shell: type
    for parsing the output from a shell command.

    The data is parsed as a series of newline-separated records where
    each record is a revision specifier optionally followed by a space
    and a freeform string value. If the revision is known locally, it
    is converted to a rev, otherwise the record is skipped.

    Note that both key and value are treated as UTF-8 and converted to
    the local encoding. This allows uniformity between local and
    remote data sources.
    """

    spec = repo.ui.config("extdata", source)
    if not spec:
        raise error.Abort(_("unknown extdata source '%s'") % source)

    data = {}
    src = proc = None
    try:
        if spec.startswith("shell:"):
            # external commands should be run relative to the repo root
            cmd = spec[6:]
            proc = subprocess.Popen(cmd, shell=True, bufsize=-1,
                                    close_fds=procutil.closefds,
                                    stdout=subprocess.PIPE, cwd=repo.root)
            src = proc.stdout
        else:
            # treat as a URL or file
            src = url.open(repo.ui, spec)
        for l in src:
            if " " in l:
                k, v = l.strip().split(" ", 1)
            else:
                k, v = l.strip(), ""

            k = encoding.tolocal(k)
            try:
                data[revsingle(repo, k).rev()] = encoding.tolocal(v)
            except (error.LookupError, error.RepoLookupError):
                pass # we ignore data for nodes that don't exist locally
    finally:
        if proc:
            proc.communicate()
        if src:
            src.close()
    if proc and proc.returncode != 0:
        raise error.Abort(_("extdata command '%s' failed: %s")
                          % (cmd, procutil.explainexit(proc.returncode)))

    return data

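The record format described in the docstring is simple enough to show in isolation; this sketch parses the same "rev[ value]" lines without the repo lookup step (the sample records are invented):

def parserecords(src):
    # each line: a revision specifier, optionally followed by ' value'
    data = {}
    for l in src:
        if b" " in l:
            k, v = l.strip().split(b" ", 1)
        else:
            k, v = l.strip(), b""
        if k:
            data[k] = v
    return data

print(parserecords([b"3de5eca88c00 reviewed\n", b"tip\n"]))
# {b'3de5eca88c00': b'reviewed', b'tip': b''}
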
def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
    if lock is None:
        raise error.LockInheritanceContractViolation(
            'lock can only be inherited while held')
    if environ is None:
        environ = {}
    with lock.inherit() as locker:
        environ[envvar] = locker
        return repo.ui.system(cmd, environ=environ, *args, **kwargs)

def wlocksub(repo, cmd, *args, **kwargs):
    """run cmd as a subprocess that allows inheriting repo's wlock

    This can only be called while the wlock is held. This takes all the
    arguments that ui.system does, and returns the exit code of the
    subprocess."""
    return _locksub(repo, repo.currentwlock(), 'HG_WLOCK_LOCKER', cmd, *args,
                    **kwargs)

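The calling convention, sketched: hold the wlock, then let the child process see the lock token through the HG_WLOCK_LOCKER environment variable. This is an illustrative fragment assuming an existing repo object; the command is an arbitrary example:

# Sketch only: `repo` comes from Mercurial's own API.
with repo.wlock():
    # _locksub publishes the lock token as HG_WLOCK_LOCKER for the child
    rc = wlocksub(repo, b'hg status')
    if rc != 0:
        repo.ui.warn(b'subcommand exited with %d\n' % rc)
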
class progress(object):
    def __init__(self, ui, topic, unit="", total=None):
        self.ui = ui
        self.pos = 0
        self.topic = topic
        self.unit = unit
        self.total = total

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.complete()

    def update(self, pos, item="", total=None):
        assert pos is not None
        if total:
            self.total = total
        self.pos = pos
        self._print(item)

    def increment(self, step=1, item="", total=None):
        self.update(self.pos + step, item, total)

    def complete(self):
        self.ui.progress(self.topic, None)

    def _print(self, item):
        self.ui.progress(self.topic, self.pos, item, self.unit,
                         self.total)

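The class is built to be used as a context manager. A short usage sketch, where ui, files and handle are assumed inputs rather than anything defined here:

with progress(ui, b'processing', unit=b'files', total=len(files)) as prog:
    for f in files:
        handle(f)               # hypothetical per-item work
        prog.increment(item=f)  # bumps pos and redraws via ui.progress()
# leaving the block calls complete(), which clears the topic
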
def gdinitconfig(ui):
    """helper function to know if a repo should be created as general delta
    """
    # experimental config: format.generaldelta
    return (ui.configbool('format', 'generaldelta')
            or ui.configbool('format', 'usegeneraldelta')
            or ui.configbool('format', 'sparse-revlog'))

def gddeltaconfig(ui):
    """helper function to know if incoming delta should be optimised
    """
    # experimental config: format.generaldelta
    return ui.configbool('format', 'generaldelta')

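In hgrc terms, any one of the following settings makes gdinitconfig return True; the values shown are an example configuration, not defaults:

[format]
generaldelta = yes
usegeneraldelta = yes
sparse-revlog = yes
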
class simplekeyvaluefile(object):
    """A simple file with key=value lines

    Keys must be alphanumerics and start with a letter, values must not
    contain '\n' characters"""
    firstlinekey = '__firstline'

    def __init__(self, vfs, path, keys=None):
        self.vfs = vfs
        self.path = path

    def read(self, firstlinenonkeyval=False):
        """Read the contents of a simple key-value file

        'firstlinenonkeyval' indicates whether the first line of file should
        be treated as a key-value pair or returned fully under the
        __firstline key."""
        lines = self.vfs.readlines(self.path)
        d = {}
        if firstlinenonkeyval:
            if not lines:
                e = _("empty simplekeyvalue file")
                raise error.CorruptedState(e)
            # we don't want to include '\n' in the __firstline
            d[self.firstlinekey] = lines[0][:-1]
            del lines[0]

        try:
            # the 'if line.strip()' part prevents us from failing on empty
            # lines which only contain '\n' therefore are not skipped
            # by 'if line'
            updatedict = dict(line[:-1].split('=', 1) for line in lines
                              if line.strip())
            if self.firstlinekey in updatedict:
                e = _("%r can't be used as a key")
                raise error.CorruptedState(e % self.firstlinekey)
            d.update(updatedict)
        except ValueError as e:
            raise error.CorruptedState(str(e))
        return d

    def write(self, data, firstline=None):
        """Write key=>value mapping to a file
        data is a dict. Keys must be alphanumerical and start with a letter.
        Values must not contain newline characters.

        If 'firstline' is not None, it is written to file before
        everything else, as it is, not in a key=value form"""
        lines = []
        if firstline is not None:
            lines.append('%s\n' % firstline)

        for k, v in data.items():
            if k == self.firstlinekey:
                e = "key name '%s' is reserved" % self.firstlinekey
                raise error.ProgrammingError(e)
            if not k[0:1].isalpha():
                e = "keys must start with a letter in a key-value file"
                raise error.ProgrammingError(e)
            if not k.isalnum():
                e = "invalid key name in a simple key-value file"
                raise error.ProgrammingError(e)
            if '\n' in v:
                e = "invalid value in a simple key-value file"
                raise error.ProgrammingError(e)
            lines.append("%s=%s\n" % (k, v))
        with self.vfs(self.path, mode='wb', atomictemp=True) as fp:
            fp.write(''.join(lines))

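The on-disk format is easiest to see concretely. This sketch applies the same validation rules as write(), with plain strings, no vfs, and invented keys:

def serialize(data, firstline=None):
    lines = [] if firstline is None else ['%s\n' % firstline]
    for k, v in sorted(data.items()):
        # same constraints write() enforces above
        assert k[0:1].isalpha() and k.isalnum() and '\n' not in v
        lines.append('%s=%s\n' % (k, v))
    return ''.join(lines)

print(serialize({'node': 'abc123', 'state': 'done'}, firstline='version 1'),
      end='')
# version 1
# node=abc123
# state=done
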
_reportobsoletedsource = [
    'debugobsolete',
    'pull',
    'push',
    'serve',
    'unbundle',
]

_reportnewcssource = [
    'pull',
    'unbundle',
]

def prefetchfiles(repo, revs, match):
    """Invokes the registered file prefetch functions, allowing extensions to
    ensure the corresponding files are available locally, before the command
    uses them."""
    if match:
        # The command itself will complain about files that don't exist, so
        # don't duplicate the message.
        match = matchmod.badmatch(match, lambda fn, msg: None)
    else:
        match = matchall(repo)

    fileprefetchhooks(repo, revs, match)

# a list of (repo, revs, match) prefetch functions
fileprefetchhooks = util.hooks()

# A marker that tells the evolve extension to suppress its own reporting
_reportstroubledchangesets = True

def registersummarycallback(repo, otr, txnname=''):
    """register a callback to issue a summary after the transaction is closed
    """
    def txmatch(sources):
        return any(txnname.startswith(source) for source in sources)

    categories = []

    def reportsummary(func):
        """decorator for report callbacks."""
        # The repoview life cycle is shorter than the one of the actual
        # underlying repository. So the filtered object can die before the
        # weakref is used leading to troubles. We keep a reference to the
        # unfiltered object and restore the filtering when retrieving the
        # repository through the weakref.
        filtername = repo.filtername
        reporef = weakref.ref(repo.unfiltered())
        def wrapped(tr):
            repo = reporef()
            if filtername:
                repo = repo.filtered(filtername)
            func(repo, tr)
        newcat = '%02i-txnreport' % len(categories)
        otr.addpostclose(newcat, wrapped)
        categories.append(newcat)
        return wrapped

    if txmatch(_reportobsoletedsource):
        @reportsummary
        def reportobsoleted(repo, tr):
            obsoleted = obsutil.getobsoleted(repo, tr)
            if obsoleted:
                repo.ui.status(_('obsoleted %i changesets\n')
                               % len(obsoleted))

    if (obsolete.isenabled(repo, obsolete.createmarkersopt) and
        repo.ui.configbool('experimental', 'evolution.report-instabilities')):
        instabilitytypes = [
            ('orphan', 'orphan'),
            ('phase-divergent', 'phasedivergent'),
            ('content-divergent', 'contentdivergent'),
        ]

        def getinstabilitycounts(repo):
            filtered = repo.changelog.filteredrevs
            counts = {}
            for instability, revset in instabilitytypes:
                counts[instability] = len(set(obsolete.getrevs(repo, revset)) -
                                          filtered)
            return counts

        oldinstabilitycounts = getinstabilitycounts(repo)
        @reportsummary
        def reportnewinstabilities(repo, tr):
            newinstabilitycounts = getinstabilitycounts(repo)
            for instability, revset in instabilitytypes:
                delta = (newinstabilitycounts[instability] -
                         oldinstabilitycounts[instability])
                msg = getinstabilitymessage(delta, instability)
                if msg:
                    repo.ui.warn(msg)

    if txmatch(_reportnewcssource):
        @reportsummary
        def reportnewcs(repo, tr):
            """Report the range of new revisions pulled/unbundled."""
-            newrevs = tr.changes.get('revs', pycompat.xrange(0, 0))
+            origrepolen = tr.changes.get('origrepolen', len(repo))
-            if not newrevs:
+            if origrepolen >= len(repo):
                return

            # Compute the bounds of new revisions' range, excluding obsoletes.
            unfi = repo.unfiltered()
-            revs = unfi.revs('%ld and not obsolete()', newrevs)
+            revs = unfi.revs('%d: and not obsolete()', origrepolen)
            if not revs:
                # Got only obsoletes.
                return
            minrev, maxrev = repo[revs.min()], repo[revs.max()]

            if minrev == maxrev:
                revrange = minrev
            else:
                revrange = '%s:%s' % (minrev, maxrev)
            repo.ui.status(_('new changesets %s\n') % revrange)

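The hunks in this callback (and the matching ones in reportphasechanges below) replace the tracked set of added revs with a single integer: the repository length recorded when the transaction opened. Since revision numbers are append-only, that integer is enough to recover the new range, as this self-contained sketch with invented lengths shows:

# Invented example values: repo length before and after a transaction.
origrepolen = 100  # recorded when the transaction opened
repolen = 104      # len(repo) once the transaction closed

# anything at or past origrepolen is new ...
newrevs = range(origrepolen, repolen)
assert list(newrevs) == [100, 101, 102, 103]

# ... and a pre-existing rev is simply one below origrepolen,
# which is what the new 'rev < origrepolen' test expresses
assert all(rev < origrepolen for rev in [0, 42, 99])
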
        @reportsummary
        def reportphasechanges(repo, tr):
            """Report statistics of phase changes for changesets pre-existing
            pull/unbundle.
            """
-            newrevs = tr.changes.get('revs', pycompat.xrange(0, 0))
+            origrepolen = tr.changes.get('origrepolen', len(repo))
            phasetracking = tr.changes.get('phases', {})
            if not phasetracking:
                return
            published = [
                rev for rev, (old, new) in phasetracking.iteritems()
-                if new == phases.public and rev not in newrevs
+                if new == phases.public and rev < origrepolen
            ]
            if not published:
                return
            repo.ui.status(_('%d local changesets published\n')
                           % len(published))

def getinstabilitymessage(delta, instability):
    """function to return the message to show warning about new instabilities

    exists as a separate function so that extensions can wrap it to show more
    information, like how to fix instabilities"""
    if delta > 0:
        return _('%i new %s changesets\n') % (delta, instability)

def nodesummaries(repo, nodes, maxnumnodes=4):
    if len(nodes) <= maxnumnodes or repo.ui.verbose:
        return ' '.join(short(h) for h in nodes)
    first = ' '.join(short(h) for h in nodes[:maxnumnodes])
    return _("%s and %d others") % (first, len(nodes) - maxnumnodes)

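nodesummaries truncates long node lists unless --verbose is in effect; the rule reduces to this string-only sketch (the sample items are invented):

def summarize(items, maxnumnodes=4):
    if len(items) <= maxnumnodes:
        return ' '.join(items)
    first = ' '.join(items[:maxnumnodes])
    return "%s and %d others" % (first, len(items) - maxnumnodes)

print(summarize(['a1b2', 'c3d4', 'e5f6', 'a7b8', 'c9d0', 'e1f2']))
# a1b2 c3d4 e5f6 a7b8 and 2 others
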
def enforcesinglehead(repo, tr, desc):
    """check that no named branch has multiple heads"""
    if desc in ('strip', 'repair'):
        # skip the logic during strip
        return
    visible = repo.filtered('visible')
    # possible improvement: we could restrict the check to the affected branch
    for name, heads in visible.branchmap().iteritems():
        if len(heads) > 1:
            msg = _('rejecting multiple heads on branch "%s"')
            msg %= name
            hint = _('%d heads: %s')
            hint %= (len(heads), nodesummaries(repo, heads))
            raise error.Abort(msg, hint=hint)

def wrapconvertsink(sink):
    """Allow extensions to wrap the sink returned by convcmd.convertsink()
    before it is used, whether or not the convert extension was formally loaded.
    """
    return sink

def unhidehashlikerevs(repo, specs, hiddentype):
    """parse the user specs and unhide changesets whose hash or revision number
    is passed.

    hiddentype can be: 1) 'warn': warn while unhiding changesets
                       2) 'nowarn': don't warn while unhiding changesets

    returns a repo object with the required changesets unhidden
    """
    if not repo.filtername or not repo.ui.configbool('experimental',
                                                     'directaccess'):
        return repo

    if repo.filtername not in ('visible', 'visible-hidden'):
        return repo

    symbols = set()
    for spec in specs:
        try:
            tree = revsetlang.parse(spec)
        except error.ParseError: # will be reported by scmutil.revrange()
            continue

        symbols.update(revsetlang.gethashlikesymbols(tree))

    if not symbols:
        return repo

    revs = _getrevsfromsymbols(repo, symbols)

    if not revs:
        return repo

    if hiddentype == 'warn':
        unfi = repo.unfiltered()
        revstr = ", ".join([pycompat.bytestr(unfi[l]) for l in revs])
        repo.ui.warn(_("warning: accessing hidden changesets for write "
                       "operation: %s\n") % revstr)

    # we have to use a new filtername to separate branch/tags caches until we
    # can disable these caches when revisions are dynamically pinned.
    return repo.filtered('visible-hidden', revs)

def _getrevsfromsymbols(repo, symbols):
    """parse the list of symbols and return a set of revision numbers of the
    hidden changesets present in symbols"""
    revs = set()
    unfi = repo.unfiltered()
    unficl = unfi.changelog
    cl = repo.changelog
    tiprev = len(unficl)
    allowrevnums = repo.ui.configbool('experimental', 'directaccess.revnums')
    for s in symbols:
        try:
            n = int(s)
            if n <= tiprev:
                if not allowrevnums:
                    continue
                else:
                    if n not in cl:
                        revs.add(n)
                    continue
        except ValueError:
            pass

        try:
            s = resolvehexnodeidprefix(unfi, s)
        except (error.LookupError, error.WdirUnsupported):
            s = None

        if s is not None:
            rev = unficl.rev(s)
            if rev not in cl:
                revs.add(rev)

    return revs

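Both functions are gated on experimental configuration. A sample hgrc enabling direct access, including bare revision numbers; both option names are read verbatim by the code above:

[experimental]
directaccess = yes
directaccess.revnums = yes
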
def bookmarkrevs(repo, mark):
    """
    Select revisions reachable by a given bookmark
    """
    return repo.revs("ancestors(bookmark(%s)) - "
                     "ancestors(head() and not bookmark(%s)) - "
                     "ancestors(bookmark() and not bookmark(%s))",
                     mark, mark, mark)
@@ -1,512 +1,514 b''
1 #require serve no-reposimplestore no-chg
1 #require serve no-reposimplestore no-chg
2
2
3 #testcases stream-legacy stream-bundle2
3 #testcases stream-legacy stream-bundle2
4
4
5 #if stream-bundle2
5 #if stream-bundle2
6 $ cat << EOF >> $HGRCPATH
6 $ cat << EOF >> $HGRCPATH
7 > [experimental]
7 > [experimental]
8 > bundle2.stream = yes
8 > bundle2.stream = yes
9 > EOF
9 > EOF
10 #endif
10 #endif
11
11
12 Initialize repository
12 Initialize repository
13 the status call is to check for issue5130
13 the status call is to check for issue5130
14
14
15 $ hg init server
15 $ hg init server
16 $ cd server
16 $ cd server
17 $ touch foo
17 $ touch foo
18 $ hg -q commit -A -m initial
18 $ hg -q commit -A -m initial
19 >>> for i in range(1024):
19 >>> for i in range(1024):
20 ... with open(str(i), 'wb') as fh:
20 ... with open(str(i), 'wb') as fh:
21 ... fh.write(b"%d" % i) and None
21 ... fh.write(b"%d" % i) and None
22 $ hg -q commit -A -m 'add a lot of files'
22 $ hg -q commit -A -m 'add a lot of files'
23 $ hg st
23 $ hg st
24 $ hg --config server.uncompressed=false serve -p $HGPORT -d --pid-file=hg.pid
24 $ hg --config server.uncompressed=false serve -p $HGPORT -d --pid-file=hg.pid
25 $ cat hg.pid > $DAEMON_PIDS
25 $ cat hg.pid > $DAEMON_PIDS
26 $ cd ..
26 $ cd ..
27
27
28 Cannot stream clone when server.uncompressed is set
28 Cannot stream clone when server.uncompressed is set
29
29
30 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=stream_out'
30 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=stream_out'
31 200 Script output follows
31 200 Script output follows
32
32
33 1
33 1
34
34
35 #if stream-legacy
35 #if stream-legacy
36 $ hg debugcapabilities http://localhost:$HGPORT
36 $ hg debugcapabilities http://localhost:$HGPORT
37 Main capabilities:
37 Main capabilities:
38 batch
38 batch
39 branchmap
39 branchmap
40 $USUAL_BUNDLE2_CAPS_SERVER$
40 $USUAL_BUNDLE2_CAPS_SERVER$
41 changegroupsubset
41 changegroupsubset
42 compression=$BUNDLE2_COMPRESSIONS$
42 compression=$BUNDLE2_COMPRESSIONS$
43 getbundle
43 getbundle
44 httpheader=1024
44 httpheader=1024
45 httpmediatype=0.1rx,0.1tx,0.2tx
45 httpmediatype=0.1rx,0.1tx,0.2tx
46 known
46 known
47 lookup
47 lookup
48 pushkey
48 pushkey
49 unbundle=HG10GZ,HG10BZ,HG10UN
49 unbundle=HG10GZ,HG10BZ,HG10UN
50 unbundlehash
50 unbundlehash
51 Bundle2 capabilities:
51 Bundle2 capabilities:
52 HG20
52 HG20
53 bookmarks
53 bookmarks
54 changegroup
54 changegroup
55 01
55 01
56 02
56 02
57 digests
57 digests
58 md5
58 md5
59 sha1
59 sha1
60 sha512
60 sha512
61 error
61 error
62 abort
62 abort
63 unsupportedcontent
63 unsupportedcontent
64 pushraced
64 pushraced
65 pushkey
65 pushkey
66 hgtagsfnodes
66 hgtagsfnodes
67 listkeys
67 listkeys
68 phases
68 phases
69 heads
69 heads
70 pushkey
70 pushkey
71 remote-changegroup
71 remote-changegroup
72 http
72 http
73 https
73 https
74 rev-branch-cache
74 rev-branch-cache
75
75
76 $ hg clone --stream -U http://localhost:$HGPORT server-disabled
76 $ hg clone --stream -U http://localhost:$HGPORT server-disabled
77 warning: stream clone requested but server has them disabled
77 warning: stream clone requested but server has them disabled
78 requesting all changes
78 requesting all changes
79 adding changesets
79 adding changesets
80 adding manifests
80 adding manifests
81 adding file changes
81 adding file changes
82 added 2 changesets with 1025 changes to 1025 files
82 added 2 changesets with 1025 changes to 1025 files
83 new changesets 96ee1d7354c4:c17445101a72
83 new changesets 96ee1d7354c4:c17445101a72
84
84
85 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto 0.2 --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1"
85 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto 0.2 --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1"
86 200 Script output follows
86 200 Script output follows
87 content-type: application/mercurial-0.2
87 content-type: application/mercurial-0.2
88
88
89
89
90 $ f --size body --hexdump --bytes 100
90 $ f --size body --hexdump --bytes 100
91 body: size=232
91 body: size=232
92 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
92 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
93 0010: cf 0b 45 52 52 4f 52 3a 41 42 4f 52 54 00 00 00 |..ERROR:ABORT...|
93 0010: cf 0b 45 52 52 4f 52 3a 41 42 4f 52 54 00 00 00 |..ERROR:ABORT...|
94 0020: 00 01 01 07 3c 04 72 6d 65 73 73 61 67 65 73 74 |....<.rmessagest|
94 0020: 00 01 01 07 3c 04 72 6d 65 73 73 61 67 65 73 74 |....<.rmessagest|
95 0030: 72 65 61 6d 20 64 61 74 61 20 72 65 71 75 65 73 |ream data reques|
95 0030: 72 65 61 6d 20 64 61 74 61 20 72 65 71 75 65 73 |ream data reques|
96 0040: 74 65 64 20 62 75 74 20 73 65 72 76 65 72 20 64 |ted but server d|
96 0040: 74 65 64 20 62 75 74 20 73 65 72 76 65 72 20 64 |ted but server d|
97 0050: 6f 65 73 20 6e 6f 74 20 61 6c 6c 6f 77 20 74 68 |oes not allow th|
97 0050: 6f 65 73 20 6e 6f 74 20 61 6c 6c 6f 77 20 74 68 |oes not allow th|
98 0060: 69 73 20 66 |is f|
98 0060: 69 73 20 66 |is f|
99
99
100 #endif
100 #endif
101 #if stream-bundle2
101 #if stream-bundle2
102 $ hg debugcapabilities http://localhost:$HGPORT
102 $ hg debugcapabilities http://localhost:$HGPORT
103 Main capabilities:
103 Main capabilities:
104 batch
104 batch
105 branchmap
105 branchmap
106 $USUAL_BUNDLE2_CAPS_SERVER$
106 $USUAL_BUNDLE2_CAPS_SERVER$
107 changegroupsubset
107 changegroupsubset
108 compression=$BUNDLE2_COMPRESSIONS$
108 compression=$BUNDLE2_COMPRESSIONS$
109 getbundle
109 getbundle
110 httpheader=1024
110 httpheader=1024
111 httpmediatype=0.1rx,0.1tx,0.2tx
111 httpmediatype=0.1rx,0.1tx,0.2tx
112 known
112 known
113 lookup
113 lookup
114 pushkey
114 pushkey
115 unbundle=HG10GZ,HG10BZ,HG10UN
115 unbundle=HG10GZ,HG10BZ,HG10UN
116 unbundlehash
116 unbundlehash
117 Bundle2 capabilities:
117 Bundle2 capabilities:
118 HG20
118 HG20
119 bookmarks
119 bookmarks
120 changegroup
120 changegroup
121 01
121 01
122 02
122 02
123 digests
123 digests
124 md5
124 md5
125 sha1
125 sha1
126 sha512
126 sha512
127 error
127 error
128 abort
128 abort
129 unsupportedcontent
129 unsupportedcontent
130 pushraced
130 pushraced
131 pushkey
131 pushkey
132 hgtagsfnodes
132 hgtagsfnodes
133 listkeys
133 listkeys
134 phases
134 phases
135 heads
135 heads
136 pushkey
136 pushkey
137 remote-changegroup
137 remote-changegroup
138 http
138 http
139 https
139 https
140 rev-branch-cache
140 rev-branch-cache
141
141
142 $ hg clone --stream -U http://localhost:$HGPORT server-disabled
142 $ hg clone --stream -U http://localhost:$HGPORT server-disabled
143 warning: stream clone requested but server has them disabled
143 warning: stream clone requested but server has them disabled
144 requesting all changes
144 requesting all changes
145 adding changesets
145 adding changesets
146 adding manifests
146 adding manifests
147 adding file changes
147 adding file changes
148 added 2 changesets with 1025 changes to 1025 files
148 added 2 changesets with 1025 changes to 1025 files
149 new changesets 96ee1d7354c4:c17445101a72
149 new changesets 96ee1d7354c4:c17445101a72
150
150
151 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto 0.2 --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1"
151 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto 0.2 --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1"
152 200 Script output follows
152 200 Script output follows
153 content-type: application/mercurial-0.2
153 content-type: application/mercurial-0.2
154
154
155
155
156 $ f --size body --hexdump --bytes 100
156 $ f --size body --hexdump --bytes 100
157 body: size=232
157 body: size=232
158 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
158 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
159 0010: cf 0b 45 52 52 4f 52 3a 41 42 4f 52 54 00 00 00 |..ERROR:ABORT...|
159 0010: cf 0b 45 52 52 4f 52 3a 41 42 4f 52 54 00 00 00 |..ERROR:ABORT...|
160 0020: 00 01 01 07 3c 04 72 6d 65 73 73 61 67 65 73 74 |....<.rmessagest|
160 0020: 00 01 01 07 3c 04 72 6d 65 73 73 61 67 65 73 74 |....<.rmessagest|
161 0030: 72 65 61 6d 20 64 61 74 61 20 72 65 71 75 65 73 |ream data reques|
161 0030: 72 65 61 6d 20 64 61 74 61 20 72 65 71 75 65 73 |ream data reques|
162 0040: 74 65 64 20 62 75 74 20 73 65 72 76 65 72 20 64 |ted but server d|
162 0040: 74 65 64 20 62 75 74 20 73 65 72 76 65 72 20 64 |ted but server d|
163 0050: 6f 65 73 20 6e 6f 74 20 61 6c 6c 6f 77 20 74 68 |oes not allow th|
163 0050: 6f 65 73 20 6e 6f 74 20 61 6c 6c 6f 77 20 74 68 |oes not allow th|
164 0060: 69 73 20 66 |is f|
164 0060: 69 73 20 66 |is f|
165
165
166 #endif
166 #endif
167
167
168 $ killdaemons.py
168 $ killdaemons.py
169 $ cd server
169 $ cd server
170 $ hg serve -p $HGPORT -d --pid-file=hg.pid
170 $ hg serve -p $HGPORT -d --pid-file=hg.pid
171 $ cat hg.pid > $DAEMON_PIDS
171 $ cat hg.pid > $DAEMON_PIDS
172 $ cd ..
172 $ cd ..
173
173
174 Basic clone
174 Basic clone
175
175
176 #if stream-legacy
176 #if stream-legacy
177 $ hg clone --stream -U http://localhost:$HGPORT clone1
177 $ hg clone --stream -U http://localhost:$HGPORT clone1
178 streaming all changes
178 streaming all changes
179 1027 files to transfer, 96.3 KB of data
179 1027 files to transfer, 96.3 KB of data
180 transferred 96.3 KB in * seconds (*/sec) (glob)
180 transferred 96.3 KB in * seconds (*/sec) (glob)
181 searching for changes
181 searching for changes
182 no changes found
182 no changes found
183 #endif
183 #endif
184 #if stream-bundle2
184 #if stream-bundle2
185 $ hg clone --stream -U http://localhost:$HGPORT clone1
185 $ hg clone --stream -U http://localhost:$HGPORT clone1
186 streaming all changes
186 streaming all changes
187 1030 files to transfer, 96.4 KB of data
187 1030 files to transfer, 96.4 KB of data
188 transferred 96.4 KB in * seconds (* */sec) (glob)
188 transferred 96.4 KB in * seconds (* */sec) (glob)
189
189
190 $ ls -1 clone1/.hg/cache
190 $ ls -1 clone1/.hg/cache
191 branch2-served
191 branch2-served
192 rbc-names-v1
192 rbc-names-v1
193 rbc-revs-v1
193 rbc-revs-v1
194 #endif
194 #endif
195
195
196 getbundle requests with stream=1 are uncompressed
196 getbundle requests with stream=1 are uncompressed
197
197
198 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto '0.1 0.2 comp=zlib,none' --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1"
198 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto '0.1 0.2 comp=zlib,none' --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1"
199 200 Script output follows
199 200 Script output follows
200 content-type: application/mercurial-0.2
200 content-type: application/mercurial-0.2
201
201
202
202
203 $ f --size --hex --bytes 256 body
203 $ f --size --hex --bytes 256 body
204 body: size=112230
204 body: size=112230
205 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
205 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
206 0010: 70 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |p.STREAM2.......|
206 0010: 70 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |p.STREAM2.......|
207 0020: 05 09 04 0c 35 62 79 74 65 63 6f 75 6e 74 39 38 |....5bytecount98|
207 0020: 05 09 04 0c 35 62 79 74 65 63 6f 75 6e 74 39 38 |....5bytecount98|
208 0030: 37 35 38 66 69 6c 65 63 6f 75 6e 74 31 30 33 30 |758filecount1030|
0030: 37 35 38 66 69 6c 65 63 6f 75 6e 74 31 30 33 30 |758filecount1030|
0040: 72 65 71 75 69 72 65 6d 65 6e 74 73 64 6f 74 65 |requirementsdote|
0050: 6e 63 6f 64 65 25 32 43 66 6e 63 61 63 68 65 25 |ncode%2Cfncache%|
0060: 32 43 67 65 6e 65 72 61 6c 64 65 6c 74 61 25 32 |2Cgeneraldelta%2|
0070: 43 72 65 76 6c 6f 67 76 31 25 32 43 73 74 6f 72 |Crevlogv1%2Cstor|
0080: 65 00 00 80 00 73 08 42 64 61 74 61 2f 30 2e 69 |e....s.Bdata/0.i|
0090: 00 03 00 01 00 00 00 00 00 00 00 02 00 00 00 01 |................|
00a0: 00 00 00 00 00 00 00 01 ff ff ff ff ff ff ff ff |................|
00b0: 80 29 63 a0 49 d3 23 87 bf ce fe 56 67 92 67 2c |.)c.I.#....Vg.g,|
00c0: 69 d1 ec 39 00 00 00 00 00 00 00 00 00 00 00 00 |i..9............|
00d0: 75 30 73 08 42 64 61 74 61 2f 31 2e 69 00 03 00 |u0s.Bdata/1.i...|
00e0: 01 00 00 00 00 00 00 00 02 00 00 00 01 00 00 00 |................|
00f0: 00 00 00 00 01 ff ff ff ff ff ff ff ff f9 76 da |..............v.|

--uncompressed is an alias to --stream

#if stream-legacy
$ hg clone --uncompressed -U http://localhost:$HGPORT clone1-uncompressed
streaming all changes
1027 files to transfer, 96.3 KB of data
transferred 96.3 KB in * seconds (*/sec) (glob)
searching for changes
no changes found
#endif
#if stream-bundle2
$ hg clone --uncompressed -U http://localhost:$HGPORT clone1-uncompressed
streaming all changes
1030 files to transfer, 96.4 KB of data
transferred 96.4 KB in * seconds (* */sec) (glob)
#endif

Clone with background file closing enabled

#if stream-legacy
$ hg --debug --config worker.backgroundclose=true --config worker.backgroundcloseminfilecount=1 clone --stream -U http://localhost:$HGPORT clone-background | grep -v adding
using http://localhost:$HGPORT/
sending capabilities command
sending branchmap command
streaming all changes
sending stream_out command
1027 files to transfer, 96.3 KB of data
starting 4 threads for background file closing
updating the branch cache
transferred 96.3 KB in * seconds (*/sec) (glob)
query 1; heads
sending batch command
searching for changes
all remote heads known locally
no changes found
sending getbundle command
bundle2-input-bundle: with-transaction
bundle2-input-part: "listkeys" (params: 1 mandatory) supported
bundle2-input-part: "phase-heads" supported
bundle2-input-part: total payload size 24
bundle2-input-bundle: 1 parts total
checking for updated bookmarks
#endif
#if stream-bundle2
$ hg --debug --config worker.backgroundclose=true --config worker.backgroundcloseminfilecount=1 clone --stream -U http://localhost:$HGPORT clone-background | grep -v adding
using http://localhost:$HGPORT/
sending capabilities command
query 1; heads
sending batch command
streaming all changes
sending getbundle command
bundle2-input-bundle: with-transaction
bundle2-input-part: "stream2" (params: 3 mandatory) supported
applying stream bundle
1030 files to transfer, 96.4 KB of data
starting 4 threads for background file closing
starting 4 threads for background file closing
updating the branch cache
transferred 96.4 KB in * seconds (* */sec) (glob)
bundle2-input-part: total payload size 112077
bundle2-input-part: "listkeys" (params: 1 mandatory) supported
bundle2-input-bundle: 1 parts total
checking for updated bookmarks
#endif
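
For reference, the two worker options passed with --config above can also be
set persistently; an illustrative hgrc fragment, not part of the recorded
test run:

$ cat >> .hg/hgrc << EOF
> [worker]
> backgroundclose = true
> backgroundcloseminfilecount = 1
> EOF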

Cannot stream clone when there are secret changesets

$ hg -R server phase --force --secret -r tip
$ hg clone --stream -U http://localhost:$HGPORT secret-denied
warning: stream clone requested but server has them disabled
requesting all changes
adding changesets
adding manifests
adding file changes
added 1 changesets with 1 changes to 1 files
new changesets 96ee1d7354c4

$ killdaemons.py

Streaming of secrets can be overridden by server config

$ cd server
$ hg serve --config server.uncompressedallowsecret=true -p $HGPORT -d --pid-file=hg.pid
$ cat hg.pid > $DAEMON_PIDS
$ cd ..
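
The same override can be made permanent instead of being passed on each
serve invocation; an illustrative addition to the server's hgrc, not part of
the recorded test run:

$ cat >> server/.hg/hgrc << EOF
> [server]
> uncompressedallowsecret = true
> EOF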

#if stream-legacy
$ hg clone --stream -U http://localhost:$HGPORT secret-allowed
streaming all changes
1027 files to transfer, 96.3 KB of data
transferred 96.3 KB in * seconds (*/sec) (glob)
searching for changes
no changes found
#endif
#if stream-bundle2
$ hg clone --stream -U http://localhost:$HGPORT secret-allowed
streaming all changes
1030 files to transfer, 96.4 KB of data
transferred 96.4 KB in * seconds (* */sec) (glob)
#endif

$ killdaemons.py

Verify interaction between preferuncompressed and secret presence

$ cd server
$ hg serve --config server.preferuncompressed=true -p $HGPORT -d --pid-file=hg.pid
$ cat hg.pid > $DAEMON_PIDS
$ cd ..

$ hg clone -U http://localhost:$HGPORT preferuncompressed-secret
requesting all changes
adding changesets
adding manifests
adding file changes
added 1 changesets with 1 changes to 1 files
new changesets 96ee1d7354c4

$ killdaemons.py

Clone not allowed when full bundles disabled and can't serve secrets

$ cd server
$ hg serve --config server.disablefullbundle=true -p $HGPORT -d --pid-file=hg.pid
$ cat hg.pid > $DAEMON_PIDS
$ cd ..

$ hg clone --stream http://localhost:$HGPORT secret-full-disabled
warning: stream clone requested but server has them disabled
requesting all changes
remote: abort: server has pull-based clones disabled
abort: pull failed on remote
(remove --pull if specified or upgrade Mercurial)
[255]

Local stream clone with secrets involved
(This only tests the behavior: if you already have access to the repo's files,
there is no security boundary, so preventing a clone here is not important.)

$ hg clone -U --stream server local-secret
warning: stream clone requested but server has them disabled
requesting all changes
adding changesets
adding manifests
adding file changes
added 1 changesets with 1 changes to 1 files
new changesets 96ee1d7354c4

Stream clone while repo is changing:

$ mkdir changing
$ cd changing

extension for delaying the server process so we can reliably modify the repo
while cloning

$ cat > delayer.py <<EOF
> import time
> from mercurial import extensions, vfs
> def __call__(orig, self, path, *args, **kwargs):
>     if path == 'data/f1.i':
>         time.sleep(2)
>     return orig(self, path, *args, **kwargs)
> extensions.wrapfunction(vfs.vfs, '__call__', __call__)
> EOF
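
extensions.wrapfunction is Mercurial's standard monkey-patching hook: the
wrapper receives the original callable as its first argument and decides when
(or whether) to call it. A minimal sketch of the same pattern used for tracing
instead of delaying; illustrative only, not part of the test:

$ cat > tracer.py << EOF
> # illustrative: log every vfs open instead of delaying one path
> from mercurial import extensions, vfs
> def traced(orig, self, path, *args, **kwargs):
>     print('opening %s' % path)
>     return orig(self, path, *args, **kwargs)
> extensions.wrapfunction(vfs.vfs, '__call__', traced)
> EOF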

prepare a repo with a small and a big file to cover both code paths in emitrevlogdata

$ hg init repo
$ touch repo/f1
$ $TESTDIR/seq.py 50000 > repo/f2
$ hg -R repo ci -Aqm "0"
$ hg serve -R repo -p $HGPORT1 -d --pid-file=hg.pid --config extensions.delayer=delayer.py
$ cat hg.pid >> $DAEMON_PIDS

clone while modifying the repo between the server stat()ing a file under the
write lock and actually serving the file content

$ hg clone -q --stream -U http://localhost:$HGPORT1 clone &
$ sleep 1
$ echo >> repo/f1
$ echo >> repo/f2
$ hg -R repo ci -m "1"
$ wait
$ hg -R clone id
000000000000
$ cd ..

Stream repository with bookmarks
--------------------------------

(revert introduction of secret changeset)

$ hg -R server phase --draft 'secret()'

add a bookmark

$ hg -R server bookmark -r tip some-bookmark

clone it

#if stream-legacy
$ hg clone --stream http://localhost:$HGPORT with-bookmarks
streaming all changes
1027 files to transfer, 96.3 KB of data
transferred 96.3 KB in * seconds (*) (glob)
searching for changes
no changes found
updating to branch default
1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
#endif
#if stream-bundle2
$ hg clone --stream http://localhost:$HGPORT with-bookmarks
streaming all changes
1033 files to transfer, 96.6 KB of data
transferred 96.6 KB in * seconds (* */sec) (glob)
updating to branch default
1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
#endif
$ hg -R with-bookmarks bookmarks
some-bookmark 1:c17445101a72

Stream repository with phases
-----------------------------

Clone as publishing

$ hg -R server phase -r 'all()'
0: draft
1: draft

#if stream-legacy
$ hg clone --stream http://localhost:$HGPORT phase-publish
streaming all changes
1027 files to transfer, 96.3 KB of data
transferred 96.3 KB in * seconds (*) (glob)
searching for changes
no changes found
updating to branch default
1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
#endif
#if stream-bundle2
$ hg clone --stream http://localhost:$HGPORT phase-publish
streaming all changes
1033 files to transfer, 96.6 KB of data
transferred 96.6 KB in * seconds (* */sec) (glob)
updating to branch default
1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
#endif
$ hg -R phase-publish phase -r 'all()'
0: public
1: public

Clone as non publishing

$ cat << EOF >> server/.hg/hgrc
> [phases]
> publish = False
> EOF
$ killdaemons.py
$ hg -R server serve -p $HGPORT -d --pid-file=hg.pid
$ cat hg.pid > $DAEMON_PIDS

#if stream-legacy

With v1 of the stream protocol, changesets are always cloned as public. This
makes stream v1 unsuitable for non-publishing repositories.
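
If this bites in practice, phases can be forced back to draft after such a
clone; an illustrative command, not part of the recorded test run
('phase-no-publish' is the clone created just below):

$ hg -R phase-no-publish phase --force --draft -r 'all()'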

$ hg clone --stream http://localhost:$HGPORT phase-no-publish
streaming all changes
1027 files to transfer, 96.3 KB of data
transferred 96.3 KB in * seconds (*) (glob)
searching for changes
no changes found
updating to branch default
1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ hg -R phase-no-publish phase -r 'all()'
0: public
1: public
#endif
#if stream-bundle2
$ hg clone --stream http://localhost:$HGPORT phase-no-publish
streaming all changes
1034 files to transfer, 96.7 KB of data
transferred 96.7 KB in * seconds (* */sec) (glob)
updating to branch default
1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ hg -R phase-no-publish phase -r 'all()'
0: draft
1: draft
#endif

$ killdaemons.py
@@ -1,172 +1,174 @@
#require no-reposimplestore

Test creating and consuming a stream bundle v2

$ getmainid() {
> hg -R main log --template '{node}\n' --rev "$1"
> }

$ cp $HGRCPATH $TESTTMP/hgrc.orig

$ cat >> $HGRCPATH << EOF
> [experimental]
> evolution.createmarkers=True
> evolution.exchange=True
> bundle2-output-capture=True
> [ui]
> ssh="$PYTHON" "$TESTDIR/dummyssh"
> logtemplate={rev}:{node|short} {phase} {author} {bookmarks} {desc|firstline}
> [web]
> push_ssl = false
> allow_push = *
> [phases]
> publish=False
> [extensions]
> drawdag=$TESTDIR/drawdag.py
> clonebundles=
> EOF

The extension requires a repo (currently unused)

$ hg init main
$ cd main

$ hg debugdrawdag <<'EOF'
> E
> |
> D
> |
> C
> |
> B
> |
> A
> EOF

$ hg bundle -a --type="none-v2;stream=v2" bundle.hg
$ hg debugbundle bundle.hg
Stream params: {}
stream2 -- {bytecount: 1693, filecount: 11, requirements: dotencode%2Cfncache%2Cgeneraldelta%2Crevlogv1%2Cstore} (mandatory: True)
$ hg debugbundle --spec bundle.hg
none-v2;stream=v2;requirements%3Ddotencode%2Cfncache%2Cgeneraldelta%2Crevlogv1%2Cstore
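
The requirements list inside the spec is percent-encoded so that its commas
and equals sign survive inside the semicolon-separated bundlespec; decoding it
back is a one-liner (Python 3 doctest style, standard library only, shown for
illustration):

>>> from urllib.parse import unquote
>>> unquote('requirements%3Ddotencode%2Cfncache%2Cgeneraldelta%2Crevlogv1%2Cstore')
'requirements=dotencode,fncache,generaldelta,revlogv1,store'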

Test that we can apply the bundle as a stream clone bundle

$ cat > .hg/clonebundles.manifest << EOF
> http://localhost:$HGPORT1/bundle.hg BUNDLESPEC=`hg debugbundle --spec bundle.hg`
> EOF
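
Each manifest line is a URL optionally followed by space-separated KEY=VALUE
attributes; BUNDLESPEC lets a client skip bundles it cannot apply before
downloading anything. A hypothetical multi-entry manifest (URLs invented for
illustration):

http://mirror-a.example.com/stream.hg BUNDLESPEC=none-v2;stream=v2
http://mirror-b.example.com/full.hg BUNDLESPEC=gzip-v2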

$ hg serve -d -p $HGPORT --pid-file hg.pid --accesslog access.log
$ cat hg.pid >> $DAEMON_PIDS

$ "$PYTHON" $TESTDIR/dumbhttp.py -p $HGPORT1 --pid http.pid
$ cat http.pid >> $DAEMON_PIDS

$ cd ..
$ hg clone http://localhost:$HGPORT streamv2-clone-implicit --debug
using http://localhost:$HGPORT/
sending capabilities command
sending clonebundles command
applying clone bundle from http://localhost:$HGPORT1/bundle.hg
bundle2-input-bundle: with-transaction
bundle2-input-part: "stream2" (params: 3 mandatory) supported
applying stream bundle
11 files to transfer, 1.65 KB of data
starting 4 threads for background file closing (?)
starting 4 threads for background file closing (?)
adding [s] data/A.i (66 bytes)
adding [s] data/B.i (66 bytes)
adding [s] data/C.i (66 bytes)
adding [s] data/D.i (66 bytes)
adding [s] data/E.i (66 bytes)
adding [s] 00manifest.i (584 bytes)
adding [s] 00changelog.i (595 bytes)
adding [s] phaseroots (43 bytes)
adding [c] branch2-served (94 bytes)
adding [c] rbc-names-v1 (7 bytes)
adding [c] rbc-revs-v1 (40 bytes)
transferred 1.65 KB in \d\.\d seconds \(.*/sec\) (re)
bundle2-input-part: total payload size 1840
bundle2-input-bundle: 0 parts total
updating the branch cache
finished applying clone bundle
query 1; heads
sending batch command
searching for changes
all remote heads known locally
no changes found
sending getbundle command
bundle2-input-bundle: with-transaction
bundle2-input-part: "listkeys" (params: 1 mandatory) supported
bundle2-input-part: "phase-heads" supported
bundle2-input-part: total payload size 24
bundle2-input-bundle: 1 parts total
checking for updated bookmarks
updating to branch default
resolving manifests
branchmerge: False, force: False, partial: False
ancestor: 000000000000, local: 000000000000+, remote: 9bc730a19041
A: remote created -> g
getting A
B: remote created -> g
getting B
C: remote created -> g
getting C
D: remote created -> g
getting D
E: remote created -> g
getting E
5 files updated, 0 files merged, 0 files removed, 0 files unresolved

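Three output matchers appear in these expectations and are worth spelling
out: a trailing (glob) makes * in the line match any text, a trailing (re)
makes the whole line a regular expression, and (?) marks the line as
optional. All three occur in this transcript, e.g.:

transferred 1.65 KB in \d\.\d seconds \(.*/sec\) (re)
starting 4 threads for background file closing (?)
transferred 1.65 KB in *.* seconds (*/sec) (glob)
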
$ hg clone --stream http://localhost:$HGPORT streamv2-clone-explicit --debug
using http://localhost:$HGPORT/
sending capabilities command
sending clonebundles command
applying clone bundle from http://localhost:$HGPORT1/bundle.hg
bundle2-input-bundle: with-transaction
bundle2-input-part: "stream2" (params: 3 mandatory) supported
applying stream bundle
11 files to transfer, 1.65 KB of data
starting 4 threads for background file closing (?)
starting 4 threads for background file closing (?)
adding [s] data/A.i (66 bytes)
adding [s] data/B.i (66 bytes)
adding [s] data/C.i (66 bytes)
adding [s] data/D.i (66 bytes)
adding [s] data/E.i (66 bytes)
adding [s] 00manifest.i (584 bytes)
adding [s] 00changelog.i (595 bytes)
adding [s] phaseroots (43 bytes)
adding [c] branch2-served (94 bytes)
adding [c] rbc-names-v1 (7 bytes)
adding [c] rbc-revs-v1 (40 bytes)
transferred 1.65 KB in *.* seconds (*/sec) (glob)
bundle2-input-part: total payload size 1840
bundle2-input-bundle: 0 parts total
updating the branch cache
finished applying clone bundle
query 1; heads
sending batch command
searching for changes
all remote heads known locally
no changes found
sending getbundle command
bundle2-input-bundle: with-transaction
bundle2-input-part: "listkeys" (params: 1 mandatory) supported
bundle2-input-part: "phase-heads" supported
bundle2-input-part: total payload size 24
bundle2-input-bundle: 1 parts total
checking for updated bookmarks
updating to branch default
resolving manifests
branchmerge: False, force: False, partial: False
ancestor: 000000000000, local: 000000000000+, remote: 9bc730a19041
A: remote created -> g
getting A
B: remote created -> g
getting B
C: remote created -> g
getting C
D: remote created -> g
getting D
E: remote created -> g
getting E
5 files updated, 0 files merged, 0 files removed, 0 files unresolved