branchmap: add a cache validation cache, avoid expensive re-hash on every use...
Kyle Lippincott
r46088:89f0d9f8 default
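
The patch is a memoization: computing the SHA-1 over the filtered revision set in `scmutil.filteredhash` is expensive for views with many filtered revisions, so the changelog grows a `{maxrev: digest}` cache that is invalidated whenever `filteredrevs` is reassigned. The following is a minimal, self-contained sketch of that pattern, for orientation only; `Changelog` and the standalone `filteredhash` below are simplified stand-ins, not the real Mercurial classes touched by the diffs that follow.

import hashlib

class Changelog(object):
    # Simplified stand-in: models only the filteredrevs attribute and
    # its validation-hash cache from the real changelog class.
    def __init__(self):
        self._filteredrevs = frozenset()
        self._filteredrevs_hashcache = {}  # maxrev -> SHA-1 digest

    @property
    def filteredrevs(self):
        return self._filteredrevs

    @filteredrevs.setter
    def filteredrevs(self, val):
        # All writes funnel through here, the one place the cache can
        # go stale, so drop it on every reassignment.
        assert isinstance(val, frozenset)
        self._filteredrevs = val
        self._filteredrevs_hashcache = {}

def filteredhash(cl, maxrev):
    # Same shape as the new scmutil.filteredhash: consult the cache,
    # compute and store the digest only on a miss.
    if not cl.filteredrevs:
        return None
    key = cl._filteredrevs_hashcache.get(maxrev)
    if not key:
        revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
        if revs:
            s = hashlib.sha1()
            for rev in revs:
                s.update(b'%d;' % rev)
            key = s.digest()
        cl._filteredrevs_hashcache[maxrev] = key
    return key

cl = Changelog()
cl.filteredrevs = frozenset({2, 5, 9})
first = filteredhash(cl, 10)          # computed, then cached
assert filteredhash(cl, 10) is first  # served straight from the cache
cl.filteredrevs = frozenset({2, 5})   # setter clears the cache
assert filteredhash(cl, 10) != first

Note that the real patch passes `repo` to `filteredhash` and reads `repo.changelog`; the sketch inlines the changelog object for brevity.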
--- a/mercurial/changelog.py
+++ b/mercurial/changelog.py
@@ -1,585 +1,597 @@
 # changelog.py - changelog class for mercurial
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from __future__ import absolute_import

 from .i18n import _
 from .node import (
     bin,
     hex,
     nullid,
 )
 from .thirdparty import attr

 from . import (
     encoding,
     error,
     metadata,
     pycompat,
     revlog,
 )
 from .utils import (
     dateutil,
     stringutil,
 )

 from .revlogutils import sidedata as sidedatamod

 _defaultextra = {b'branch': b'default'}


 def _string_escape(text):
     """
     >>> from .pycompat import bytechr as chr
     >>> d = {b'nl': chr(10), b'bs': chr(92), b'cr': chr(13), b'nul': chr(0)}
     >>> s = b"ab%(nl)scd%(bs)s%(bs)sn%(nul)s12ab%(cr)scd%(bs)s%(nl)s" % d
     >>> s
     'ab\\ncd\\\\\\\\n\\x0012ab\\rcd\\\\\\n'
     >>> res = _string_escape(s)
     >>> s == _string_unescape(res)
     True
     """
     # subset of the string_escape codec
     text = (
         text.replace(b'\\', b'\\\\')
         .replace(b'\n', b'\\n')
         .replace(b'\r', b'\\r')
     )
     return text.replace(b'\0', b'\\0')


 def _string_unescape(text):
     if b'\\0' in text:
         # fix up \0 without getting into trouble with \\0
         text = text.replace(b'\\\\', b'\\\\\n')
         text = text.replace(b'\\0', b'\0')
         text = text.replace(b'\n', b'')
     return stringutil.unescapestr(text)


 def decodeextra(text):
     """
     >>> from .pycompat import bytechr as chr
     >>> sorted(decodeextra(encodeextra({b'foo': b'bar', b'baz': chr(0) + b'2'})
     ...                    ).items())
     [('baz', '\\x002'), ('branch', 'default'), ('foo', 'bar')]
     >>> sorted(decodeextra(encodeextra({b'foo': b'bar',
     ...                                 b'baz': chr(92) + chr(0) + b'2'})
     ...                    ).items())
     [('baz', '\\\\\\x002'), ('branch', 'default'), ('foo', 'bar')]
     """
     extra = _defaultextra.copy()
     for l in text.split(b'\0'):
         if l:
             k, v = _string_unescape(l).split(b':', 1)
             extra[k] = v
     return extra


 def encodeextra(d):
     # keys must be sorted to produce a deterministic changelog entry
     items = [_string_escape(b'%s:%s' % (k, d[k])) for k in sorted(d)]
     return b"\0".join(items)


 def stripdesc(desc):
     """strip trailing whitespace and leading and trailing empty lines"""
     return b'\n'.join([l.rstrip() for l in desc.splitlines()]).strip(b'\n')


 class appender(object):
     '''the changelog index must be updated last on disk, so we use this class
     to delay writes to it'''

     def __init__(self, vfs, name, mode, buf):
         self.data = buf
         fp = vfs(name, mode)
         self.fp = fp
         self.offset = fp.tell()
         self.size = vfs.fstat(fp).st_size
         self._end = self.size

     def end(self):
         return self._end

     def tell(self):
         return self.offset

     def flush(self):
         pass

     @property
     def closed(self):
         return self.fp.closed

     def close(self):
         self.fp.close()

     def seek(self, offset, whence=0):
         '''virtual file offset spans real file and data'''
         if whence == 0:
             self.offset = offset
         elif whence == 1:
             self.offset += offset
         elif whence == 2:
             self.offset = self.end() + offset
         if self.offset < self.size:
             self.fp.seek(self.offset)

     def read(self, count=-1):
         '''only trick here is reads that span real file and data'''
         ret = b""
         if self.offset < self.size:
             s = self.fp.read(count)
             ret = s
             self.offset += len(s)
             if count > 0:
                 count -= len(s)
         if count != 0:
             doff = self.offset - self.size
             self.data.insert(0, b"".join(self.data))
             del self.data[1:]
             s = self.data[0][doff : doff + count]
             self.offset += len(s)
             ret += s
         return ret

     def write(self, s):
         self.data.append(bytes(s))
         self.offset += len(s)
         self._end += len(s)

     def __enter__(self):
         self.fp.__enter__()
         return self

     def __exit__(self, *args):
         return self.fp.__exit__(*args)


 class _divertopener(object):
     def __init__(self, opener, target):
         self._opener = opener
         self._target = target

     def __call__(self, name, mode=b'r', checkambig=False, **kwargs):
         if name != self._target:
             return self._opener(name, mode, **kwargs)
         return self._opener(name + b".a", mode, **kwargs)

     def __getattr__(self, attr):
         return getattr(self._opener, attr)


 def _delayopener(opener, target, buf):
     """build an opener that stores chunks in 'buf' instead of 'target'"""

     def _delay(name, mode=b'r', checkambig=False, **kwargs):
         if name != target:
             return opener(name, mode, **kwargs)
         assert not kwargs
         return appender(opener, name, mode, buf)

     return _delay


 @attr.s
 class _changelogrevision(object):
     # Extensions might modify _defaultextra, so let the constructor below pass
     # it in
     extra = attr.ib()
     manifest = attr.ib(default=nullid)
     user = attr.ib(default=b'')
     date = attr.ib(default=(0, 0))
     files = attr.ib(default=attr.Factory(list))
     filesadded = attr.ib(default=None)
     filesremoved = attr.ib(default=None)
     p1copies = attr.ib(default=None)
     p2copies = attr.ib(default=None)
     description = attr.ib(default=b'')


 class changelogrevision(object):
     """Holds results of a parsed changelog revision.

     Changelog revisions consist of multiple pieces of data, including
     the manifest node, user, and date. This object exposes a view into
     the parsed object.
     """

     __slots__ = (
         '_offsets',
         '_text',
         '_sidedata',
         '_cpsd',
     )

     def __new__(cls, text, sidedata, cpsd):
         if not text:
             return _changelogrevision(extra=_defaultextra)

         self = super(changelogrevision, cls).__new__(cls)
         # We could return here and implement the following as an __init__.
         # But doing it here is equivalent and saves an extra function call.

         # format used:
         # nodeid\n        : manifest node in ascii
         # user\n          : user, no \n or \r allowed
         # time tz extra\n : date (time is int or float, timezone is int)
         #                 : extra is metadata, encoded and separated by '\0'
         #                 : older versions ignore it
         # files\n\n       : files modified by the cset, no \n or \r allowed
         # (.*)            : comment (free text, ideally utf-8)
         #
         # changelog v0 doesn't use extra

         nl1 = text.index(b'\n')
         nl2 = text.index(b'\n', nl1 + 1)
         nl3 = text.index(b'\n', nl2 + 1)

         # The list of files may be empty. Which means nl3 is the first of the
         # double newline that precedes the description.
         if text[nl3 + 1 : nl3 + 2] == b'\n':
             doublenl = nl3
         else:
             doublenl = text.index(b'\n\n', nl3 + 1)

         self._offsets = (nl1, nl2, nl3, doublenl)
         self._text = text
         self._sidedata = sidedata
         self._cpsd = cpsd

         return self

     @property
     def manifest(self):
         return bin(self._text[0 : self._offsets[0]])

     @property
     def user(self):
         off = self._offsets
         return encoding.tolocal(self._text[off[0] + 1 : off[1]])

     @property
     def _rawdate(self):
         off = self._offsets
         dateextra = self._text[off[1] + 1 : off[2]]
         return dateextra.split(b' ', 2)[0:2]

     @property
     def _rawextra(self):
         off = self._offsets
         dateextra = self._text[off[1] + 1 : off[2]]
         fields = dateextra.split(b' ', 2)
         if len(fields) != 3:
             return None

         return fields[2]

     @property
     def date(self):
         raw = self._rawdate
         time = float(raw[0])
         # Various tools did silly things with the timezone.
         try:
             timezone = int(raw[1])
         except ValueError:
             timezone = 0

         return time, timezone

     @property
     def extra(self):
         raw = self._rawextra
         if raw is None:
             return _defaultextra

         return decodeextra(raw)

     @property
     def files(self):
         off = self._offsets
         if off[2] == off[3]:
             return []

         return self._text[off[2] + 1 : off[3]].split(b'\n')

     @property
     def filesadded(self):
         if self._cpsd:
             rawindices = self._sidedata.get(sidedatamod.SD_FILESADDED)
             if not rawindices:
                 return []
         else:
             rawindices = self.extra.get(b'filesadded')
         if rawindices is None:
             return None
         return metadata.decodefileindices(self.files, rawindices)

     @property
     def filesremoved(self):
         if self._cpsd:
             rawindices = self._sidedata.get(sidedatamod.SD_FILESREMOVED)
             if not rawindices:
                 return []
         else:
             rawindices = self.extra.get(b'filesremoved')
         if rawindices is None:
             return None
         return metadata.decodefileindices(self.files, rawindices)

     @property
     def p1copies(self):
         if self._cpsd:
             rawcopies = self._sidedata.get(sidedatamod.SD_P1COPIES)
             if not rawcopies:
                 return {}
         else:
             rawcopies = self.extra.get(b'p1copies')
         if rawcopies is None:
             return None
         return metadata.decodecopies(self.files, rawcopies)

     @property
     def p2copies(self):
         if self._cpsd:
             rawcopies = self._sidedata.get(sidedatamod.SD_P2COPIES)
             if not rawcopies:
                 return {}
         else:
             rawcopies = self.extra.get(b'p2copies')
         if rawcopies is None:
             return None
         return metadata.decodecopies(self.files, rawcopies)

     @property
     def description(self):
         return encoding.tolocal(self._text[self._offsets[3] + 2 :])


 class changelog(revlog.revlog):
     def __init__(self, opener, trypending=False):
         """Load a changelog revlog using an opener.

         If ``trypending`` is true, we attempt to load the index from a
         ``00changelog.i.a`` file instead of the default ``00changelog.i``.
         The ``00changelog.i.a`` file contains index (and possibly inline
         revision) data for a transaction that hasn't been finalized yet.
         It exists in a separate file to facilitate readers (such as
         hooks processes) accessing data before a transaction is finalized.
         """
         if trypending and opener.exists(b'00changelog.i.a'):
             indexfile = b'00changelog.i.a'
         else:
             indexfile = b'00changelog.i'

         datafile = b'00changelog.d'
         revlog.revlog.__init__(
             self,
             opener,
             indexfile,
             datafile=datafile,
             checkambig=True,
             mmaplargeindex=True,
             persistentnodemap=opener.options.get(b'persistent-nodemap', False),
         )

         if self._initempty and (self.version & 0xFFFF == revlog.REVLOGV1):
             # changelogs don't benefit from generaldelta.

             self.version &= ~revlog.FLAG_GENERALDELTA
             self._generaldelta = False

         # Delta chains for changelogs tend to be very small because entries
         # tend to be small and don't delta well with each. So disable delta
         # chains.
         self._storedeltachains = False

         self._realopener = opener
         self._delayed = False
         self._delaybuf = None
         self._divert = False
-        self.filteredrevs = frozenset()
+        self._filteredrevs = frozenset()
+        self._filteredrevs_hashcache = {}
         self._copiesstorage = opener.options.get(b'copies-storage')

+    @property
+    def filteredrevs(self):
+        return self._filteredrevs
+
+    @filteredrevs.setter
+    def filteredrevs(self, val):
+        # Ensure all updates go through this function
+        assert isinstance(val, frozenset)
+        self._filteredrevs = val
+        self._filteredrevs_hashcache = {}
+
     def delayupdate(self, tr):
         """delay visibility of index updates to other readers"""

         if not self._delayed:
             if len(self) == 0:
                 self._divert = True
                 if self._realopener.exists(self.indexfile + b'.a'):
                     self._realopener.unlink(self.indexfile + b'.a')
                 self.opener = _divertopener(self._realopener, self.indexfile)
             else:
                 self._delaybuf = []
                 self.opener = _delayopener(
                     self._realopener, self.indexfile, self._delaybuf
                 )
         self._delayed = True
         tr.addpending(b'cl-%i' % id(self), self._writepending)
         tr.addfinalize(b'cl-%i' % id(self), self._finalize)

     def _finalize(self, tr):
         """finalize index updates"""
         self._delayed = False
         self.opener = self._realopener
         # move redirected index data back into place
         if self._divert:
             assert not self._delaybuf
             tmpname = self.indexfile + b".a"
             nfile = self.opener.open(tmpname)
             nfile.close()
             self.opener.rename(tmpname, self.indexfile, checkambig=True)
         elif self._delaybuf:
             fp = self.opener(self.indexfile, b'a', checkambig=True)
             fp.write(b"".join(self._delaybuf))
             fp.close()
             self._delaybuf = None
         self._divert = False
         # split when we're done
         self._enforceinlinesize(tr)

     def _writepending(self, tr):
         """create a file containing the unfinalized state for
         pretxnchangegroup"""
         if self._delaybuf:
             # make a temporary copy of the index
             fp1 = self._realopener(self.indexfile)
             pendingfilename = self.indexfile + b".a"
             # register as a temp file to ensure cleanup on failure
             tr.registertmp(pendingfilename)
             # write existing data
             fp2 = self._realopener(pendingfilename, b"w")
             fp2.write(fp1.read())
             # add pending data
             fp2.write(b"".join(self._delaybuf))
             fp2.close()
             # switch modes so finalize can simply rename
             self._delaybuf = None
             self._divert = True
             self.opener = _divertopener(self._realopener, self.indexfile)

         if self._divert:
             return True

         return False

     def _enforceinlinesize(self, tr, fp=None):
         if not self._delayed:
             revlog.revlog._enforceinlinesize(self, tr, fp)

     def read(self, node):
         """Obtain data from a parsed changelog revision.

         Returns a 6-tuple of:

         - manifest node in binary
         - author/user as a localstr
         - date as a 2-tuple of (time, timezone)
         - list of files
         - commit message as a localstr
         - dict of extra metadata

         Unless you need to access all fields, consider calling
         ``changelogrevision`` instead, as it is faster for partial object
         access.
         """
         d, s = self._revisiondata(node)
         c = changelogrevision(
             d, s, self._copiesstorage == b'changeset-sidedata'
         )
         return (c.manifest, c.user, c.date, c.files, c.description, c.extra)

     def changelogrevision(self, nodeorrev):
         """Obtain a ``changelogrevision`` for a node or revision."""
         text, sidedata = self._revisiondata(nodeorrev)
         return changelogrevision(
             text, sidedata, self._copiesstorage == b'changeset-sidedata'
         )

     def readfiles(self, node):
         """
         short version of read that only returns the files modified by the cset
         """
         text = self.revision(node)
         if not text:
             return []
         last = text.index(b"\n\n")
         l = text[:last].split(b'\n')
         return l[3:]

     def add(
         self,
         manifest,
         files,
         desc,
         transaction,
         p1,
         p2,
         user,
         date=None,
         extra=None,
     ):
         # Convert to UTF-8 encoded bytestrings as the very first
         # thing: calling any method on a localstr object will turn it
         # into a str object and the cached UTF-8 string is thus lost.
         user, desc = encoding.fromlocal(user), encoding.fromlocal(desc)

         user = user.strip()
         # An empty username or a username with a "\n" will make the
         # revision text contain two "\n\n" sequences -> corrupt
         # repository since read cannot unpack the revision.
         if not user:
             raise error.StorageError(_(b"empty username"))
         if b"\n" in user:
             raise error.StorageError(
                 _(b"username %r contains a newline") % pycompat.bytestr(user)
             )

         desc = stripdesc(desc)

         if date:
             parseddate = b"%d %d" % dateutil.parsedate(date)
         else:
             parseddate = b"%d %d" % dateutil.makedate()
         if extra:
             branch = extra.get(b"branch")
             if branch in (b"default", b""):
                 del extra[b"branch"]
             elif branch in (b".", b"null", b"tip"):
                 raise error.StorageError(
                     _(b'the name \'%s\' is reserved') % branch
                 )
         sortedfiles = sorted(files.touched)
         sidedata = None
         if self._copiesstorage == b'changeset-sidedata':
             sidedata = metadata.encode_copies_sidedata(files)

         if extra:
             extra = encodeextra(extra)
             parseddate = b"%s %s" % (parseddate, extra)
         l = [hex(manifest), user, parseddate] + sortedfiles + [b"", desc]
         text = b"\n".join(l)
         return self.addrevision(
             text, transaction, len(self), p1, p2, sidedata=sidedata
         )

     def branchinfo(self, rev):
         """return the branch name and open/close state of a revision

         This function exists because creating a changectx object
         just to access this is costly."""
         extra = self.read(rev)[5]
         return encoding.tolocal(extra.get(b"branch")), b'close' in extra

     def _nodeduplicatecallback(self, transaction, node):
         # keep track of revisions that got "re-added", eg: unbunde of know rev.
         #
         # We track them in a list to preserve their order from the source bundle
         duplicates = transaction.changes.setdefault(b'revduplicates', [])
         duplicates.append(self.rev(node))
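
The property/setter pair added above is the crux of the fix: `filteredrevs` can no longer be rebound without going through the setter (the comment and the `isinstance` assertion enforce that discipline), so clearing `_filteredrevs_hashcache` there guarantees a memoized digest can never outlive the filter set it was computed from. The `scmutil.filteredhash` hunk below reads and populates that cache.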
@@ -1,2254 +1,2256 b''
1 # scmutil.py - Mercurial core utility functions
1 # scmutil.py - Mercurial core utility functions
2 #
2 #
3 # Copyright Matt Mackall <mpm@selenic.com>
3 # Copyright Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import glob
11 import glob
12 import os
12 import os
13 import posixpath
13 import posixpath
14 import re
14 import re
15 import subprocess
15 import subprocess
16 import weakref
16 import weakref
17
17
18 from .i18n import _
18 from .i18n import _
19 from .node import (
19 from .node import (
20 bin,
20 bin,
21 hex,
21 hex,
22 nullid,
22 nullid,
23 nullrev,
23 nullrev,
24 short,
24 short,
25 wdirid,
25 wdirid,
26 wdirrev,
26 wdirrev,
27 )
27 )
28 from .pycompat import getattr
28 from .pycompat import getattr
29 from .thirdparty import attr
29 from .thirdparty import attr
30 from . import (
30 from . import (
31 copies as copiesmod,
31 copies as copiesmod,
32 encoding,
32 encoding,
33 error,
33 error,
34 match as matchmod,
34 match as matchmod,
35 obsolete,
35 obsolete,
36 obsutil,
36 obsutil,
37 pathutil,
37 pathutil,
38 phases,
38 phases,
39 policy,
39 policy,
40 pycompat,
40 pycompat,
41 requirements as requirementsmod,
41 requirements as requirementsmod,
42 revsetlang,
42 revsetlang,
43 similar,
43 similar,
44 smartset,
44 smartset,
45 url,
45 url,
46 util,
46 util,
47 vfs,
47 vfs,
48 )
48 )
49
49
50 from .utils import (
50 from .utils import (
51 hashutil,
51 hashutil,
52 procutil,
52 procutil,
53 stringutil,
53 stringutil,
54 )
54 )
55
55
56 if pycompat.iswindows:
56 if pycompat.iswindows:
57 from . import scmwindows as scmplatform
57 from . import scmwindows as scmplatform
58 else:
58 else:
59 from . import scmposix as scmplatform
59 from . import scmposix as scmplatform
60
60
61 parsers = policy.importmod('parsers')
61 parsers = policy.importmod('parsers')
62 rustrevlog = policy.importrust('revlog')
62 rustrevlog = policy.importrust('revlog')
63
63
64 termsize = scmplatform.termsize
64 termsize = scmplatform.termsize
65
65
66
66
67 @attr.s(slots=True, repr=False)
67 @attr.s(slots=True, repr=False)
68 class status(object):
68 class status(object):
69 '''Struct with a list of files per status.
69 '''Struct with a list of files per status.
70
70
71 The 'deleted', 'unknown' and 'ignored' properties are only
71 The 'deleted', 'unknown' and 'ignored' properties are only
72 relevant to the working copy.
72 relevant to the working copy.
73 '''
73 '''
74
74
75 modified = attr.ib(default=attr.Factory(list))
75 modified = attr.ib(default=attr.Factory(list))
76 added = attr.ib(default=attr.Factory(list))
76 added = attr.ib(default=attr.Factory(list))
77 removed = attr.ib(default=attr.Factory(list))
77 removed = attr.ib(default=attr.Factory(list))
78 deleted = attr.ib(default=attr.Factory(list))
78 deleted = attr.ib(default=attr.Factory(list))
79 unknown = attr.ib(default=attr.Factory(list))
79 unknown = attr.ib(default=attr.Factory(list))
80 ignored = attr.ib(default=attr.Factory(list))
80 ignored = attr.ib(default=attr.Factory(list))
81 clean = attr.ib(default=attr.Factory(list))
81 clean = attr.ib(default=attr.Factory(list))
82
82
83 def __iter__(self):
83 def __iter__(self):
84 yield self.modified
84 yield self.modified
85 yield self.added
85 yield self.added
86 yield self.removed
86 yield self.removed
87 yield self.deleted
87 yield self.deleted
88 yield self.unknown
88 yield self.unknown
89 yield self.ignored
89 yield self.ignored
90 yield self.clean
90 yield self.clean
91
91
92 def __repr__(self):
92 def __repr__(self):
93 return (
93 return (
94 r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
94 r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
95 r'unknown=%s, ignored=%s, clean=%s>'
95 r'unknown=%s, ignored=%s, clean=%s>'
96 ) % tuple(pycompat.sysstr(stringutil.pprint(v)) for v in self)
96 ) % tuple(pycompat.sysstr(stringutil.pprint(v)) for v in self)
97
97
98
98
99 def itersubrepos(ctx1, ctx2):
99 def itersubrepos(ctx1, ctx2):
100 """find subrepos in ctx1 or ctx2"""
100 """find subrepos in ctx1 or ctx2"""
101 # Create a (subpath, ctx) mapping where we prefer subpaths from
101 # Create a (subpath, ctx) mapping where we prefer subpaths from
102 # ctx1. The subpaths from ctx2 are important when the .hgsub file
102 # ctx1. The subpaths from ctx2 are important when the .hgsub file
103 # has been modified (in ctx2) but not yet committed (in ctx1).
103 # has been modified (in ctx2) but not yet committed (in ctx1).
104 subpaths = dict.fromkeys(ctx2.substate, ctx2)
104 subpaths = dict.fromkeys(ctx2.substate, ctx2)
105 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
105 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
106
106
107 missing = set()
107 missing = set()
108
108
109 for subpath in ctx2.substate:
109 for subpath in ctx2.substate:
110 if subpath not in ctx1.substate:
110 if subpath not in ctx1.substate:
111 del subpaths[subpath]
111 del subpaths[subpath]
112 missing.add(subpath)
112 missing.add(subpath)
113
113
114 for subpath, ctx in sorted(pycompat.iteritems(subpaths)):
114 for subpath, ctx in sorted(pycompat.iteritems(subpaths)):
115 yield subpath, ctx.sub(subpath)
115 yield subpath, ctx.sub(subpath)
116
116
117 # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
117 # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
118 # status and diff will have an accurate result when it does
118 # status and diff will have an accurate result when it does
119 # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
119 # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
120 # against itself.
120 # against itself.
121 for subpath in missing:
121 for subpath in missing:
122 yield subpath, ctx2.nullsub(subpath, ctx1)
122 yield subpath, ctx2.nullsub(subpath, ctx1)
123
123
124
124
125 def nochangesfound(ui, repo, excluded=None):
125 def nochangesfound(ui, repo, excluded=None):
126 '''Report no changes for push/pull, excluded is None or a list of
126 '''Report no changes for push/pull, excluded is None or a list of
127 nodes excluded from the push/pull.
127 nodes excluded from the push/pull.
128 '''
128 '''
129 secretlist = []
129 secretlist = []
130 if excluded:
130 if excluded:
131 for n in excluded:
131 for n in excluded:
132 ctx = repo[n]
132 ctx = repo[n]
133 if ctx.phase() >= phases.secret and not ctx.extinct():
133 if ctx.phase() >= phases.secret and not ctx.extinct():
134 secretlist.append(n)
134 secretlist.append(n)
135
135
136 if secretlist:
136 if secretlist:
137 ui.status(
137 ui.status(
138 _(b"no changes found (ignored %d secret changesets)\n")
138 _(b"no changes found (ignored %d secret changesets)\n")
139 % len(secretlist)
139 % len(secretlist)
140 )
140 )
141 else:
141 else:
142 ui.status(_(b"no changes found\n"))
142 ui.status(_(b"no changes found\n"))
143
143
144
144
145 def callcatch(ui, func):
145 def callcatch(ui, func):
146 """call func() with global exception handling
146 """call func() with global exception handling
147
147
148 return func() if no exception happens. otherwise do some error handling
148 return func() if no exception happens. otherwise do some error handling
149 and return an exit code accordingly. does not handle all exceptions.
149 and return an exit code accordingly. does not handle all exceptions.
150 """
150 """
151 try:
151 try:
152 try:
152 try:
153 return func()
153 return func()
154 except: # re-raises
154 except: # re-raises
155 ui.traceback()
155 ui.traceback()
156 raise
156 raise
157 # Global exception handling, alphabetically
157 # Global exception handling, alphabetically
158 # Mercurial-specific first, followed by built-in and library exceptions
158 # Mercurial-specific first, followed by built-in and library exceptions
159 except error.LockHeld as inst:
159 except error.LockHeld as inst:
160 if inst.errno == errno.ETIMEDOUT:
160 if inst.errno == errno.ETIMEDOUT:
161 reason = _(b'timed out waiting for lock held by %r') % (
161 reason = _(b'timed out waiting for lock held by %r') % (
162 pycompat.bytestr(inst.locker)
162 pycompat.bytestr(inst.locker)
163 )
163 )
164 else:
164 else:
165 reason = _(b'lock held by %r') % inst.locker
165 reason = _(b'lock held by %r') % inst.locker
166 ui.error(
166 ui.error(
167 _(b"abort: %s: %s\n")
167 _(b"abort: %s: %s\n")
168 % (inst.desc or stringutil.forcebytestr(inst.filename), reason)
168 % (inst.desc or stringutil.forcebytestr(inst.filename), reason)
169 )
169 )
170 if not inst.locker:
170 if not inst.locker:
171 ui.error(_(b"(lock might be very busy)\n"))
171 ui.error(_(b"(lock might be very busy)\n"))
172 except error.LockUnavailable as inst:
172 except error.LockUnavailable as inst:
173 ui.error(
173 ui.error(
174 _(b"abort: could not lock %s: %s\n")
174 _(b"abort: could not lock %s: %s\n")
175 % (
175 % (
176 inst.desc or stringutil.forcebytestr(inst.filename),
176 inst.desc or stringutil.forcebytestr(inst.filename),
177 encoding.strtolocal(inst.strerror),
177 encoding.strtolocal(inst.strerror),
178 )
178 )
179 )
179 )
180 except error.OutOfBandError as inst:
180 except error.OutOfBandError as inst:
181 if inst.args:
181 if inst.args:
182 msg = _(b"abort: remote error:\n")
182 msg = _(b"abort: remote error:\n")
183 else:
183 else:
184 msg = _(b"abort: remote error\n")
184 msg = _(b"abort: remote error\n")
185 ui.error(msg)
185 ui.error(msg)
186 if inst.args:
186 if inst.args:
187 ui.error(b''.join(inst.args))
187 ui.error(b''.join(inst.args))
188 if inst.hint:
188 if inst.hint:
189 ui.error(b'(%s)\n' % inst.hint)
189 ui.error(b'(%s)\n' % inst.hint)
190 except error.RepoError as inst:
190 except error.RepoError as inst:
191 ui.error(_(b"abort: %s!\n") % inst)
191 ui.error(_(b"abort: %s!\n") % inst)
192 if inst.hint:
192 if inst.hint:
193 ui.error(_(b"(%s)\n") % inst.hint)
193 ui.error(_(b"(%s)\n") % inst.hint)
194 except error.ResponseError as inst:
194 except error.ResponseError as inst:
195 ui.error(_(b"abort: %s") % inst.args[0])
195 ui.error(_(b"abort: %s") % inst.args[0])
196 msg = inst.args[1]
196 msg = inst.args[1]
197 if isinstance(msg, type(u'')):
197 if isinstance(msg, type(u'')):
198 msg = pycompat.sysbytes(msg)
198 msg = pycompat.sysbytes(msg)
199 if not isinstance(msg, bytes):
199 if not isinstance(msg, bytes):
200 ui.error(b" %r\n" % (msg,))
200 ui.error(b" %r\n" % (msg,))
201 elif not msg:
201 elif not msg:
202 ui.error(_(b" empty string\n"))
202 ui.error(_(b" empty string\n"))
203 else:
203 else:
204 ui.error(b"\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
204 ui.error(b"\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
205 except error.CensoredNodeError as inst:
205 except error.CensoredNodeError as inst:
206 ui.error(_(b"abort: file censored %s!\n") % inst)
206 ui.error(_(b"abort: file censored %s!\n") % inst)
207 except error.StorageError as inst:
207 except error.StorageError as inst:
208 ui.error(_(b"abort: %s!\n") % inst)
208 ui.error(_(b"abort: %s!\n") % inst)
209 if inst.hint:
209 if inst.hint:
210 ui.error(_(b"(%s)\n") % inst.hint)
210 ui.error(_(b"(%s)\n") % inst.hint)
211 except error.InterventionRequired as inst:
211 except error.InterventionRequired as inst:
212 ui.error(b"%s\n" % inst)
212 ui.error(b"%s\n" % inst)
213 if inst.hint:
213 if inst.hint:
214 ui.error(_(b"(%s)\n") % inst.hint)
214 ui.error(_(b"(%s)\n") % inst.hint)
215 return 1
215 return 1
216 except error.WdirUnsupported:
216 except error.WdirUnsupported:
217 ui.error(_(b"abort: working directory revision cannot be specified\n"))
217 ui.error(_(b"abort: working directory revision cannot be specified\n"))
218 except error.Abort as inst:
218 except error.Abort as inst:
219 ui.error(_(b"abort: %s\n") % inst)
219 ui.error(_(b"abort: %s\n") % inst)
220 if inst.hint:
220 if inst.hint:
221 ui.error(_(b"(%s)\n") % inst.hint)
221 ui.error(_(b"(%s)\n") % inst.hint)
222 except ImportError as inst:
222 except ImportError as inst:
223 ui.error(_(b"abort: %s!\n") % stringutil.forcebytestr(inst))
223 ui.error(_(b"abort: %s!\n") % stringutil.forcebytestr(inst))
224 m = stringutil.forcebytestr(inst).split()[-1]
224 m = stringutil.forcebytestr(inst).split()[-1]
225 if m in b"mpatch bdiff".split():
225 if m in b"mpatch bdiff".split():
226 ui.error(_(b"(did you forget to compile extensions?)\n"))
226 ui.error(_(b"(did you forget to compile extensions?)\n"))
227 elif m in b"zlib".split():
227 elif m in b"zlib".split():
228 ui.error(_(b"(is your Python install correct?)\n"))
228 ui.error(_(b"(is your Python install correct?)\n"))
229 except (IOError, OSError) as inst:
229 except (IOError, OSError) as inst:
230 if util.safehasattr(inst, b"code"): # HTTPError
230 if util.safehasattr(inst, b"code"): # HTTPError
231 ui.error(_(b"abort: %s\n") % stringutil.forcebytestr(inst))
231 ui.error(_(b"abort: %s\n") % stringutil.forcebytestr(inst))
232 elif util.safehasattr(inst, b"reason"): # URLError or SSLError
232 elif util.safehasattr(inst, b"reason"): # URLError or SSLError
233 try: # usually it is in the form (errno, strerror)
233 try: # usually it is in the form (errno, strerror)
234 reason = inst.reason.args[1]
234 reason = inst.reason.args[1]
235 except (AttributeError, IndexError):
235 except (AttributeError, IndexError):
236 # it might be anything, for example a string
236 # it might be anything, for example a string
237 reason = inst.reason
237 reason = inst.reason
238 if isinstance(reason, pycompat.unicode):
238 if isinstance(reason, pycompat.unicode):
239 # SSLError of Python 2.7.9 contains a unicode
239 # SSLError of Python 2.7.9 contains a unicode
240 reason = encoding.unitolocal(reason)
240 reason = encoding.unitolocal(reason)
241 ui.error(_(b"abort: error: %s\n") % stringutil.forcebytestr(reason))
241 ui.error(_(b"abort: error: %s\n") % stringutil.forcebytestr(reason))
242 elif (
242 elif (
243 util.safehasattr(inst, b"args")
243 util.safehasattr(inst, b"args")
244 and inst.args
244 and inst.args
245 and inst.args[0] == errno.EPIPE
245 and inst.args[0] == errno.EPIPE
246 ):
246 ):
247 pass
247 pass
248 elif getattr(inst, "strerror", None): # common IOError or OSError
248 elif getattr(inst, "strerror", None): # common IOError or OSError
249 if getattr(inst, "filename", None) is not None:
249 if getattr(inst, "filename", None) is not None:
250 ui.error(
250 ui.error(
251 _(b"abort: %s: '%s'\n")
251 _(b"abort: %s: '%s'\n")
252 % (
252 % (
253 encoding.strtolocal(inst.strerror),
253 encoding.strtolocal(inst.strerror),
254 stringutil.forcebytestr(inst.filename),
254 stringutil.forcebytestr(inst.filename),
255 )
255 )
256 )
256 )
257 else:
257 else:
258 ui.error(_(b"abort: %s\n") % encoding.strtolocal(inst.strerror))
258 ui.error(_(b"abort: %s\n") % encoding.strtolocal(inst.strerror))
259 else: # suspicious IOError
259 else: # suspicious IOError
260 raise
260 raise
261 except MemoryError:
261 except MemoryError:
262 ui.error(_(b"abort: out of memory\n"))
262 ui.error(_(b"abort: out of memory\n"))
263 except SystemExit as inst:
263 except SystemExit as inst:
264 # Commands shouldn't sys.exit directly, but give a return code.
264 # Commands shouldn't sys.exit directly, but give a return code.
265 # Just in case catch this and and pass exit code to caller.
265 # Just in case catch this and and pass exit code to caller.
266 return inst.code
266 return inst.code
267
267
268 return -1
268 return -1
269
269
270
270
271 def checknewlabel(repo, lbl, kind):
271 def checknewlabel(repo, lbl, kind):
272 # Do not use the "kind" parameter in ui output.
272 # Do not use the "kind" parameter in ui output.
273 # It makes strings difficult to translate.
273 # It makes strings difficult to translate.
274 if lbl in [b'tip', b'.', b'null']:
274 if lbl in [b'tip', b'.', b'null']:
275 raise error.Abort(_(b"the name '%s' is reserved") % lbl)
275 raise error.Abort(_(b"the name '%s' is reserved") % lbl)
276 for c in (b':', b'\0', b'\n', b'\r'):
276 for c in (b':', b'\0', b'\n', b'\r'):
277 if c in lbl:
277 if c in lbl:
278 raise error.Abort(
278 raise error.Abort(
279 _(b"%r cannot be used in a name") % pycompat.bytestr(c)
279 _(b"%r cannot be used in a name") % pycompat.bytestr(c)
280 )
280 )
281 try:
281 try:
282 int(lbl)
282 int(lbl)
283 raise error.Abort(_(b"cannot use an integer as a name"))
283 raise error.Abort(_(b"cannot use an integer as a name"))
284 except ValueError:
284 except ValueError:
285 pass
285 pass
286 if lbl.strip() != lbl:
286 if lbl.strip() != lbl:
287 raise error.Abort(_(b"leading or trailing whitespace in name %r") % lbl)
287 raise error.Abort(_(b"leading or trailing whitespace in name %r") % lbl)
288
288
289
289
290 def checkfilename(f):
290 def checkfilename(f):
291 '''Check that the filename f is an acceptable filename for a tracked file'''
291 '''Check that the filename f is an acceptable filename for a tracked file'''
292 if b'\r' in f or b'\n' in f:
292 if b'\r' in f or b'\n' in f:
293 raise error.Abort(
293 raise error.Abort(
294 _(b"'\\n' and '\\r' disallowed in filenames: %r")
294 _(b"'\\n' and '\\r' disallowed in filenames: %r")
295 % pycompat.bytestr(f)
295 % pycompat.bytestr(f)
296 )
296 )
297
297
298
298
299 def checkportable(ui, f):
299 def checkportable(ui, f):
300 '''Check if filename f is portable and warn or abort depending on config'''
300 '''Check if filename f is portable and warn or abort depending on config'''
301 checkfilename(f)
301 checkfilename(f)
302 abort, warn = checkportabilityalert(ui)
302 abort, warn = checkportabilityalert(ui)
303 if abort or warn:
303 if abort or warn:
304 msg = util.checkwinfilename(f)
304 msg = util.checkwinfilename(f)
305 if msg:
305 if msg:
306 msg = b"%s: %s" % (msg, procutil.shellquote(f))
306 msg = b"%s: %s" % (msg, procutil.shellquote(f))
307 if abort:
307 if abort:
308 raise error.Abort(msg)
308 raise error.Abort(msg)
309 ui.warn(_(b"warning: %s\n") % msg)
309 ui.warn(_(b"warning: %s\n") % msg)
310
310
311
311
312 def checkportabilityalert(ui):
312 def checkportabilityalert(ui):
313 '''check if the user's config requests nothing, a warning, or abort for
313 '''check if the user's config requests nothing, a warning, or abort for
314 non-portable filenames'''
314 non-portable filenames'''
315 val = ui.config(b'ui', b'portablefilenames')
315 val = ui.config(b'ui', b'portablefilenames')
316 lval = val.lower()
316 lval = val.lower()
317 bval = stringutil.parsebool(val)
317 bval = stringutil.parsebool(val)
318 abort = pycompat.iswindows or lval == b'abort'
318 abort = pycompat.iswindows or lval == b'abort'
319 warn = bval or lval == b'warn'
319 warn = bval or lval == b'warn'
320 if bval is None and not (warn or abort or lval == b'ignore'):
320 if bval is None and not (warn or abort or lval == b'ignore'):
321 raise error.ConfigError(
321 raise error.ConfigError(
322 _(b"ui.portablefilenames value is invalid ('%s')") % val
322 _(b"ui.portablefilenames value is invalid ('%s')") % val
323 )
323 )
324 return abort, warn
324 return abort, warn
325
325
326
326
327 class casecollisionauditor(object):
327 class casecollisionauditor(object):
328 def __init__(self, ui, abort, dirstate):
328 def __init__(self, ui, abort, dirstate):
329 self._ui = ui
329 self._ui = ui
330 self._abort = abort
330 self._abort = abort
331 allfiles = b'\0'.join(dirstate)
331 allfiles = b'\0'.join(dirstate)
332 self._loweredfiles = set(encoding.lower(allfiles).split(b'\0'))
332 self._loweredfiles = set(encoding.lower(allfiles).split(b'\0'))
333 self._dirstate = dirstate
333 self._dirstate = dirstate
334 # The purpose of _newfiles is so that we don't complain about
334 # The purpose of _newfiles is so that we don't complain about
335 # case collisions if someone were to call this object with the
335 # case collisions if someone were to call this object with the
336 # same filename twice.
336 # same filename twice.
337 self._newfiles = set()
337 self._newfiles = set()
338
338
339 def __call__(self, f):
339 def __call__(self, f):
340 if f in self._newfiles:
340 if f in self._newfiles:
341 return
341 return
342 fl = encoding.lower(f)
342 fl = encoding.lower(f)
343 if fl in self._loweredfiles and f not in self._dirstate:
343 if fl in self._loweredfiles and f not in self._dirstate:
344 msg = _(b'possible case-folding collision for %s') % f
344 msg = _(b'possible case-folding collision for %s') % f
345 if self._abort:
345 if self._abort:
346 raise error.Abort(msg)
346 raise error.Abort(msg)
347 self._ui.warn(_(b"warning: %s\n") % msg)
347 self._ui.warn(_(b"warning: %s\n") % msg)
348 self._loweredfiles.add(fl)
348 self._loweredfiles.add(fl)
349 self._newfiles.add(f)
349 self._newfiles.add(f)
350
350
351
351
352 def filteredhash(repo, maxrev):
352 def filteredhash(repo, maxrev):
353 """build hash of filtered revisions in the current repoview.
353 """build hash of filtered revisions in the current repoview.
354
354
355 Multiple caches perform up-to-date validation by checking that the
355 Multiple caches perform up-to-date validation by checking that the
356 tiprev and tipnode stored in the cache file match the current repository.
356 tiprev and tipnode stored in the cache file match the current repository.
357 However, this is not sufficient for validating repoviews because the set
357 However, this is not sufficient for validating repoviews because the set
358 of revisions in the view may change without the repository tiprev and
358 of revisions in the view may change without the repository tiprev and
359 tipnode changing.
359 tipnode changing.
360
360
361 This function hashes all the revs filtered from the view and returns
361 This function hashes all the revs filtered from the view and returns
362 that SHA-1 digest.
362 that SHA-1 digest.
363 """
363 """
364 cl = repo.changelog
364 cl = repo.changelog
365 if not cl.filteredrevs:
365 if not cl.filteredrevs:
366 return None
366 return None
367 key = None
367 key = cl._filteredrevs_hashcache.get(maxrev)
368 if not key:
368 revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
369 revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
369 if revs:
370 if revs:
370 s = hashutil.sha1()
371 s = hashutil.sha1()
371 for rev in revs:
372 for rev in revs:
372 s.update(b'%d;' % rev)
373 s.update(b'%d;' % rev)
373 key = s.digest()
374 key = s.digest()
375 cl._filteredrevs_hashcache[maxrev] = key
374 return key
376 return key
375
377
376
378
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs'''

    def errhandler(err):
        if err.filename == path:
            raise err

    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:

        def adddir(dirlst, dirname):
            dirstat = os.stat(dirname)
            match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
            if not match:
                dirlst.append(dirstat)
            return not match

    else:
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if b'.hg' in dirs:
            yield root  # found a repository
            qroot = os.path.join(root, b'.hg', b'patches')
            if os.path.isdir(os.path.join(qroot, b'.hg')):
                yield qroot  # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove(b'.hg')
            else:
                dirs[:] = []  # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs


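# Editor's illustrative sketch (not part of the original module): walkrepos()
# is a generator, so callers simply iterate it. The byte-string root path is
# hypothetical.
def _examplefindrepos(root=b'/srv/repos'):
    # Follow symlinks but do not descend into the working directories of
    # repositories that were already found (recurse defaults to False).
    return sorted(walkrepos(root, followsym=True))

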
def binnode(ctx):
    """Return binary node id for a given basectx"""
    node = ctx.node()
    if node is None:
        return wdirid
    return node


def intrev(ctx):
    """Return integer for a given basectx that can be used in comparison or
    arithmetic operation"""
    rev = ctx.rev()
    if rev is None:
        return wdirrev
    return rev


def formatchangeid(ctx):
    """Format changectx as '{rev}:{node|formatnode}', which is the default
    template provided by logcmdutil.changesettemplater"""
    repo = ctx.repo()
    return formatrevnode(repo.ui, intrev(ctx), binnode(ctx))


def formatrevnode(ui, rev, node):
    """Format given revision and node depending on the current verbosity"""
    if ui.debugflag:
        hexfunc = hex
    else:
        hexfunc = short
    return b'%d:%s' % (rev, hexfunc(node))


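# Editor's illustrative sketch (not part of the original module): the usual
# way these helpers combine. With ui.debugflag unset the result uses the
# short hash; under --debug it carries the full 40-digit hex node.
def _exampleformat(repo, rev):
    ctx = repo[rev]
    return formatchangeid(ctx)  # e.g. b'0:1ea73414a91b' (hash illustrative)

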
def resolvehexnodeidprefix(repo, prefix):
    if prefix.startswith(b'x'):
        prefix = prefix[1:]
    try:
        # Uses unfiltered repo because it's faster when prefix is ambiguous.
        # This matches the shortesthexnodeidprefix() function below.
        node = repo.unfiltered().changelog._partialmatch(prefix)
    except error.AmbiguousPrefixLookupError:
        revset = repo.ui.config(
            b'experimental', b'revisions.disambiguatewithin'
        )
        if revset:
            # Clear config to avoid infinite recursion
            configoverrides = {
                (b'experimental', b'revisions.disambiguatewithin'): None
            }
            with repo.ui.configoverride(configoverrides):
                revs = repo.anyrevs([revset], user=True)
            matches = []
            for rev in revs:
                node = repo.changelog.node(rev)
                if hex(node).startswith(prefix):
                    matches.append(node)
            if len(matches) == 1:
                return matches[0]
        raise
    if node is None:
        return
    repo.changelog.rev(node)  # make sure node isn't filtered
    return node


def mayberevnum(repo, prefix):
    """Checks if the given prefix may be mistaken for a revision number"""
    try:
        i = int(prefix)
        # if we are a pure int, then starting with zero will not be
        # confused as a rev; or, obviously, if the int is larger
        # than the value of the tip rev. We still need to disambiguate if
        # prefix == '0', since that *is* a valid revnum.
        if (prefix != b'0' and prefix[0:1] == b'0') or i >= len(repo):
            return False
        return True
    except ValueError:
        return False


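# Editor's illustrative sketch (not part of the original module): concrete
# cases of the rule above, assuming a repository with 100 revisions.
def _examplemayberevnum(repo):
    assert mayberevnum(repo, b'42') is True     # plain int within range
    assert mayberevnum(repo, b'042') is False   # leading zero: never a revnum
    assert mayberevnum(repo, b'0') is True      # '0' *is* a valid revnum
    assert mayberevnum(repo, b'cafe') is False  # not an int at all

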
def shortesthexnodeidprefix(repo, node, minlength=1, cache=None):
    """Find the shortest unambiguous prefix that matches hexnode.

    If "cache" is not None, it must be a dictionary that can be used for
    caching between calls to this method.
    """
    # _partialmatch() of filtered changelog could take O(len(repo)) time,
    # which would be unacceptably slow. so we look for hash collision in
    # unfiltered space, which means some hashes may be slightly longer.

    minlength = max(minlength, 1)

    def disambiguate(prefix):
        """Disambiguate against revnums."""
        if repo.ui.configbool(b'experimental', b'revisions.prefixhexnode'):
            if mayberevnum(repo, prefix):
                return b'x' + prefix
            else:
                return prefix

        hexnode = hex(node)
        for length in range(len(prefix), len(hexnode) + 1):
            prefix = hexnode[:length]
            if not mayberevnum(repo, prefix):
                return prefix

    cl = repo.unfiltered().changelog
    revset = repo.ui.config(b'experimental', b'revisions.disambiguatewithin')
    if revset:
        revs = None
        if cache is not None:
            revs = cache.get(b'disambiguationrevset')
        if revs is None:
            revs = repo.anyrevs([revset], user=True)
            if cache is not None:
                cache[b'disambiguationrevset'] = revs
        if cl.rev(node) in revs:
            hexnode = hex(node)
            nodetree = None
            if cache is not None:
                nodetree = cache.get(b'disambiguationnodetree')
            if not nodetree:
                if util.safehasattr(parsers, 'nodetree'):
                    # The CExt is the only implementation to provide a nodetree
                    # class so far.
                    index = cl.index
                    if util.safehasattr(index, 'get_cindex'):
                        # the rust wrapper needs to give access to its
                        # internal index
                        index = index.get_cindex()
                    nodetree = parsers.nodetree(index, len(revs))
                    for r in revs:
                        nodetree.insert(r)
                    if cache is not None:
                        cache[b'disambiguationnodetree'] = nodetree
            if nodetree is not None:
                length = max(nodetree.shortest(node), minlength)
                prefix = hexnode[:length]
                return disambiguate(prefix)
            for length in range(minlength, len(hexnode) + 1):
                matches = []
                prefix = hexnode[:length]
                for rev in revs:
                    otherhexnode = repo[rev].hex()
                    if prefix == otherhexnode[:length]:
                        matches.append(otherhexnode)
                if len(matches) == 1:
                    return disambiguate(prefix)

    try:
        return disambiguate(cl.shortest(node, minlength))
    except error.LookupError:
        raise error.RepoLookupError()


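# Editor's illustrative sketch (not part of the original module): callers
# that shorten many nodes should share one cache dict, as the docstring
# above suggests, so the disambiguation revset and nodetree are computed
# only once.
def _exampleshortenall(repo, nodes, minlength=1):
    cache = {}
    return [
        shortesthexnodeidprefix(repo, n, minlength, cache=cache)
        for n in nodes
    ]

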
def isrevsymbol(repo, symbol):
    """Checks if a symbol exists in the repo.

    See revsymbol() for details. Raises error.AmbiguousPrefixLookupError if the
    symbol is an ambiguous nodeid prefix.
    """
    try:
        revsymbol(repo, symbol)
        return True
    except error.RepoLookupError:
        return False


def revsymbol(repo, symbol):
    """Returns a context given a single revision symbol (as string).

    This is similar to revsingle(), but accepts only a single revision symbol,
    i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but
    not "max(public())".
    """
    if not isinstance(symbol, bytes):
        msg = (
            b"symbol (%s of type %s) was not a string, did you mean "
            b"repo[symbol]?" % (symbol, type(symbol))
        )
        raise error.ProgrammingError(msg)
    try:
        if symbol in (b'.', b'tip', b'null'):
            return repo[symbol]

        try:
            r = int(symbol)
            if b'%d' % r != symbol:
                raise ValueError
            l = len(repo.changelog)
            if r < 0:
                r += l
            if r < 0 or r >= l and r != wdirrev:
                raise ValueError
            return repo[r]
        except error.FilteredIndexError:
            raise
        except (ValueError, OverflowError, IndexError):
            pass

        if len(symbol) == 40:
            try:
                node = bin(symbol)
                rev = repo.changelog.rev(node)
                return repo[rev]
            except error.FilteredLookupError:
                raise
            except (TypeError, LookupError):
                pass

        # look up bookmarks through the name interface
        try:
            node = repo.names.singlenode(repo, symbol)
            rev = repo.changelog.rev(node)
            return repo[rev]
        except KeyError:
            pass

        node = resolvehexnodeidprefix(repo, symbol)
        if node is not None:
            rev = repo.changelog.rev(node)
            return repo[rev]

        raise error.RepoLookupError(_(b"unknown revision '%s'") % symbol)

    except error.WdirUnsupported:
        return repo[None]
    except (
        error.FilteredIndexError,
        error.FilteredLookupError,
        error.FilteredRepoLookupError,
    ):
        raise _filterederror(repo, symbol)


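# Editor's illustrative sketch (not part of the original module): typical
# use, guarding against bad user input with isrevsymbol() first.
def _exampleresolve(repo, userinput=b'tip'):
    if isrevsymbol(repo, userinput):
        return revsymbol(repo, userinput)  # a changectx
    return None

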
def _filterederror(repo, changeid):
    """build an exception to be raised about a filtered changeid

    This is extracted in a function to help extensions (eg: evolve) to
    experiment with various message variants."""
    if repo.filtername.startswith(b'visible'):

        # Check if the changeset is obsolete
        unfilteredrepo = repo.unfiltered()
        ctx = revsymbol(unfilteredrepo, changeid)

        # If the changeset is obsolete, enrich the message with the reason
        # that made this changeset not visible
        if ctx.obsolete():
            msg = obsutil._getfilteredreason(repo, changeid, ctx)
        else:
            msg = _(b"hidden revision '%s'") % changeid

        hint = _(b'use --hidden to access hidden revisions')

        return error.FilteredRepoLookupError(msg, hint=hint)
    msg = _(b"filtered revision '%s' (not in '%s' subset)")
    msg %= (changeid, repo.filtername)
    return error.FilteredRepoLookupError(msg)


def revsingle(repo, revspec, default=b'.', localalias=None):
    if not revspec and revspec != 0:
        return repo[default]

    l = revrange(repo, [revspec], localalias=localalias)
    if not l:
        raise error.Abort(_(b'empty revision set'))
    return repo[l.last()]


def _pairspec(revspec):
    tree = revsetlang.parse(revspec)
    return tree and tree[0] in (
        b'range',
        b'rangepre',
        b'rangepost',
        b'rangeall',
    )


def revpair(repo, revs):
    if not revs:
        return repo[b'.'], repo[None]

    l = revrange(repo, revs)

    if not l:
        raise error.Abort(_(b'empty revision range'))

    first = l.first()
    second = l.last()

    if (
        first == second
        and len(revs) >= 2
        and not all(revrange(repo, [r]) for r in revs)
    ):
        raise error.Abort(_(b'empty revision on one side of range'))

    # if top-level is range expression, the result must always be a pair
    if first == second and len(revs) == 1 and not _pairspec(revs[0]):
        return repo[first], repo[None]

    return repo[first], repo[second]


def revrange(repo, specs, localalias=None):
    """Execute 1 to many revsets and return the union.

    This is the preferred mechanism for executing revsets using user-specified
    config options, such as revset aliases.

    The revsets specified by ``specs`` will be executed via a chained ``OR``
    expression. If ``specs`` is empty, an empty result is returned.

    ``specs`` can contain integers, in which case they are assumed to be
    revision numbers.

    It is assumed the revsets are already formatted. If you have arguments
    that need to be expanded in the revset, call ``revsetlang.formatspec()``
    and pass the result as an element of ``specs``.

    Specifying a single revset is allowed.

    Returns a ``smartset.abstractsmartset`` which is a list-like interface over
    integer revisions.
    """
    allspecs = []
    for spec in specs:
        if isinstance(spec, int):
            spec = revsetlang.formatspec(b'%d', spec)
        allspecs.append(spec)
    return repo.anyrevs(allspecs, user=True, localalias=localalias)


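# Editor's illustrative sketch (not part of the original module): expanding
# an argument safely with revsetlang.formatspec() before passing it in, as
# the docstring above recommends. The branch name is hypothetical.
def _exampledraftsonbranch(repo, branch=b'default'):
    spec = revsetlang.formatspec(b'branch(%s) and draft()', branch)
    return revrange(repo, [spec])  # a smartset of integer revisions

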
def meaningfulparents(repo, ctx):
    """Return list of meaningful (or all if debug) parentrevs for rev.

    For merges (two non-nullrev revisions) both parents are meaningful.
    Otherwise the first parent revision is considered meaningful if it
    is not the preceding revision.
    """
    parents = ctx.parents()
    if len(parents) > 1:
        return parents
    if repo.ui.debugflag:
        return [parents[0], repo[nullrev]]
    if parents[0].rev() >= intrev(ctx) - 1:
        return []
    return parents


def getuipathfn(repo, legacyrelativevalue=False, forcerelativevalue=None):
    """Return a function that produces paths for presenting to the user.

    The returned function takes a repo-relative path and produces a path
    that can be presented in the UI.

    Depending on the value of ui.relative-paths, either a repo-relative or
    cwd-relative path will be produced.

    legacyrelativevalue is the value to use if ui.relative-paths=legacy

    If forcerelativevalue is not None, then that value will be used regardless
    of what ui.relative-paths is set to.
    """
    if forcerelativevalue is not None:
        relative = forcerelativevalue
    else:
        config = repo.ui.config(b'ui', b'relative-paths')
        if config == b'legacy':
            relative = legacyrelativevalue
        else:
            relative = stringutil.parsebool(config)
            if relative is None:
                raise error.ConfigError(
                    _(b"ui.relative-paths is not a boolean ('%s')") % config
                )

    if relative:
        cwd = repo.getcwd()
        if cwd != b'':
            # this branch would work even if cwd == b'' (ie cwd = repo
            # root), but its generality makes the returned function slower
            pathto = repo.pathto
            return lambda f: pathto(f, cwd)
    if repo.ui.configbool(b'ui', b'slash'):
        return lambda f: f
    else:
        return util.localpath


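# Editor's illustrative sketch (not part of the original module): a command
# would typically build the path function once and map it over the files it
# reports, so all output honours ui.relative-paths consistently.
def _exampleprintfiles(repo, files):
    uipathfn = getuipathfn(repo, legacyrelativevalue=True)
    for f in files:
        repo.ui.write(b'%s\n' % uipathfn(f))

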
def subdiruipathfn(subpath, uipathfn):
    '''Create a new uipathfn that treats the file as relative to subpath.'''
    return lambda f: uipathfn(posixpath.join(subpath, f))


def anypats(pats, opts):
    '''Checks if any patterns, including --include and --exclude were given.

    Some commands (e.g. addremove) use this condition for deciding whether to
    print absolute or relative paths.
    '''
    return bool(pats or opts.get(b'include') or opts.get(b'exclude'))


def expandpats(pats):
    '''Expand bare globs when running on windows.
    On posix we assume it has already been done by sh.'''
    if not util.expandglobs:
        return list(pats)
    ret = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is None:
            try:
                globbed = glob.glob(pat)
            except re.error:
                globbed = [pat]
            if globbed:
                ret.extend(globbed)
                continue
        ret.append(kindpat)
    return ret


def matchandpats(
    ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
):
    '''Return a matcher and the patterns that were used.
    The matcher will warn about bad matches, unless an alternate badfn callback
    is provided.'''
    if opts is None:
        opts = {}
    if not globbed and default == b'relpath':
        pats = expandpats(pats or [])

    uipathfn = getuipathfn(ctx.repo(), legacyrelativevalue=True)

    def bad(f, msg):
        ctx.repo().ui.warn(b"%s: %s\n" % (uipathfn(f), msg))

    if badfn is None:
        badfn = bad

    m = ctx.match(
        pats,
        opts.get(b'include'),
        opts.get(b'exclude'),
        default,
        listsubrepos=opts.get(b'subrepos'),
        badfn=badfn,
    )

    if m.always():
        pats = []
    return m, pats


def match(
    ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
):
    '''Return a matcher that will warn about bad matches.'''
    return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]


def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    return matchmod.always()


def matchfiles(repo, files, badfn=None):
    '''Return a matcher that will efficiently match exactly these files.'''
    return matchmod.exact(files, badfn=badfn)


def parsefollowlinespattern(repo, rev, pat, msg):
    """Return a file name from `pat` pattern suitable for usage in followlines
    logic.
    """
    if not matchmod.patkind(pat):
        return pathutil.canonpath(repo.root, repo.getcwd(), pat)
    else:
        ctx = repo[rev]
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
        files = [f for f in ctx if m(f)]
        if len(files) != 1:
            raise error.ParseError(msg)
        return files[0]


def getorigvfs(ui, repo):
    """return a vfs suitable to save 'orig' file

    return None if no special directory is configured"""
    origbackuppath = ui.config(b'ui', b'origbackuppath')
    if not origbackuppath:
        return None
    return vfs.vfs(repo.wvfs.join(origbackuppath))


def backuppath(ui, repo, filepath):
    '''customize where working copy backup files (.orig files) are created

    Fetch user defined path from config file: [ui] origbackuppath = <path>
    Fall back to default (filepath with .orig suffix) if not specified

    filepath is repo-relative

    Returns an absolute path
    '''
    origvfs = getorigvfs(ui, repo)
    if origvfs is None:
        return repo.wjoin(filepath + b".orig")

    origbackupdir = origvfs.dirname(filepath)
    if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
        ui.note(_(b'creating directory: %s\n') % origvfs.join(origbackupdir))

        # Remove any files that conflict with the backup file's path
        for f in reversed(list(pathutil.finddirs(filepath))):
            if origvfs.isfileorlink(f):
                ui.note(_(b'removing conflicting file: %s\n') % origvfs.join(f))
                origvfs.unlink(f)
                break

        origvfs.makedirs(origbackupdir)

    if origvfs.isdir(filepath) and not origvfs.islink(filepath):
        ui.note(
            _(b'removing conflicting directory: %s\n') % origvfs.join(filepath)
        )
        origvfs.rmtree(filepath, forcibly=True)

    return origvfs.join(filepath)


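# Editor's illustrative sketch (not part of the original module): with
#   [ui]
#   origbackuppath = .hg/origbackups
# in the hgrc, backuppath() routes .orig files into that directory; with no
# such setting it falls back to '<file>.orig' next to the file.
def _examplebackup(ui, repo):
    return backuppath(ui, repo, b'src/module.py')  # path is hypothetical

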
class _containsnode(object):
    """proxy __contains__(node) to container.__contains__ which accepts revs"""

    def __init__(self, repo, revcontainer):
        self._torev = repo.changelog.rev
        self._revcontains = revcontainer.__contains__

    def __contains__(self, node):
        return self._revcontains(self._torev(node))


def cleanupnodes(
    repo,
    replacements,
    operation,
    moves=None,
    metadata=None,
    fixphase=False,
    targetphase=None,
    backup=True,
):
    """do common cleanups when old nodes are replaced by new nodes

    That includes writing obsmarkers or stripping nodes, and moving bookmarks.
    (we might also want to move working directory parent in the future)

    By default, bookmark moves are calculated automatically from 'replacements',
    but 'moves' can be used to override that. Also, 'moves' may include
    additional bookmark moves that should not have associated obsmarkers.

    replacements is {oldnode: [newnode]} or an iterable of nodes if they do not
    have replacements. operation is a string, like "rebase".

    metadata is a dictionary containing metadata to be stored in obsmarkers if
    obsolescence is enabled.
    """
    assert fixphase or targetphase is None
    if not replacements and not moves:
        return

    # translate mapping's other forms
    if not util.safehasattr(replacements, b'items'):
        replacements = {(n,): () for n in replacements}
    else:
        # upgrading non tuple "source" to tuple ones for BC
        repls = {}
        for key, value in replacements.items():
            if not isinstance(key, tuple):
                key = (key,)
            repls[key] = value
        replacements = repls

    # Unfiltered repo is needed since nodes in replacements might be hidden.
    unfi = repo.unfiltered()

    # Calculate bookmark movements
    if moves is None:
        moves = {}
    for oldnodes, newnodes in replacements.items():
        for oldnode in oldnodes:
            if oldnode in moves:
                continue
            if len(newnodes) > 1:
                # usually a split, take the one with biggest rev number
                newnode = next(unfi.set(b'max(%ln)', newnodes)).node()
            elif len(newnodes) == 0:
                # move bookmark backwards
                allreplaced = []
                for rep in replacements:
                    allreplaced.extend(rep)
                roots = list(
                    unfi.set(b'max((::%n) - %ln)', oldnode, allreplaced)
                )
                if roots:
                    newnode = roots[0].node()
                else:
                    newnode = nullid
            else:
                newnode = newnodes[0]
            moves[oldnode] = newnode

    allnewnodes = [n for ns in replacements.values() for n in ns]
    toretract = {}
    toadvance = {}
    if fixphase:
        precursors = {}
        for oldnodes, newnodes in replacements.items():
            for oldnode in oldnodes:
                for newnode in newnodes:
                    precursors.setdefault(newnode, []).append(oldnode)

        allnewnodes.sort(key=lambda n: unfi[n].rev())
        newphases = {}

        def phase(ctx):
            return newphases.get(ctx.node(), ctx.phase())

        for newnode in allnewnodes:
            ctx = unfi[newnode]
            parentphase = max(phase(p) for p in ctx.parents())
            if targetphase is None:
                oldphase = max(
                    unfi[oldnode].phase() for oldnode in precursors[newnode]
                )
                newphase = max(oldphase, parentphase)
            else:
                newphase = max(targetphase, parentphase)
            newphases[newnode] = newphase
            if newphase > ctx.phase():
                toretract.setdefault(newphase, []).append(newnode)
            elif newphase < ctx.phase():
                toadvance.setdefault(newphase, []).append(newnode)

    with repo.transaction(b'cleanup') as tr:
        # Move bookmarks
        bmarks = repo._bookmarks
        bmarkchanges = []
        for oldnode, newnode in moves.items():
            oldbmarks = repo.nodebookmarks(oldnode)
            if not oldbmarks:
                continue
            from . import bookmarks  # avoid import cycle

            repo.ui.debug(
                b'moving bookmarks %r from %s to %s\n'
                % (
                    pycompat.rapply(pycompat.maybebytestr, oldbmarks),
                    hex(oldnode),
                    hex(newnode),
                )
            )
            # Delete divergent bookmarks being parents of related newnodes
            deleterevs = repo.revs(
                b'parents(roots(%ln & (::%n))) - parents(%n)',
                allnewnodes,
                newnode,
                oldnode,
            )
            deletenodes = _containsnode(repo, deleterevs)
            for name in oldbmarks:
                bmarkchanges.append((name, newnode))
                for b in bookmarks.divergent2delete(repo, deletenodes, name):
                    bmarkchanges.append((b, None))

        if bmarkchanges:
            bmarks.applychanges(repo, tr, bmarkchanges)

        for phase, nodes in toretract.items():
            phases.retractboundary(repo, tr, phase, nodes)
        for phase, nodes in toadvance.items():
            phases.advanceboundary(repo, tr, phase, nodes)

        mayusearchived = repo.ui.config(b'experimental', b'cleanup-as-archived')
        # Obsolete or strip nodes
        if obsolete.isenabled(repo, obsolete.createmarkersopt):
            # If a node is already obsoleted, and we want to obsolete it
            # without a successor, skip that obsolete request since it's
            # unnecessary. That's the "if s or not isobs(n)" check below.
            # Also sort the nodes in topological order, that might be useful
            # for some obsstore logic.
            # NOTE: the sorting might belong to createmarkers.
            torev = unfi.changelog.rev
            sortfunc = lambda ns: torev(ns[0][0])
            rels = []
            for ns, s in sorted(replacements.items(), key=sortfunc):
                rel = (tuple(unfi[n] for n in ns), tuple(unfi[m] for m in s))
                rels.append(rel)
            if rels:
                obsolete.createmarkers(
                    repo, rels, operation=operation, metadata=metadata
                )
        elif phases.supportinternal(repo) and mayusearchived:
            # this assumes we do not have "unstable" nodes above the cleaned
            # ones
            allreplaced = set()
            for ns in replacements.keys():
                allreplaced.update(ns)
            if backup:
                from . import repair  # avoid import cycle

                node = min(allreplaced, key=repo.changelog.rev)
                repair.backupbundle(
                    repo, allreplaced, allreplaced, node, operation
                )
            phases.retractboundary(repo, tr, phases.archived, allreplaced)
        else:
            from . import repair  # avoid import cycle

            tostrip = list(n for ns in replacements for n in ns)
            if tostrip:
                repair.delayedstrip(
                    repo.ui, repo, tostrip, operation, backup=backup
                )


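# Editor's illustrative sketch (not part of the original module): the shape
# of a typical cleanupnodes() call after a history-rewriting operation. The
# node variables are hypothetical placeholders.
def _examplecleanup(repo, oldnode, newnode):
    # One old node rewritten into one new node; bookmarks on oldnode move to
    # newnode, and oldnode is obsoleted (or stripped if obsmarkers are off).
    cleanupnodes(repo, {oldnode: [newnode]}, operation=b'amend')

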
def addremove(repo, matcher, prefix, uipathfn, opts=None):
    if opts is None:
        opts = {}
    m = matcher
    dry_run = opts.get(b'dry_run')
    try:
        similarity = float(opts.get(b'similarity') or 0)
    except ValueError:
        raise error.Abort(_(b'similarity must be a number'))
    if similarity < 0 or similarity > 100:
        raise error.Abort(_(b'similarity must be between 0 and 100'))
    similarity /= 100.0

    ret = 0

    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        if opts.get(b'subrepos') or m.exact(subpath) or any(submatch.files()):
            sub = wctx.sub(subpath)
            subprefix = repo.wvfs.reljoin(prefix, subpath)
            subuipathfn = subdiruipathfn(subpath, uipathfn)
            try:
                if sub.addremove(submatch, subprefix, subuipathfn, opts):
                    ret = 1
            except error.LookupError:
                repo.ui.status(
                    _(b"skipping missing subrepository: %s\n")
                    % uipathfn(subpath)
                )

    rejected = []

    def badfn(f, msg):
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(
        repo, badmatch
    )

    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _(b'adding %s\n') % uipathfn(abs)
                label = b'ui.addremove.added'
            else:
                status = _(b'removing %s\n') % uipathfn(abs)
                label = b'ui.addremove.removed'
            repo.ui.status(status, label=label)

    renames = _findrenames(
        repo, m, added + unknown, removed + deleted, similarity, uipathfn
    )

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return ret


def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.'''
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
    rejected = []

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _(b'adding %s\n') % abs
            else:
                status = _(b'removing %s\n') % abs
            repo.ui.status(status)

    # TODO: We should probably have the caller pass in uipathfn and apply it to
    # the messages above too. legacyrelativevalue=True is consistent with how
    # it used to work.
    uipathfn = getuipathfn(repo, legacyrelativevalue=True)
    renames = _findrenames(
        repo, m, added + unknown, removed + deleted, similarity, uipathfn
    )

    _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return 0


def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.'''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root, cached=True)

    ctx = repo[None]
    dirstate = repo.dirstate
    matcher = repo.narrowmatch(matcher, includeexact=True)
    walkresults = dirstate.walk(
        matcher,
        subrepos=sorted(ctx.substate),
        unknown=True,
        ignored=False,
        full=False,
    )
    for abs, st in pycompat.iteritems(walkresults):
        dstate = dirstate[abs]
        if dstate == b'?' and audit_path.check(abs):
            unknown.append(abs)
        elif dstate != b'r' and not st:
            deleted.append(abs)
        elif dstate == b'r' and st:
            forgotten.append(abs)
        # for finding renames
        elif dstate == b'r' and not st:
            removed.append(abs)
        elif dstate == b'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten


def _findrenames(repo, matcher, added, removed, similarity, uipathfn):
    '''Find renames from removed files to added ones.'''
    renames = {}
    if similarity > 0:
        for old, new, score in similar.findrenames(
            repo, added, removed, similarity
        ):
            if (
                repo.ui.verbose
                or not matcher.exact(old)
                or not matcher.exact(new)
            ):
                repo.ui.status(
                    _(
                        b'recording removal of %s as rename to %s '
                        b'(%d%% similar)\n'
                    )
                    % (uipathfn(old), uipathfn(new), score * 100)
                )
            renames[new] = old
    return renames


def _markchanges(repo, unknown, deleted, renames):
    '''Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied.'''
    wctx = repo[None]
    with repo.wlock():
        wctx.forget(deleted)
        wctx.add(unknown)
        for new, old in pycompat.iteritems(renames):
            wctx.copy(old, new)


def getrenamedfn(repo, endrev=None):
    if copiesmod.usechangesetcentricalgo(repo):

        def getrenamed(fn, rev):
            ctx = repo[rev]
            p1copies = ctx.p1copies()
            if fn in p1copies:
                return p1copies[fn]
            p2copies = ctx.p2copies()
            if fn in p2copies:
                return p2copies[fn]
            return None

        return getrenamed

    rcache = {}
    if endrev is None:
        endrev = len(repo)

    def getrenamed(fn, rev):
        '''looks up all renames for a file (up to endrev) the first
        time the file is given. It indexes on the changerev and only
        parses the manifest if linkrev != changerev.
        Returns rename info for fn at changerev rev.'''
        if fn not in rcache:
            rcache[fn] = {}
            fl = repo.file(fn)
            for i in fl:
                lr = fl.linkrev(i)
                renamed = fl.renamed(fl.node(i))
                rcache[fn][lr] = renamed and renamed[0]
                if lr >= endrev:
                    break
        if rev in rcache[fn]:
            return rcache[fn][rev]

        # If linkrev != rev (i.e. rev not found in rcache) fallback to
        # filectx logic.
        try:
            return repo[rev][fn].copysource()
        except error.LookupError:
            return None

    return getrenamed


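# Usage sketch (assumed caller, not original code): getrenamedfn() returns a
# closure so per-file rename data is computed lazily and cached across calls;
# template code calls it once per (file, rev) pair.
def _example_getrenamed_usage(repo, fn, rev):
    getrenamed = getrenamedfn(repo)
    # copy/rename source of fn at rev, or None if fn was not copied
    return getrenamed(fn, rev)

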
def getcopiesfn(repo, endrev=None):
    if copiesmod.usechangesetcentricalgo(repo):

        def copiesfn(ctx):
            if ctx.p2copies():
                allcopies = ctx.p1copies().copy()
                # There should be no overlap
                allcopies.update(ctx.p2copies())
                return sorted(allcopies.items())
            else:
                return sorted(ctx.p1copies().items())

    else:
        getrenamed = getrenamedfn(repo, endrev)

        def copiesfn(ctx):
            copies = []
            for fn in ctx.files():
                rename = getrenamed(fn, ctx.rev())
                if rename:
                    copies.append((fn, rename))
            return copies

    return copiesfn


def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc:  # copying back a copy?
        if repo.dirstate[dst] not in b'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
    else:
        if repo.dirstate[origsrc] == b'a' and origsrc == src:
            if not ui.quiet:
                ui.warn(
                    _(
                        b"%s has not been committed yet, so no copy "
                        b"data will be stored for %s.\n"
                    )
                    % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd))
                )
            if repo.dirstate[dst] in b'?r' and not dryrun:
                wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)


def movedirstate(repo, newctx, match=None):
    """Move the dirstate to newctx and adjust it as necessary.

    A matcher can be provided as an optimization. It is probably a bug to pass
    a matcher that doesn't match all the differences between the parent of the
    working copy and newctx.
    """
    oldctx = repo[b'.']
    ds = repo.dirstate
    copies = dict(ds.copies())
    ds.setparents(newctx.node(), nullid)
    s = newctx.status(oldctx, match=match)
    for f in s.modified:
        if ds[f] == b'r':
            # modified + removed -> removed
            continue
        ds.normallookup(f)

    for f in s.added:
        if ds[f] == b'r':
            # added + removed -> unknown
            ds.drop(f)
        elif ds[f] != b'a':
            ds.add(f)

    for f in s.removed:
        if ds[f] == b'a':
            # removed + added -> normal
            ds.normallookup(f)
        elif ds[f] != b'r':
            ds.remove(f)

    # Merge old parent and old working dir copies
    oldcopies = copiesmod.pathcopies(newctx, oldctx, match)
    oldcopies.update(copies)
    copies = {
        dst: oldcopies.get(src, src)
        for dst, src in pycompat.iteritems(oldcopies)
    }
    # Adjust the dirstate copies
    for dst, src in pycompat.iteritems(copies):
        if src not in newctx or dst in newctx or ds[dst] != b'a':
            src = None
        ds.copy(src, dst)
    repo._quick_access_changeid_invalidate()


def filterrequirements(requirements):
    """ filters the requirements into two sets:

    wcreq: requirements which should be written in .hg/requires
    storereq: requirements which should be written in .hg/store/requires

    Returns (wcreq, storereq)
    """
    if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
        wc, store = set(), set()
        for r in requirements:
            if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
                wc.add(r)
            else:
                store.add(r)
        return wc, store
    return requirements, None


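# Minimal sketch of the split above, under the assumption that b'share-safe'
# is the SHARESAFE_REQUIREMENT constant and b'shared' is listed in
# WORKING_DIR_REQUIREMENTS; the exact constants live in requirementsmod.
def _example_filterrequirements():
    wcreq, storereq = filterrequirements(
        {b'share-safe', b'shared', b'store', b'revlogv1'}
    )
    # with share-safe present, working-directory requirements stay in wcreq
    # (.hg/requires) and the rest move to storereq (.hg/store/requires);
    # without it, storereq is None and everything goes to .hg/requires.
    return wcreq, storereq

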
def writereporequirements(repo, requirements=None):
    """ writes requirements for the repo to .hg/requires """
    if requirements:
        repo.requirements = requirements
    wcreq, storereq = filterrequirements(repo.requirements)
    if wcreq is not None:
        writerequires(repo.vfs, wcreq)
    if storereq is not None:
        writerequires(repo.svfs, storereq)


def writerequires(opener, requirements):
    with opener(b'requires', b'w', atomictemp=True) as fp:
        for r in sorted(requirements):
            fp.write(b"%s\n" % r)


class filecachesubentry(object):
    def __init__(self, path, stat):
        self.path = path
        self.cachestat = None
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)

            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()
            else:
                # None means we don't know yet
                self._cacheable = None

    def refresh(self):
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        if self._cacheable is not None:
            return self._cacheable

        # we don't know yet, assume it is for now
        return True

    def changed(self):
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

            # check again
            if not self._cacheable:
                return True

        if self.cachestat != newstat:
            self.cachestat = newstat
            return True
        else:
            return False

    @staticmethod
    def stat(path):
        try:
            return util.cachestat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise


class filecacheentry(object):
    def __init__(self, paths, stat=True):
        self._entries = []
        for path in paths:
            self._entries.append(filecachesubentry(path, stat))

    def changed(self):
        '''true if any entry has changed'''
        for entry in self._entries:
            if entry.changed():
                return True
        return False

    def refresh(self):
        for entry in self._entries:
            entry.refresh()


class filecache(object):
    """A property-like decorator that tracks files under .hg/ for updates.

    On first access, the files defined as arguments are stat()ed and the
    results cached. The decorated function is called. The results are stashed
    away in a ``_filecache`` dict on the object whose method is decorated.

    On subsequent access, the cached result is used as it is set to the
    instance dictionary.

    On external property set/delete operations, the caller must update the
    corresponding _filecache entry appropriately. Use __class__.<attr>.set()
    instead of directly setting <attr>.

    When using the property API, the cached data is always used if available.
    No stat() is performed to check if the file has changed.

    Others can muck about with the state of the ``_filecache`` dict. e.g. they
    can populate an entry before the property's getter is called. In this case,
    entries in ``_filecache`` will be used during property operations,
    if available. If the underlying file changes, it is up to external callers
    to reflect this by e.g. calling ``delattr(obj, attr)`` to remove the cached
    method result as well as possibly calling ``del obj._filecache[attr]`` to
    remove the ``filecacheentry``.
    """

    def __init__(self, *paths):
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class whose member function was decorated).
        """
        raise NotImplementedError

    def __call__(self, func):
        self.func = func
        self.sname = func.__name__
        self.name = pycompat.sysbytes(self.sname)
        return self

    def __get__(self, obj, type=None):
        # if accessed on the class, return the descriptor itself.
        if obj is None:
            return self

        assert self.sname not in obj.__dict__

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        obj.__dict__[self.sname] = entry.obj
        return entry.obj

    # don't implement __set__(), which would make __dict__ lookup as slow as
    # function call.

    def set(self, obj, value):
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, path) for path in self.paths]
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value  # update cached copy
        obj.__dict__[self.sname] = value  # update copy returned by obj.x


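# Illustrative sketch (hypothetical classes, not from this module): the
# minimal integration a consumer needs - a join() override resolving tracked
# paths, and a _filecache dict on each instance. The `vfs` attribute and
# `tryread` call are assumptions about the host object.
class _examplefilecache(filecache):
    def join(self, obj, fname):
        # resolve the tracked file against the object's vfs
        return obj.vfs.join(fname)


class _examplecached(object):
    def __init__(self, vfs):
        self.vfs = vfs
        self._filecache = {}

    @_examplefilecache(b'bookmarks')
    def bookmarks(self):
        # re-read only when the stat data of .hg/bookmarks changes
        return self.vfs.tryread(b'bookmarks')

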
def extdatasource(repo, source):
    """Gather a map of rev -> value dict from the specified source

    A source spec is treated as a URL, with a special case shell: type
    for parsing the output from a shell command.

    The data is parsed as a series of newline-separated records where
    each record is a revision specifier optionally followed by a space
    and a freeform string value. If the revision is known locally, it
    is converted to a rev, otherwise the record is skipped.

    Note that both key and value are treated as UTF-8 and converted to
    the local encoding. This allows uniformity between local and
    remote data sources.
    """

    spec = repo.ui.config(b"extdata", source)
    if not spec:
        raise error.Abort(_(b"unknown extdata source '%s'") % source)

    data = {}
    src = proc = None
    try:
        if spec.startswith(b"shell:"):
            # external commands should be run relative to the repo root
            cmd = spec[6:]
            proc = subprocess.Popen(
                procutil.tonativestr(cmd),
                shell=True,
                bufsize=-1,
                close_fds=procutil.closefds,
                stdout=subprocess.PIPE,
                cwd=procutil.tonativestr(repo.root),
            )
            src = proc.stdout
        else:
            # treat as a URL or file
            src = url.open(repo.ui, spec)
        for l in src:
            if b" " in l:
                k, v = l.strip().split(b" ", 1)
            else:
                k, v = l.strip(), b""

            k = encoding.tolocal(k)
            try:
                data[revsingle(repo, k).rev()] = encoding.tolocal(v)
            except (error.LookupError, error.RepoLookupError):
                pass  # we ignore data for nodes that don't exist locally
    finally:
        if proc:
            try:
                proc.communicate()
            except ValueError:
                # This happens if we started iterating src and then
                # get a parse error on a line. It should be safe to ignore.
                pass
        if src:
            src.close()
    if proc and proc.returncode != 0:
        raise error.Abort(
            _(b"extdata command '%s' failed: %s")
            % (cmd, procutil.explainexit(proc.returncode))
        )

    return data


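# An illustrative configuration (assumed, not defined in this file): with an
# hgrc entry such as
#
#   [extdata]
#   bugdata = shell:cat .hg/bugdata.txt
#
# where each line of bugdata.txt is "<revision specifier> <free-form value>",
# calling extdatasource(repo, b'bugdata') returns a {rev: value} map for the
# revisions that exist locally; the shell command runs with its working
# directory set to repo.root, so relative paths resolve against the repo.

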
def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
    if lock is None:
        raise error.LockInheritanceContractViolation(
            b'lock can only be inherited while held'
        )
    if environ is None:
        environ = {}
    with lock.inherit() as locker:
        environ[envvar] = locker
        return repo.ui.system(cmd, environ=environ, *args, **kwargs)


def wlocksub(repo, cmd, *args, **kwargs):
    """run cmd as a subprocess that allows inheriting repo's wlock

    This can only be called while the wlock is held. This takes all the
    arguments that ui.system does, and returns the exit code of the
    subprocess."""
    return _locksub(
        repo, repo.currentwlock(), b'HG_WLOCK_LOCKER', cmd, *args, **kwargs
    )


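# Minimal sketch (hypothetical caller): wlocksub() is only valid while the
# wlock is held; the child process sees HG_WLOCK_LOCKER and can reuse the
# inherited lock instead of deadlocking against its parent. The command
# string below is an arbitrary placeholder.
def _example_wlocksub(repo):
    with repo.wlock():
        return wlocksub(repo, b'hg status')

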
class progress(object):
    def __init__(self, ui, updatebar, topic, unit=b"", total=None):
        self.ui = ui
        self.pos = 0
        self.topic = topic
        self.unit = unit
        self.total = total
        self.debug = ui.configbool(b'progress', b'debug')
        self._updatebar = updatebar

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.complete()

    def update(self, pos, item=b"", total=None):
        assert pos is not None
        if total:
            self.total = total
        self.pos = pos
        self._updatebar(self.topic, self.pos, item, self.unit, self.total)
        if self.debug:
            self._printdebug(item)

    def increment(self, step=1, item=b"", total=None):
        self.update(self.pos + step, item, total)

    def complete(self):
        self.pos = None
        self.unit = b""
        self.total = None
        self._updatebar(self.topic, self.pos, b"", self.unit, self.total)

    def _printdebug(self, item):
        unit = b''
        if self.unit:
            unit = b' ' + self.unit
        if item:
            item = b' ' + item

        if self.total:
            pct = 100.0 * self.pos / self.total
            self.ui.debug(
                b'%s:%s %d/%d%s (%4.2f%%)\n'
                % (self.topic, item, self.pos, self.total, unit, pct)
            )
        else:
            self.ui.debug(b'%s:%s %d%s\n' % (self.topic, item, self.pos, unit))


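# Usage sketch (assumed caller, not original code): progress objects are
# context managers, so the bar is completed even if the loop raises;
# ui.makeprogress() is the usual way to obtain one rather than constructing
# this class directly.
def _example_progress(ui, items):
    with ui.makeprogress(b'examples', unit=b'items', total=len(items)) as p:
        for item in items:
            p.increment(item=item)

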
def gdinitconfig(ui):
    """helper function to know if a repo should be created as general delta
    """
    # experimental config: format.generaldelta
    return ui.configbool(b'format', b'generaldelta') or ui.configbool(
        b'format', b'usegeneraldelta'
    )


def gddeltaconfig(ui):
    """helper function to know if incoming delta should be optimised
    """
    # experimental config: format.generaldelta
    return ui.configbool(b'format', b'generaldelta')


class simplekeyvaluefile(object):
    """A simple file with key=value lines

    Keys must be alphanumerics and start with a letter, values must not
    contain '\n' characters"""

    firstlinekey = b'__firstline'

    def __init__(self, vfs, path, keys=None):
        self.vfs = vfs
        self.path = path

    def read(self, firstlinenonkeyval=False):
        """Read the contents of a simple key-value file

        'firstlinenonkeyval' indicates whether the first line of file should
        be treated as a key-value pair or returned fully under the
        __firstline key."""
        lines = self.vfs.readlines(self.path)
        d = {}
        if firstlinenonkeyval:
            if not lines:
                e = _(b"empty simplekeyvalue file")
                raise error.CorruptedState(e)
            # we don't want to include '\n' in the __firstline
            d[self.firstlinekey] = lines[0][:-1]
            del lines[0]

        try:
            # the 'if line.strip()' part prevents us from failing on empty
            # lines which only contain '\n' therefore are not skipped
            # by 'if line'
            updatedict = dict(
                line[:-1].split(b'=', 1) for line in lines if line.strip()
            )
            if self.firstlinekey in updatedict:
                e = _(b"%r can't be used as a key")
                raise error.CorruptedState(e % self.firstlinekey)
            d.update(updatedict)
        except ValueError as e:
            raise error.CorruptedState(stringutil.forcebytestr(e))
        return d

    def write(self, data, firstline=None):
        """Write key=>value mapping to a file
        data is a dict. Keys must be alphanumerical and start with a letter.
        Values must not contain newline characters.

        If 'firstline' is not None, it is written to file before
        everything else, as it is, not in a key=value form"""
        lines = []
        if firstline is not None:
            lines.append(b'%s\n' % firstline)

        for k, v in data.items():
            if k == self.firstlinekey:
                e = b"key name '%s' is reserved" % self.firstlinekey
                raise error.ProgrammingError(e)
            if not k[0:1].isalpha():
                e = b"keys must start with a letter in a key-value file"
                raise error.ProgrammingError(e)
            if not k.isalnum():
                e = b"invalid key name in a simple key-value file"
                raise error.ProgrammingError(e)
            if b'\n' in v:
                e = b"invalid value in a simple key-value file"
                raise error.ProgrammingError(e)
            lines.append(b"%s=%s\n" % (k, v))
        with self.vfs(self.path, mode=b'wb', atomictemp=True) as fp:
            fp.write(b''.join(lines))


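# Round-trip sketch (hypothetical vfs and file name): keys are alphanumeric
# bytes, values must be newline-free, and a non-key first line survives the
# round trip under the b'__firstline' key.
def _example_simplekeyvaluefile(vfs):
    f = simplekeyvaluefile(vfs, b'example-state')
    f.write({b'version': b'1', b'node': b'abc123'}, firstline=b'v1')
    data = f.read(firstlinenonkeyval=True)
    # data == {b'__firstline': b'v1', b'version': b'1', b'node': b'abc123'}
    return data

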
_reportobsoletedsource = [
    b'debugobsolete',
    b'pull',
    b'push',
    b'serve',
    b'unbundle',
]

_reportnewcssource = [
    b'pull',
    b'unbundle',
]


def prefetchfiles(repo, revmatches):
    """Invokes the registered file prefetch functions, allowing extensions to
    ensure the corresponding files are available locally, before the command
    uses them.

    Args:
      revmatches: a list of (revision, match) tuples to indicate the files to
        fetch at each revision. If any of the match elements is None, it matches
        all files.
    """

    def _matcher(m):
        if m:
            assert isinstance(m, matchmod.basematcher)
            # The command itself will complain about files that don't exist, so
            # don't duplicate the message.
            return matchmod.badmatch(m, lambda fn, msg: None)
        else:
            return matchall(repo)

    revbadmatches = [(rev, _matcher(match)) for (rev, match) in revmatches]

    fileprefetchhooks(repo, revbadmatches)


# a list of (repo, revs, match) prefetch functions
fileprefetchhooks = util.hooks()

# A marker that tells the evolve extension to suppress its own reporting
_reportstroubledchangesets = True


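# Illustrative registration (an assumption; util.hooks exposes add()): remote
# storage extensions hook in roughly like this so files are fetched in one
# batch before a command needs them. The function body is a placeholder.
def _example_prefetch(repo, revmatches):
    for rev, match in revmatches:
        pass  # fetch the files selected by `match` at `rev`


# fileprefetchhooks.add(b'example', _example_prefetch)

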
def registersummarycallback(repo, otr, txnname=b'', as_validator=False):
    """register a callback to issue a summary after the transaction is closed

    If as_validator is true, then the callbacks are registered as transaction
    validators instead
    """

    def txmatch(sources):
        return any(txnname.startswith(source) for source in sources)

    categories = []

    def reportsummary(func):
        """decorator for report callbacks."""
        # The repoview life cycle is shorter than the one of the actual
        # underlying repository. So the filtered object can die before the
        # weakref is used leading to troubles. We keep a reference to the
        # unfiltered object and restore the filtering when retrieving the
        # repository through the weakref.
        filtername = repo.filtername
        reporef = weakref.ref(repo.unfiltered())

        def wrapped(tr):
            repo = reporef()
            if filtername:
                assert repo is not None  # help pytype
                repo = repo.filtered(filtername)
            func(repo, tr)

        newcat = b'%02i-txnreport' % len(categories)
        if as_validator:
            otr.addvalidator(newcat, wrapped)
        else:
            otr.addpostclose(newcat, wrapped)
        categories.append(newcat)
        return wrapped

    @reportsummary
    def reportchangegroup(repo, tr):
        cgchangesets = tr.changes.get(b'changegroup-count-changesets', 0)
        cgrevisions = tr.changes.get(b'changegroup-count-revisions', 0)
        cgfiles = tr.changes.get(b'changegroup-count-files', 0)
        cgheads = tr.changes.get(b'changegroup-count-heads', 0)
        if cgchangesets or cgrevisions or cgfiles:
            htext = b""
            if cgheads:
                htext = _(b" (%+d heads)") % cgheads
            msg = _(b"added %d changesets with %d changes to %d files%s\n")
            if as_validator:
                msg = _(b"adding %d changesets with %d changes to %d files%s\n")
            assert repo is not None  # help pytype
            repo.ui.status(msg % (cgchangesets, cgrevisions, cgfiles, htext))

    if txmatch(_reportobsoletedsource):

        @reportsummary
        def reportobsoleted(repo, tr):
            obsoleted = obsutil.getobsoleted(repo, tr)
            newmarkers = len(tr.changes.get(b'obsmarkers', ()))
            if newmarkers:
                repo.ui.status(_(b'%i new obsolescence markers\n') % newmarkers)
            if obsoleted:
                msg = _(b'obsoleted %i changesets\n')
                if as_validator:
                    msg = _(b'obsoleting %i changesets\n')
                repo.ui.status(msg % len(obsoleted))

    if obsolete.isenabled(
        repo, obsolete.createmarkersopt
    ) and repo.ui.configbool(
        b'experimental', b'evolution.report-instabilities'
    ):
        instabilitytypes = [
            (b'orphan', b'orphan'),
            (b'phase-divergent', b'phasedivergent'),
            (b'content-divergent', b'contentdivergent'),
        ]

        def getinstabilitycounts(repo):
            filtered = repo.changelog.filteredrevs
            counts = {}
            for instability, revset in instabilitytypes:
                counts[instability] = len(
                    set(obsolete.getrevs(repo, revset)) - filtered
                )
            return counts

        oldinstabilitycounts = getinstabilitycounts(repo)

        @reportsummary
        def reportnewinstabilities(repo, tr):
            newinstabilitycounts = getinstabilitycounts(repo)
            for instability, revset in instabilitytypes:
                delta = (
                    newinstabilitycounts[instability]
                    - oldinstabilitycounts[instability]
                )
                msg = getinstabilitymessage(delta, instability)
                if msg:
                    repo.ui.warn(msg)

    if txmatch(_reportnewcssource):

        @reportsummary
        def reportnewcs(repo, tr):
            """Report the range of new revisions pulled/unbundled."""
            origrepolen = tr.changes.get(b'origrepolen', len(repo))
            unfi = repo.unfiltered()
            if origrepolen >= len(unfi):
                return

            # Compute the bounds of new visible revisions' range.
            revs = smartset.spanset(repo, start=origrepolen)
            if revs:
                minrev, maxrev = repo[revs.min()], repo[revs.max()]

                if minrev == maxrev:
                    revrange = minrev
                else:
                    revrange = b'%s:%s' % (minrev, maxrev)
                draft = len(repo.revs(b'%ld and draft()', revs))
                secret = len(repo.revs(b'%ld and secret()', revs))
                if not (draft or secret):
                    msg = _(b'new changesets %s\n') % revrange
                elif draft and secret:
                    msg = _(b'new changesets %s (%d drafts, %d secrets)\n')
                    msg %= (revrange, draft, secret)
                elif draft:
                    msg = _(b'new changesets %s (%d drafts)\n')
                    msg %= (revrange, draft)
                elif secret:
                    msg = _(b'new changesets %s (%d secrets)\n')
                    msg %= (revrange, secret)
                else:
                    errormsg = b'entered unreachable condition'
                    raise error.ProgrammingError(errormsg)
                repo.ui.status(msg)

            # search new changesets directly pulled as obsolete
            duplicates = tr.changes.get(b'revduplicates', ())
            obsadded = unfi.revs(
                b'(%d: + %ld) and obsolete()', origrepolen, duplicates
            )
            cl = repo.changelog
            extinctadded = [r for r in obsadded if r not in cl]
            if extinctadded:
                # They are not just obsolete, but obsolete and invisible
                # we call them "extinct" internally but the terms have not been
                # exposed to users.
                msg = b'(%d other changesets obsolete on arrival)\n'
                repo.ui.status(msg % len(extinctadded))

        @reportsummary
        def reportphasechanges(repo, tr):
            """Report statistics of phase changes for changesets pre-existing
            pull/unbundle.
            """
            origrepolen = tr.changes.get(b'origrepolen', len(repo))
            published = []
            for revs, (old, new) in tr.changes.get(b'phases', []):
                if new != phases.public:
                    continue
                published.extend(rev for rev in revs if rev < origrepolen)
            if not published:
                return
            msg = _(b'%d local changesets published\n')
            if as_validator:
                msg = _(b'%d local changesets will be published\n')
            repo.ui.status(msg % len(published))


def getinstabilitymessage(delta, instability):
    """function to return the message to show warning about new instabilities

    exists as a separate function so that extensions can wrap it to show more
    information, like how to fix instabilities"""
    if delta > 0:
        return _(b'%i new %s changesets\n') % (delta, instability)


def nodesummaries(repo, nodes, maxnumnodes=4):
    if len(nodes) <= maxnumnodes or repo.ui.verbose:
        return b' '.join(short(h) for h in nodes)
    first = b' '.join(short(h) for h in nodes[:maxnumnodes])
    return _(b"%s and %d others") % (first, len(nodes) - maxnumnodes)


def enforcesinglehead(repo, tr, desc, accountclosed=False):
    """check that no named branch has multiple heads"""
    if desc in (b'strip', b'repair'):
        # skip the logic during strip
        return
    visible = repo.filtered(b'visible')
    # possible improvement: we could restrict the check to affected branch
    bm = visible.branchmap()
    for name in bm:
        heads = bm.branchheads(name, closed=accountclosed)
        if len(heads) > 1:
            msg = _(b'rejecting multiple heads on branch "%s"')
            msg %= name
            hint = _(b'%d heads: %s')
            hint %= (len(heads), nodesummaries(repo, heads))
            raise error.Abort(msg, hint=hint)


def wrapconvertsink(sink):
    """Allow extensions to wrap the sink returned by convcmd.convertsink()
    before it is used, whether or not the convert extension was formally loaded.
    """
    return sink


def unhidehashlikerevs(repo, specs, hiddentype):
    """parse the user specs and unhide changesets whose hash or revision number
    is passed.

    hiddentype can be: 1) 'warn': warn while unhiding changesets
                       2) 'nowarn': don't warn while unhiding changesets

    returns a repo object with the required changesets unhidden
    """
    if not repo.filtername or not repo.ui.configbool(
        b'experimental', b'directaccess'
    ):
        return repo

    if repo.filtername not in (b'visible', b'visible-hidden'):
        return repo

    symbols = set()
    for spec in specs:
        try:
            tree = revsetlang.parse(spec)
        except error.ParseError:  # will be reported by scmutil.revrange()
            continue

        symbols.update(revsetlang.gethashlikesymbols(tree))

    if not symbols:
        return repo

    revs = _getrevsfromsymbols(repo, symbols)

    if not revs:
        return repo

    if hiddentype == b'warn':
        unfi = repo.unfiltered()
        revstr = b", ".join([pycompat.bytestr(unfi[l]) for l in revs])
        repo.ui.warn(
            _(
                b"warning: accessing hidden changesets for write "
                b"operation: %s\n"
            )
            % revstr
        )

    # we have to use a new filtername to separate branch/tags cache until we
    # can disable these caches when revisions are dynamically pinned.
    return repo.filtered(b'visible-hidden', revs)


def _getrevsfromsymbols(repo, symbols):
    """parses the list of symbols and returns a set of revision numbers of
    hidden changesets present in symbols"""
    revs = set()
    unfi = repo.unfiltered()
    unficl = unfi.changelog
    cl = repo.changelog
    tiprev = len(unficl)
    allowrevnums = repo.ui.configbool(b'experimental', b'directaccess.revnums')
    for s in symbols:
        try:
            n = int(s)
            if n <= tiprev:
                if not allowrevnums:
                    continue
                else:
                    if n not in cl:
                        revs.add(n)
                    continue
        except ValueError:
            pass

        try:
            s = resolvehexnodeidprefix(unfi, s)
        except (error.LookupError, error.WdirUnsupported):
            s = None

        if s is not None:
            rev = unficl.rev(s)
            if rev not in cl:
                revs.add(rev)

    return revs


def bookmarkrevs(repo, mark):
    """
    Select revisions reachable by a given bookmark
    """
    return repo.revs(
        b"ancestors(bookmark(%s)) - "
        b"ancestors(head() and not bookmark(%s)) - "
        b"ancestors(bookmark() and not bookmark(%s))",
        mark,
        mark,
        mark,
    )
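

# Reading the revset above (explanatory note, not in the original source):
# start from all ancestors of the bookmark, then trim the ancestors of heads
# and of other bookmarks, leaving the changesets that only the given bookmark
# reaches - the set a bookmark-scoped operation such as `hg strip -B` acts on.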