changing-files: drop the now useless changelogrevision argument...
marmoute
r46212:9003e652 default
mercurial/changelog.py
@@ -1,606 +1,606 @@
# changelog.py - changelog class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

from .i18n import _
from .node import (
    bin,
    hex,
    nullid,
)
from .thirdparty import attr

from . import (
    encoding,
    error,
    metadata,
    pycompat,
    revlog,
)
from .utils import (
    dateutil,
    stringutil,
)

_defaultextra = {b'branch': b'default'}


def _string_escape(text):
    """
    >>> from .pycompat import bytechr as chr
    >>> d = {b'nl': chr(10), b'bs': chr(92), b'cr': chr(13), b'nul': chr(0)}
    >>> s = b"ab%(nl)scd%(bs)s%(bs)sn%(nul)s12ab%(cr)scd%(bs)s%(nl)s" % d
    >>> s
    'ab\\ncd\\\\\\\\n\\x0012ab\\rcd\\\\\\n'
    >>> res = _string_escape(s)
    >>> s == _string_unescape(res)
    True
    """
    # subset of the string_escape codec
    text = (
        text.replace(b'\\', b'\\\\')
        .replace(b'\n', b'\\n')
        .replace(b'\r', b'\\r')
    )
    return text.replace(b'\0', b'\\0')


def _string_unescape(text):
    if b'\\0' in text:
        # fix up \0 without getting into trouble with \\0
        text = text.replace(b'\\\\', b'\\\\\n')
        text = text.replace(b'\\0', b'\0')
        text = text.replace(b'\n', b'')
    return stringutil.unescapestr(text)


def decodeextra(text):
    """
    >>> from .pycompat import bytechr as chr
    >>> sorted(decodeextra(encodeextra({b'foo': b'bar', b'baz': chr(0) + b'2'})
    ...                    ).items())
    [('baz', '\\x002'), ('branch', 'default'), ('foo', 'bar')]
    >>> sorted(decodeextra(encodeextra({b'foo': b'bar',
    ...                                 b'baz': chr(92) + chr(0) + b'2'})
    ...                    ).items())
    [('baz', '\\\\\\x002'), ('branch', 'default'), ('foo', 'bar')]
    """
    extra = _defaultextra.copy()
    for l in text.split(b'\0'):
        if l:
            k, v = _string_unescape(l).split(b':', 1)
            extra[k] = v
    return extra


def encodeextra(d):
    # keys must be sorted to produce a deterministic changelog entry
    items = [_string_escape(b'%s:%s' % (k, d[k])) for k in sorted(d)]
    return b"\0".join(items)


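# Illustrative sketch (editor addition, not part of the original file):
# encodeextra() joins escaped "key:value" pairs with NUL bytes, and
# decodeextra() folds the default branch back in, e.g.:
#
#     encodeextra({b'close': b'1'})   -> b'close:1'
#     decodeextra(b'close:1')         -> {b'branch': b'default', b'close': b'1'}
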
def stripdesc(desc):
    """strip trailing whitespace and leading and trailing empty lines"""
    return b'\n'.join([l.rstrip() for l in desc.splitlines()]).strip(b'\n')


class appender(object):
    '''the changelog index must be updated last on disk, so we use this class
    to delay writes to it'''

    def __init__(self, vfs, name, mode, buf):
        self.data = buf
        fp = vfs(name, mode)
        self.fp = fp
        self.offset = fp.tell()
        self.size = vfs.fstat(fp).st_size
        self._end = self.size

    def end(self):
        return self._end

    def tell(self):
        return self.offset

    def flush(self):
        pass

    @property
    def closed(self):
        return self.fp.closed

    def close(self):
        self.fp.close()

    def seek(self, offset, whence=0):
        '''virtual file offset spans real file and data'''
        if whence == 0:
            self.offset = offset
        elif whence == 1:
            self.offset += offset
        elif whence == 2:
            self.offset = self.end() + offset
        if self.offset < self.size:
            self.fp.seek(self.offset)

    def read(self, count=-1):
        '''only trick here is reads that span real file and data'''
        ret = b""
        if self.offset < self.size:
            s = self.fp.read(count)
            ret = s
            self.offset += len(s)
            if count > 0:
                count -= len(s)
        if count != 0:
            doff = self.offset - self.size
            self.data.insert(0, b"".join(self.data))
            del self.data[1:]
            s = self.data[0][doff : doff + count]
            self.offset += len(s)
            ret += s
        return ret

    def write(self, s):
        self.data.append(bytes(s))
        self.offset += len(s)
        self._end += len(s)

    def __enter__(self):
        self.fp.__enter__()
        return self

    def __exit__(self, *args):
        return self.fp.__exit__(*args)


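# Illustrative sketch (editor addition, not part of the original file):
# appender presents the on-disk file plus the in-memory buffer as one
# virtual file. Assuming `vfs` is an opener and the index already exists:
#
#     buf = []
#     fp = appender(vfs, b'00changelog.i', b'r+', buf)
#     fp.seek(0, 2)                 # seek to the virtual end
#     fp.write(b'pending entry')    # lands in `buf`, not on disk
#     fp.seek(0)
#     data = fp.read(fp.end())      # on-disk bytes followed by `buf` bytes
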
class _divertopener(object):
    def __init__(self, opener, target):
        self._opener = opener
        self._target = target

    def __call__(self, name, mode=b'r', checkambig=False, **kwargs):
        if name != self._target:
            return self._opener(name, mode, **kwargs)
        return self._opener(name + b".a", mode, **kwargs)

    def __getattr__(self, attr):
        return getattr(self._opener, attr)


def _delayopener(opener, target, buf):
    """build an opener that stores chunks in 'buf' instead of 'target'"""

    def _delay(name, mode=b'r', checkambig=False, **kwargs):
        if name != target:
            return opener(name, mode, **kwargs)
        assert not kwargs
        return appender(opener, name, mode, buf)

    return _delay


@attr.s
class _changelogrevision(object):
    # Extensions might modify _defaultextra, so let the constructor below pass
    # it in
    extra = attr.ib()
    manifest = attr.ib(default=nullid)
    user = attr.ib(default=b'')
    date = attr.ib(default=(0, 0))
    files = attr.ib(default=attr.Factory(list))
    filesadded = attr.ib(default=None)
    filesremoved = attr.ib(default=None)
    p1copies = attr.ib(default=None)
    p2copies = attr.ib(default=None)
    description = attr.ib(default=b'')


class changelogrevision(object):
    """Holds results of a parsed changelog revision.

    Changelog revisions consist of multiple pieces of data, including
    the manifest node, user, and date. This object exposes a view into
    the parsed object.
    """

    __slots__ = (
        '_offsets',
        '_text',
        '_sidedata',
        '_cpsd',
        '_changes',
    )

    def __new__(cls, text, sidedata, cpsd):
        if not text:
            return _changelogrevision(extra=_defaultextra)

        self = super(changelogrevision, cls).__new__(cls)
        # We could return here and implement the following as an __init__.
        # But doing it here is equivalent and saves an extra function call.

        # format used:
        # nodeid\n        : manifest node in ascii
        # user\n          : user, no \n or \r allowed
        # time tz extra\n : date (time is int or float, timezone is int)
        #                 : extra is metadata, encoded and separated by '\0'
        #                 : older versions ignore it
        # files\n\n       : files modified by the cset, no \n or \r allowed
        # (.*)            : comment (free text, ideally utf-8)
        #
        # changelog v0 doesn't use extra

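        # Illustrative example (editor addition): a concrete entry in the
        # format above, with a made-up manifest node:
        #
        #   0123456789abcdef0123456789abcdef01234567\n
        #   Jane Doe <jane@example.com>\n
        #   1600000000 -7200 branch:stable\n
        #   a.txt\n
        #   b.txt\n
        #   \n
        #   commit message
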
        nl1 = text.index(b'\n')
        nl2 = text.index(b'\n', nl1 + 1)
        nl3 = text.index(b'\n', nl2 + 1)

        # The list of files may be empty. Which means nl3 is the first of the
        # double newline that precedes the description.
        if text[nl3 + 1 : nl3 + 2] == b'\n':
            doublenl = nl3
        else:
            doublenl = text.index(b'\n\n', nl3 + 1)

        self._offsets = (nl1, nl2, nl3, doublenl)
        self._text = text
        self._sidedata = sidedata
        self._cpsd = cpsd
        self._changes = None

        return self

    @property
    def manifest(self):
        return bin(self._text[0 : self._offsets[0]])

    @property
    def user(self):
        off = self._offsets
        return encoding.tolocal(self._text[off[0] + 1 : off[1]])

    @property
    def _rawdate(self):
        off = self._offsets
        dateextra = self._text[off[1] + 1 : off[2]]
        return dateextra.split(b' ', 2)[0:2]

    @property
    def _rawextra(self):
        off = self._offsets
        dateextra = self._text[off[1] + 1 : off[2]]
        fields = dateextra.split(b' ', 2)
        if len(fields) != 3:
            return None

        return fields[2]

    @property
    def date(self):
        raw = self._rawdate
        time = float(raw[0])
        # Various tools did silly things with the timezone.
        try:
            timezone = int(raw[1])
        except ValueError:
            timezone = 0

        return time, timezone

    @property
    def extra(self):
        raw = self._rawextra
        if raw is None:
            return _defaultextra

        return decodeextra(raw)

    @property
    def changes(self):
        if self._changes is not None:
            return self._changes
        if self._cpsd:
-            changes = metadata.decode_files_sidedata(self, self._sidedata)
+            changes = metadata.decode_files_sidedata(self._sidedata)
        else:
            changes = metadata.ChangingFiles(
                touched=self.files or (),
                added=self.filesadded or (),
                removed=self.filesremoved or (),
                p1_copies=self.p1copies or {},
                p2_copies=self.p2copies or {},
            )
        self._changes = changes
        return changes

    @property
    def files(self):
        off = self._offsets
        if off[2] == off[3]:
            return []

        return self._text[off[2] + 1 : off[3]].split(b'\n')

    @property
    def filesadded(self):
        if self._cpsd:
            return self.changes.added
        else:
            rawindices = self.extra.get(b'filesadded')
            if rawindices is None:
                return None
            return metadata.decodefileindices(self.files, rawindices)

    @property
    def filesremoved(self):
        if self._cpsd:
            return self.changes.removed
        else:
            rawindices = self.extra.get(b'filesremoved')
            if rawindices is None:
                return None
            return metadata.decodefileindices(self.files, rawindices)

    @property
    def p1copies(self):
        if self._cpsd:
            return self.changes.copied_from_p1
        else:
            rawcopies = self.extra.get(b'p1copies')
            if rawcopies is None:
                return None
            return metadata.decodecopies(self.files, rawcopies)

    @property
    def p2copies(self):
        if self._cpsd:
            return self.changes.copied_from_p2
        else:
            rawcopies = self.extra.get(b'p2copies')
            if rawcopies is None:
                return None
            return metadata.decodecopies(self.files, rawcopies)

    @property
    def description(self):
        return encoding.tolocal(self._text[self._offsets[3] + 2 :])


class changelog(revlog.revlog):
    def __init__(self, opener, trypending=False):
        """Load a changelog revlog using an opener.

        If ``trypending`` is true, we attempt to load the index from a
        ``00changelog.i.a`` file instead of the default ``00changelog.i``.
        The ``00changelog.i.a`` file contains index (and possibly inline
        revision) data for a transaction that hasn't been finalized yet.
        It exists in a separate file to facilitate readers (such as
        hook processes) accessing data before a transaction is finalized.
        """
        if trypending and opener.exists(b'00changelog.i.a'):
            indexfile = b'00changelog.i.a'
        else:
            indexfile = b'00changelog.i'

        datafile = b'00changelog.d'
        revlog.revlog.__init__(
            self,
            opener,
            indexfile,
            datafile=datafile,
            checkambig=True,
            mmaplargeindex=True,
            persistentnodemap=opener.options.get(b'persistent-nodemap', False),
        )

        if self._initempty and (self.version & 0xFFFF == revlog.REVLOGV1):
            # changelogs don't benefit from generaldelta.

            self.version &= ~revlog.FLAG_GENERALDELTA
            self._generaldelta = False

        # Delta chains for changelogs tend to be very small because entries
        # tend to be small and don't delta well with each other. So disable
        # delta chains.
        self._storedeltachains = False

        self._realopener = opener
        self._delayed = False
        self._delaybuf = None
        self._divert = False
        self._filteredrevs = frozenset()
        self._filteredrevs_hashcache = {}
        self._copiesstorage = opener.options.get(b'copies-storage')

    @property
    def filteredrevs(self):
        return self._filteredrevs

    @filteredrevs.setter
    def filteredrevs(self, val):
        # Ensure all updates go through this function
        assert isinstance(val, frozenset)
        self._filteredrevs = val
        self._filteredrevs_hashcache = {}

    def delayupdate(self, tr):
        """delay visibility of index updates to other readers"""

        if not self._delayed:
            if len(self) == 0:
                self._divert = True
                if self._realopener.exists(self.indexfile + b'.a'):
                    self._realopener.unlink(self.indexfile + b'.a')
                self.opener = _divertopener(self._realopener, self.indexfile)
            else:
                self._delaybuf = []
                self.opener = _delayopener(
                    self._realopener, self.indexfile, self._delaybuf
                )
        self._delayed = True
        tr.addpending(b'cl-%i' % id(self), self._writepending)
        tr.addfinalize(b'cl-%i' % id(self), self._finalize)

    def _finalize(self, tr):
        """finalize index updates"""
        self._delayed = False
        self.opener = self._realopener
        # move redirected index data back into place
        if self._divert:
            assert not self._delaybuf
            tmpname = self.indexfile + b".a"
            nfile = self.opener.open(tmpname)
            nfile.close()
            self.opener.rename(tmpname, self.indexfile, checkambig=True)
        elif self._delaybuf:
            fp = self.opener(self.indexfile, b'a', checkambig=True)
            fp.write(b"".join(self._delaybuf))
            fp.close()
            self._delaybuf = None
        self._divert = False
        # split when we're done
        self._enforceinlinesize(tr)

    def _writepending(self, tr):
        """create a file containing the unfinalized state for
        pretxnchangegroup"""
        if self._delaybuf:
            # make a temporary copy of the index
            fp1 = self._realopener(self.indexfile)
            pendingfilename = self.indexfile + b".a"
            # register as a temp file to ensure cleanup on failure
            tr.registertmp(pendingfilename)
            # write existing data
            fp2 = self._realopener(pendingfilename, b"w")
            fp2.write(fp1.read())
            # add pending data
            fp2.write(b"".join(self._delaybuf))
            fp2.close()
            # switch modes so finalize can simply rename
            self._delaybuf = None
            self._divert = True
            self.opener = _divertopener(self._realopener, self.indexfile)

        if self._divert:
            return True

        return False

    def _enforceinlinesize(self, tr, fp=None):
        if not self._delayed:
            revlog.revlog._enforceinlinesize(self, tr, fp)

    def read(self, node):
        """Obtain data from a parsed changelog revision.

        Returns a 6-tuple of:

        - manifest node in binary
        - author/user as a localstr
        - date as a 2-tuple of (time, timezone)
        - list of files
        - commit message as a localstr
        - dict of extra metadata

        Unless you need to access all fields, consider calling
        ``changelogrevision`` instead, as it is faster for partial object
        access.
        """
        d, s = self._revisiondata(node)
        c = changelogrevision(
            d, s, self._copiesstorage == b'changeset-sidedata'
        )
        return (c.manifest, c.user, c.date, c.files, c.description, c.extra)

    def changelogrevision(self, nodeorrev):
        """Obtain a ``changelogrevision`` for a node or revision."""
        text, sidedata = self._revisiondata(nodeorrev)
        return changelogrevision(
            text, sidedata, self._copiesstorage == b'changeset-sidedata'
        )

    def readfiles(self, node):
        """
        short version of read that only returns the files modified by the cset
        """
        text = self.revision(node)
        if not text:
            return []
        last = text.index(b"\n\n")
        l = text[:last].split(b'\n')
        return l[3:]

    def add(
        self,
        manifest,
        files,
        desc,
        transaction,
        p1,
        p2,
        user,
        date=None,
        extra=None,
    ):
        # Convert to UTF-8 encoded bytestrings as the very first
        # thing: calling any method on a localstr object will turn it
        # into a str object and the cached UTF-8 string is thus lost.
        user, desc = encoding.fromlocal(user), encoding.fromlocal(desc)

        user = user.strip()
        # An empty username or a username with a "\n" will make the
        # revision text contain two "\n\n" sequences -> corrupt
        # repository since read cannot unpack the revision.
        if not user:
            raise error.StorageError(_(b"empty username"))
        if b"\n" in user:
            raise error.StorageError(
                _(b"username %r contains a newline") % pycompat.bytestr(user)
            )

        desc = stripdesc(desc)

        if date:
            parseddate = b"%d %d" % dateutil.parsedate(date)
        else:
            parseddate = b"%d %d" % dateutil.makedate()
        if extra:
            branch = extra.get(b"branch")
            if branch in (b"default", b""):
                del extra[b"branch"]
            elif branch in (b".", b"null", b"tip"):
                raise error.StorageError(
                    _(b'the name \'%s\' is reserved') % branch
                )
        sortedfiles = sorted(files.touched)
        sidedata = None
        if self._copiesstorage == b'changeset-sidedata':
            sidedata = metadata.encode_files_sidedata(files)

        if extra:
            extra = encodeextra(extra)
            parseddate = b"%s %s" % (parseddate, extra)
        l = [hex(manifest), user, parseddate] + sortedfiles + [b"", desc]
        text = b"\n".join(l)
        return self.addrevision(
            text, transaction, len(self), p1, p2, sidedata=sidedata
        )

    def branchinfo(self, rev):
        """return the branch name and open/close state of a revision

        This function exists because creating a changectx object
        just to access this is costly."""
        extra = self.read(rev)[5]
        return encoding.tolocal(extra.get(b"branch")), b'close' in extra

    def _nodeduplicatecallback(self, transaction, node):
        # keep track of revisions that got "re-added", e.g. unbundle of a
        # known rev.
        #
        # We track them in a list to preserve their order from the source
        # bundle
        duplicates = transaction.changes.setdefault(b'revduplicates', [])
        duplicates.append(self.rev(node))

mercurial/metadata.py
@@ -1,620 +1,620 @@
# metadata.py -- code related to various metadata computation and access.
#
# Copyright 2019 Google, Inc <martinvonz@google.com>
# Copyright 2020 Pierre-Yves David <pierre-yves.david@octobus.net>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import, print_function

import multiprocessing
import struct

from . import (
    error,
    node,
    pycompat,
    util,
)

from .revlogutils import (
    flagutil as sidedataflag,
    sidedata as sidedatamod,
)


class ChangingFiles(object):
    """A class recording the changes made to files by a changeset

    Actions performed on files are gathered into 4 sets:

    - added: files actively added in the changeset.
    - merged: files whose history got merged
    - removed: files removed in the revision
    - touched: files affected by the merge

    and copy information is held by 2 mappings

    - copied_from_p1: {"<new-name>": "<source-name-in-p1>"} mapping for copies
    - copied_from_p2: {"<new-name>": "<source-name-in-p2>"} mapping for copies

    See their inline help for details.
    """

    def __init__(
        self,
        touched=None,
        added=None,
        removed=None,
        merged=None,
        p1_copies=None,
        p2_copies=None,
    ):
        self._added = set(() if added is None else added)
        self._merged = set(() if merged is None else merged)
        self._removed = set(() if removed is None else removed)
        self._touched = set(() if touched is None else touched)
        self._touched.update(self._added)
        self._touched.update(self._merged)
        self._touched.update(self._removed)
        self._p1_copies = dict(() if p1_copies is None else p1_copies)
        self._p2_copies = dict(() if p2_copies is None else p2_copies)

    def __eq__(self, other):
        return (
            self.added == other.added
            and self.merged == other.merged
            and self.removed == other.removed
            and self.touched == other.touched
            and self.copied_from_p1 == other.copied_from_p1
            and self.copied_from_p2 == other.copied_from_p2
        )

    @util.propertycache
    def added(self):
        """files actively added in the changeset

        Any file present in that revision that was absent in all the
        changeset's parents.

        In case of merge, this means a file absent in one of the parents but
        existing in the other will *not* be contained in this set. (They were
        added by an ancestor)
        """
        return frozenset(self._added)

    def mark_added(self, filename):
        if 'added' in vars(self):
            del self.added
        self._added.add(filename)
        self.mark_touched(filename)

    def update_added(self, filenames):
        for f in filenames:
            self.mark_added(f)

    @util.propertycache
    def merged(self):
        """files actively merged during a merge

        Any modified files which had modifications on both sides that needed
        merging.

        In this case a new filenode was created and it has two parents.
        """
        return frozenset(self._merged)

    def mark_merged(self, filename):
        if 'merged' in vars(self):
            del self.merged
        self._merged.add(filename)
        self.mark_touched(filename)

    def update_merged(self, filenames):
        for f in filenames:
            self.mark_merged(f)

    @util.propertycache
    def removed(self):
        """files actively removed by the changeset

        In case of merge this will only contain the set of files removing
        "new" content. For any file absent in the current changeset:

        a) If the file exists in both parents, it is clearly "actively"
           removed by this changeset.

        b) If a file exists in only one parent and in none of the common
           ancestors, then the file was newly added in one of the merged
           branches and then got "actively" removed.

        c) If a file exists in only one parent and at least one of the common
           ancestors using the same filenode, then the file was unchanged on
           one side and deleted on the other side. The merge "passively"
           propagated that deletion, but didn't "actively" remove the file.
           In this case the file is *not* included in the `removed` set.

        d) If a file exists in only one parent and at least one of the common
           ancestors using a different filenode, then the file was changed on
           one side and removed on the other side. The merge process
           "actively" decided to drop the new change and delete the file.
           Unlike in the previous case, (c), the file is included in the
           `removed` set.

        Summary table for merge:

        case | exists in parents | exists in gca || removed
         (a) |       both        |       *       ||   yes
         (b) |       one         |      none     ||   yes
         (c) |       one         | same filenode ||   no
         (d) |       one         |  new filenode ||   yes
        """
        return frozenset(self._removed)

    def mark_removed(self, filename):
        if 'removed' in vars(self):
            del self.removed
        self._removed.add(filename)
        self.mark_touched(filename)

    def update_removed(self, filenames):
        for f in filenames:
            self.mark_removed(f)

    @util.propertycache
    def touched(self):
        """files either actively modified, added or removed"""
        return frozenset(self._touched)

    def mark_touched(self, filename):
        if 'touched' in vars(self):
            del self.touched
        self._touched.add(filename)

    def update_touched(self, filenames):
        for f in filenames:
            self.mark_touched(f)

    @util.propertycache
    def copied_from_p1(self):
        return self._p1_copies.copy()

    def mark_copied_from_p1(self, source, dest):
        if 'copied_from_p1' in vars(self):
            del self.copied_from_p1
        self._p1_copies[dest] = source

    def update_copies_from_p1(self, copies):
        for dest, source in copies.items():
            self.mark_copied_from_p1(source, dest)

    @util.propertycache
    def copied_from_p2(self):
        return self._p2_copies.copy()

    def mark_copied_from_p2(self, source, dest):
        if 'copied_from_p2' in vars(self):
            del self.copied_from_p2
        self._p2_copies[dest] = source

    def update_copies_from_p2(self, copies):
        for dest, source in copies.items():
            self.mark_copied_from_p2(source, dest)


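# Illustrative sketch (editor addition, not part of the original file):
# recording a few actions by hand. Note that mark_added/mark_removed also
# mark the file as touched, and that mark_copied_from_p1 takes
# (source, dest):
#
#     files = ChangingFiles()
#     files.mark_added(b'new.txt')
#     files.mark_copied_from_p1(b'old.txt', b'new.txt')
#     files.mark_removed(b'gone.txt')
#     sorted(files.touched)      # [b'gone.txt', b'new.txt']
#     files.copied_from_p1       # {b'new.txt': b'old.txt'}
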
def computechangesetfilesadded(ctx):
    """return the list of files added in a changeset
    """
    added = []
    for f in ctx.files():
        if not any(f in p for p in ctx.parents()):
            added.append(f)
    return added


def get_removal_filter(ctx, x=None):
    """return a function to detect files "wrongly" detected as `removed`

    When a file is removed relative to p1 in a merge, this
    function determines whether the absence is due to a
    deletion from a parent, or whether the merge commit
    itself deletes the file. We decide this by doing a
    simplified three way merge of the manifest entry for
    the file. There are two ways we decide the merge
    itself didn't delete a file:
    - neither parent (nor the merge) contains the file
    - exactly one parent contains the file, and that
      parent has the same filelog entry as the merge
      ancestor (or all of them if there are two). In other
      words, that parent left the file unchanged while the
      other one deleted it.
    One way to think about this is that deleting a file is
    similar to emptying it, so the list of changed files
    should be similar either way. The computation
    described above is not done directly in _filecommit
    when creating the list of changed files, however
    it does something very similar by comparing filelog
    nodes.
    """

    if x is not None:
        p1, p2, m1, m2 = x
    else:
        p1 = ctx.p1()
        p2 = ctx.p2()
        m1 = p1.manifest()
        m2 = p2.manifest()

    @util.cachefunc
    def mas():
        p1n = p1.node()
        p2n = p2.node()
        cahs = ctx.repo().changelog.commonancestorsheads(p1n, p2n)
        if not cahs:
            cahs = [node.nullrev]
        return [ctx.repo()[r].manifest() for r in cahs]

    def deletionfromparent(f):
        if f in m1:
            return f not in m2 and all(
                f in ma and ma.find(f) == m1.find(f) for ma in mas()
            )
        elif f in m2:
            return all(f in ma and ma.find(f) == m2.find(f) for ma in mas())
        else:
            return True

    return deletionfromparent


def computechangesetfilesremoved(ctx):
    """return the list of files removed in a changeset
    """
    removed = []
    for f in ctx.files():
        if f not in ctx:
            removed.append(f)
    if removed:
        rf = get_removal_filter(ctx)
        removed = [r for r in removed if not rf(r)]
    return removed


def computechangesetfilesmerged(ctx):
    """return the list of files merged in a changeset
    """
    merged = []
    if len(ctx.parents()) < 2:
        return merged
    for f in ctx.files():
        if f in ctx:
            fctx = ctx[f]
            parents = fctx._filelog.parents(fctx._filenode)
            if parents[1] != node.nullid:
                merged.append(f)
    return merged


def computechangesetcopies(ctx):
    """return the copies data for a changeset

    The copies data are returned as a pair of dictionaries
    (p1copies, p2copies).

    Each dictionary is of the form: `{newname: oldname}`
    """
    p1copies = {}
    p2copies = {}
    p1 = ctx.p1()
    p2 = ctx.p2()
    narrowmatch = ctx._repo.narrowmatch()
    for dst in ctx.files():
        if not narrowmatch(dst) or dst not in ctx:
            continue
        copied = ctx[dst].renamed()
        if not copied:
            continue
        src, srcnode = copied
        if src in p1 and p1[src].filenode() == srcnode:
            p1copies[dst] = src
        elif src in p2 and p2[src].filenode() == srcnode:
            p2copies[dst] = src
    return p1copies, p2copies


def encodecopies(files, copies):
    items = []
    for i, dst in enumerate(files):
        if dst in copies:
            items.append(b'%d\0%s' % (i, copies[dst]))
    if len(items) != len(copies):
        raise error.ProgrammingError(
            b'some copy targets missing from file list'
        )
    return b"\n".join(items)


def decodecopies(files, data):
    try:
        copies = {}
        if not data:
            return copies
        for l in data.split(b'\n'):
            strindex, src = l.split(b'\0')
            i = int(strindex)
            dst = files[i]
            copies[dst] = src
        return copies
    except (ValueError, IndexError):
        # Perhaps someone had chosen the same key name (e.g. "p1copies") and
        # used different syntax for the value.
        return None


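# Illustrative sketch (editor addition, not part of the original file):
# copies are encoded as newline-separated "<file-index>\0<source>" pairs,
# with indices pointing into the changeset's files list:
#
#     files = [b'a', b'b', b'c']
#     encodecopies(files, {b'b': b'a'})   # -> b'1\x00a'
#     decodecopies(files, b'1\x00a')      # -> {b'b': b'a'}
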
def encodefileindices(files, subset):
    subset = set(subset)
    indices = []
    for i, f in enumerate(files):
        if f in subset:
            indices.append(b'%d' % i)
    return b'\n'.join(indices)


def decodefileindices(files, data):
    try:
        subset = []
        if not data:
            return subset
        for strindex in data.split(b'\n'):
            i = int(strindex)
            if i < 0 or i >= len(files):
                return None
            subset.append(files[i])
        return subset
    except (ValueError, IndexError):
        # Perhaps someone had chosen the same key name (e.g. "added") and
        # used different syntax for the value.
        return None


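# Illustrative sketch (editor addition, not part of the original file):
# file subsets are stored as newline-separated indices into the files list:
#
#     files = [b'a', b'b', b'c']
#     encodefileindices(files, [b'a', b'c'])  # -> b'0\n2'
#     decodefileindices(files, b'0\n2')       # -> [b'a', b'c']
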
# see mercurial/helptext/internals/revlogs.txt for details about the format

ACTION_MASK = int("111" "00", 2)
# note: an untouched file used as a copy source will have `000` for this mask.
ADDED_FLAG = int("001" "00", 2)
MERGED_FLAG = int("010" "00", 2)
REMOVED_FLAG = int("011" "00", 2)
# `100` is reserved for future use
TOUCHED_FLAG = int("101" "00", 2)

COPIED_MASK = int("11", 2)
COPIED_FROM_P1_FLAG = int("10", 2)
COPIED_FROM_P2_FLAG = int("11", 2)

# structure is <flag><filename-end><copy-source>
INDEX_HEADER = struct.Struct(">L")
INDEX_ENTRY = struct.Struct(">bLL")


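# Illustrative sketch (added, not part of the change): how one flag byte
# decomposes. The action lives in bits 2-4 and the copy information in bits
# 0-1, so a file that was added as a copy from p1 carries both at once:
#
#   >>> flag = ADDED_FLAG | COPIED_FROM_P1_FLAG  # 0b00110
#   >>> flag & ACTION_MASK == ADDED_FLAG
#   True
#   >>> flag & COPIED_MASK == COPIED_FROM_P1_FLAG
#   True
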
def encode_files_sidedata(files):
    all_files = set(files.touched)
    all_files.update(files.copied_from_p1.values())
    all_files.update(files.copied_from_p2.values())
    all_files = sorted(all_files)
    file_idx = {f: i for (i, f) in enumerate(all_files)}
    file_idx[None] = 0

    chunks = [INDEX_HEADER.pack(len(all_files))]

    filename_length = 0
    for f in all_files:
        filename_size = len(f)
        filename_length += filename_size
        flag = 0
        if f in files.added:
            flag |= ADDED_FLAG
        elif f in files.merged:
            flag |= MERGED_FLAG
        elif f in files.removed:
            flag |= REMOVED_FLAG
        elif f in files.touched:
            flag |= TOUCHED_FLAG

        copy = None
        if f in files.copied_from_p1:
            flag |= COPIED_FROM_P1_FLAG
            copy = files.copied_from_p1.get(f)
        elif f in files.copied_from_p2:
            copy = files.copied_from_p2.get(f)
            flag |= COPIED_FROM_P2_FLAG
        copy_idx = file_idx[copy]
        chunks.append(INDEX_ENTRY.pack(flag, filename_length, copy_idx))
    chunks.extend(all_files)
    return {sidedatamod.SD_FILES: b''.join(chunks)}


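# Illustrative sketch (added, not part of the change), assuming the mark_*
# methods used by decode_files_sidedata() below are the public way to
# populate a ChangingFiles: encoding one added file copied from p1.
#
#   >>> files = ChangingFiles()
#   >>> files.mark_added(b'b')
#   >>> files.mark_copied_from_p1(b'a', b'b')
#   >>> blob = encode_files_sidedata(files)[sidedatamod.SD_FILES]
#
# The blob is then a 4-byte big-endian file count (2, since b'a' is pulled in
# as a copy source), one 9-byte (flag, filename-end, copy-index) entry per
# file, and the concatenated filenames b'ab' at the end; b'b' carries
# ADDED_FLAG | COPIED_FROM_P1_FLAG and a copy-index of 0, pointing at b'a'.
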
-def decode_files_sidedata(changelogrevision, sidedata):
+def decode_files_sidedata(sidedata):
    md = ChangingFiles()
    raw = sidedata.get(sidedatamod.SD_FILES)

    if raw is None:
        return md

    copies = []
    all_files = []

    assert len(raw) >= INDEX_HEADER.size
    total_files = INDEX_HEADER.unpack_from(raw, 0)[0]

    offset = INDEX_HEADER.size
    file_offset_base = offset + (INDEX_ENTRY.size * total_files)
    file_offset_last = file_offset_base

    assert len(raw) >= file_offset_base

    for idx in range(total_files):
        flag, file_end, copy_idx = INDEX_ENTRY.unpack_from(raw, offset)
        file_end += file_offset_base
        filename = raw[file_offset_last:file_end]
        filesize = file_end - file_offset_last
        assert len(filename) == filesize
        offset += INDEX_ENTRY.size
        file_offset_last = file_end
        all_files.append(filename)
        if flag & ACTION_MASK == ADDED_FLAG:
            md.mark_added(filename)
        elif flag & ACTION_MASK == MERGED_FLAG:
            md.mark_merged(filename)
        elif flag & ACTION_MASK == REMOVED_FLAG:
            md.mark_removed(filename)
        elif flag & ACTION_MASK == TOUCHED_FLAG:
            md.mark_touched(filename)

        copied = None
        if flag & COPIED_MASK == COPIED_FROM_P1_FLAG:
            copied = md.mark_copied_from_p1
        elif flag & COPIED_MASK == COPIED_FROM_P2_FLAG:
            copied = md.mark_copied_from_p2

        if copied is not None:
            copies.append((copied, filename, copy_idx))

    for copied, filename, copy_idx in copies:
        copied(all_files[copy_idx], filename)

    return md


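# Illustrative sketch (added, not part of the change): encode and decode are
# inverses, so, assuming ChangingFiles supports equality comparison, the
# round-trip rebuilds the same metadata:
#
#   >>> files = ChangingFiles()
#   >>> files.mark_added(b'b')
#   >>> files.mark_copied_from_p1(b'a', b'b')
#   >>> decode_files_sidedata(encode_files_sidedata(files)) == files
#   True
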
def _getsidedata(srcrepo, rev):
    ctx = srcrepo[rev]
    filescopies = computechangesetcopies(ctx)
    filesadded = computechangesetfilesadded(ctx)
    filesremoved = computechangesetfilesremoved(ctx)
    filesmerged = computechangesetfilesmerged(ctx)
    files = ChangingFiles()
    files.update_touched(ctx.files())
    files.update_added(filesadded)
    files.update_removed(filesremoved)
    files.update_merged(filesmerged)
    files.update_copies_from_p1(filescopies[0])
    files.update_copies_from_p2(filescopies[1])
    return encode_files_sidedata(files)


def getsidedataadder(srcrepo, destrepo):
    use_w = srcrepo.ui.configbool(b'experimental', b'worker.repository-upgrade')
    if pycompat.iswindows or not use_w:
        return _get_simple_sidedata_adder(srcrepo, destrepo)
    else:
        return _get_worker_sidedata_adder(srcrepo, destrepo)


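# Usage note (added): the parallel path is gated on the experimental knob read
# above; something like the following in an hgrc opts in (it is ignored on
# Windows, where the simple adder is always used):
#
#   [experimental]
#   worker.repository-upgrade = yes
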
def _sidedata_worker(srcrepo, revs_queue, sidedata_queue, tokens):
    """The function used by a worker to precompute sidedata

    It reads revision numbers from an input queue and writes
    (rev, <sidedata-map>) pairs to an output queue.

    The `None` input value is used as a stop signal.

    The `tokens` semaphore is used to avoid having too many unprocessed
    entries. A worker needs to acquire one token before fetching a task.
    Tokens are released by the consumer of the produced data.
    """
    tokens.acquire()
    rev = revs_queue.get()
    while rev is not None:
        data = _getsidedata(srcrepo, rev)
        sidedata_queue.put((rev, data))
        tokens.acquire()
        rev = revs_queue.get()
    # processing of `None` is completed, release the token.
    tokens.release()


BUFF_PER_WORKER = 50


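# Note (added): combined with the acquire-before-fetch discipline in
# _sidedata_worker() above, this bounds the backlog: at most
# nbworkers * BUFF_PER_WORKER sidedata maps can sit computed-but-unconsumed
# (queued or shelved) before the producers block on the semaphore.
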
def _get_worker_sidedata_adder(srcrepo, destrepo):
    """The parallel version of the sidedata computation

    This code spawns a pool of workers that precompute a buffer of sidedata
    before we actually need them"""
    # avoid circular import copies -> scmutil -> worker -> copies
    from . import worker

    nbworkers = worker._numworkers(srcrepo.ui)

    tokens = multiprocessing.BoundedSemaphore(nbworkers * BUFF_PER_WORKER)
    revsq = multiprocessing.Queue()
    sidedataq = multiprocessing.Queue()

    assert srcrepo.filtername is None
    # queue all tasks beforehand; revision numbers are small and it makes
    # synchronisation simpler
    #
    # Since the computation for each node can be quite expensive, the overhead
    # of using a single queue is not relevant. In practice, most computations
    # are fast but some are very expensive and dominate all the other smaller
    # costs.
    for r in srcrepo.changelog.revs():
        revsq.put(r)
    # queue the "no more tasks" markers
    for i in range(nbworkers):
        revsq.put(None)

    allworkers = []
    for i in range(nbworkers):
        args = (srcrepo, revsq, sidedataq, tokens)
        w = multiprocessing.Process(target=_sidedata_worker, args=args)
        allworkers.append(w)
        w.start()

    # dictionary to store results for revisions higher than the one we are
    # looking for. For example, if we need the sidedatamap for 42, and 43 is
    # received, we shelve 43 for later use.
    staging = {}

    def sidedata_companion(revlog, rev):
        sidedata = {}
        if util.safehasattr(revlog, 'filteredrevs'):  # this is a changelog
            # Is the data previously shelved?
            sidedata = staging.pop(rev, None)
            if sidedata is None:
                # look at the queued results until we find the one we are
                # looking for (shelve the other ones)
                r, sidedata = sidedataq.get()
                while r != rev:
                    staging[r] = sidedata
                    r, sidedata = sidedataq.get()
            tokens.release()
        return False, (), sidedata

    return sidedata_companion


def _get_simple_sidedata_adder(srcrepo, destrepo):
    """The simple version of the sidedata computation

    It just computes it in the same thread on request"""

    def sidedatacompanion(revlog, rev):
        sidedata = {}
        if util.safehasattr(revlog, 'filteredrevs'):  # this is a changelog
            sidedata = _getsidedata(srcrepo, rev)
        return False, (), sidedata

    return sidedatacompanion


def getsidedataremover(srcrepo, destrepo):
    def sidedatacompanion(revlog, rev):
        f = ()
        if util.safehasattr(revlog, 'filteredrevs'):  # this is a changelog
            if revlog.flags(rev) & sidedataflag.REVIDX_SIDEDATA:
                f = (
                    sidedatamod.SD_P1COPIES,
                    sidedatamod.SD_P2COPIES,
                    sidedatamod.SD_FILESADDED,
                    sidedatamod.SD_FILESREMOVED,
                )
        return False, f, {}

    return sidedatacompanion
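
# Hedged note (added): each sidedatacompanion above follows the same
# three-item return convention visible in this file: a boolean flag (always
# False here), a tuple of sidedata keys to drop, and a map of new sidedata
# entries to store. The adders return fresh maps with nothing to drop; the
# remover returns the copy-related keys to drop and an empty map.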