commitctx: directly pass a ChangingFiles object to changelog.add...
Author: marmoute
Changeset: r45884:6c562773 (branch: default)
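The diff below spans two files. In changelog.py, `changelog.add()` drops its four copy-metadata keyword arguments (`p1copies`, `p2copies`, `filesadded`, `filesremoved`) and instead reads those values from the `files` argument, which is now expected to be a `metadata.ChangingFiles` object rather than a plain list of file names. In commit.py, `commitctx()` passes the `ChangingFiles` object built by `_prepare_files()` straight through. As orientation, here is a minimal sketch of the calling convention before and after; `FakeChangingFiles` is a hypothetical stand-in that mirrors only the attributes this diff actually reads, not Mercurial's real `metadata.ChangingFiles` class:

    # Hypothetical stand-in for mercurial.metadata.ChangingFiles, modeling
    # only the attributes that changelog.add reads after this change.
    class FakeChangingFiles(object):
        def __init__(self, touched, added, removed, copied_p1, copied_p2):
            self.touched = set(touched)            # every file the cset touches
            self.added = set(added)                # subset of touched: new files
            self.removed = set(removed)            # subset of touched: deleted files
            self.copied_from_p1 = dict(copied_p1)  # dest -> source copies vs p1
            self.copied_from_p2 = dict(copied_p2)  # dest -> source copies vs p2

    def add_before(cl, mn, files, desc, tr, p1, p2, user, date, extra):
        # old convention: the caller unpacks every piece of file metadata
        return cl.add(mn, files.touched, desc, tr, p1, p2, user, date, extra,
                      files.copied_from_p1, files.copied_from_p2,
                      files.added, files.removed)

    def add_after(cl, mn, files, desc, tr, p1, p2, user, date, extra):
        # new convention: pass the ChangingFiles object whole; changelog.add
        # reads .touched, .copied_from_p1, .copied_from_p2, .added, .removed
        return cl.add(mn, files, desc, tr, p1, p2, user, date, extra)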
changelog.py
@@ -1,605 +1,605 @@
# changelog.py - changelog class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

from .i18n import _
from .node import (
    bin,
    hex,
    nullid,
)
from .thirdparty import attr

from . import (
    encoding,
    error,
    metadata,
    pycompat,
    revlog,
)
from .utils import (
    dateutil,
    stringutil,
)

from .revlogutils import sidedata as sidedatamod

_defaultextra = {b'branch': b'default'}


def _string_escape(text):
    """
    >>> from .pycompat import bytechr as chr
    >>> d = {b'nl': chr(10), b'bs': chr(92), b'cr': chr(13), b'nul': chr(0)}
    >>> s = b"ab%(nl)scd%(bs)s%(bs)sn%(nul)s12ab%(cr)scd%(bs)s%(nl)s" % d
    >>> s
    'ab\\ncd\\\\\\\\n\\x0012ab\\rcd\\\\\\n'
    >>> res = _string_escape(s)
    >>> s == _string_unescape(res)
    True
    """
    # subset of the string_escape codec
    text = (
        text.replace(b'\\', b'\\\\')
        .replace(b'\n', b'\\n')
        .replace(b'\r', b'\\r')
    )
    return text.replace(b'\0', b'\\0')


def _string_unescape(text):
    if b'\\0' in text:
        # fix up \0 without getting into trouble with \\0
        text = text.replace(b'\\\\', b'\\\\\n')
        text = text.replace(b'\\0', b'\0')
        text = text.replace(b'\n', b'')
    return stringutil.unescapestr(text)


def decodeextra(text):
    """
    >>> from .pycompat import bytechr as chr
    >>> sorted(decodeextra(encodeextra({b'foo': b'bar', b'baz': chr(0) + b'2'})
    ...                    ).items())
    [('baz', '\\x002'), ('branch', 'default'), ('foo', 'bar')]
    >>> sorted(decodeextra(encodeextra({b'foo': b'bar',
    ...                                 b'baz': chr(92) + chr(0) + b'2'})
    ...                    ).items())
    [('baz', '\\\\\\x002'), ('branch', 'default'), ('foo', 'bar')]
    """
    extra = _defaultextra.copy()
    for l in text.split(b'\0'):
        if l:
            k, v = _string_unescape(l).split(b':', 1)
            extra[k] = v
    return extra


def encodeextra(d):
    # keys must be sorted to produce a deterministic changelog entry
    items = [_string_escape(b'%s:%s' % (k, d[k])) for k in sorted(d)]
    return b"\0".join(items)


def stripdesc(desc):
    """strip trailing whitespace and leading and trailing empty lines"""
    return b'\n'.join([l.rstrip() for l in desc.splitlines()]).strip(b'\n')


class appender(object):
    '''the changelog index must be updated last on disk, so we use this class
    to delay writes to it'''

    def __init__(self, vfs, name, mode, buf):
        self.data = buf
        fp = vfs(name, mode)
        self.fp = fp
        self.offset = fp.tell()
        self.size = vfs.fstat(fp).st_size
        self._end = self.size

    def end(self):
        return self._end

    def tell(self):
        return self.offset

    def flush(self):
        pass

    @property
    def closed(self):
        return self.fp.closed

    def close(self):
        self.fp.close()

    def seek(self, offset, whence=0):
        '''virtual file offset spans real file and data'''
        if whence == 0:
            self.offset = offset
        elif whence == 1:
            self.offset += offset
        elif whence == 2:
            self.offset = self.end() + offset
        if self.offset < self.size:
            self.fp.seek(self.offset)

    def read(self, count=-1):
        '''only trick here is reads that span real file and data'''
        ret = b""
        if self.offset < self.size:
            s = self.fp.read(count)
            ret = s
            self.offset += len(s)
            if count > 0:
                count -= len(s)
        if count != 0:
            doff = self.offset - self.size
            self.data.insert(0, b"".join(self.data))
            del self.data[1:]
            s = self.data[0][doff : doff + count]
            self.offset += len(s)
            ret += s
        return ret

    def write(self, s):
        self.data.append(bytes(s))
        self.offset += len(s)
        self._end += len(s)

    def __enter__(self):
        self.fp.__enter__()
        return self

    def __exit__(self, *args):
        return self.fp.__exit__(*args)


class _divertopener(object):
    def __init__(self, opener, target):
        self._opener = opener
        self._target = target

    def __call__(self, name, mode=b'r', checkambig=False, **kwargs):
        if name != self._target:
            return self._opener(name, mode, **kwargs)
        return self._opener(name + b".a", mode, **kwargs)

    def __getattr__(self, attr):
        return getattr(self._opener, attr)


def _delayopener(opener, target, buf):
    """build an opener that stores chunks in 'buf' instead of 'target'"""

    def _delay(name, mode=b'r', checkambig=False, **kwargs):
        if name != target:
            return opener(name, mode, **kwargs)
        assert not kwargs
        return appender(opener, name, mode, buf)

    return _delay


@attr.s
class _changelogrevision(object):
    # Extensions might modify _defaultextra, so let the constructor below pass
    # it in
    extra = attr.ib()
    manifest = attr.ib(default=nullid)
    user = attr.ib(default=b'')
    date = attr.ib(default=(0, 0))
    files = attr.ib(default=attr.Factory(list))
    filesadded = attr.ib(default=None)
    filesremoved = attr.ib(default=None)
    p1copies = attr.ib(default=None)
    p2copies = attr.ib(default=None)
    description = attr.ib(default=b'')


class changelogrevision(object):
    """Holds results of a parsed changelog revision.

    Changelog revisions consist of multiple pieces of data, including
    the manifest node, user, and date. This object exposes a view into
    the parsed object.
    """

    __slots__ = (
        '_offsets',
        '_text',
        '_sidedata',
        '_cpsd',
    )

    def __new__(cls, text, sidedata, cpsd):
        if not text:
            return _changelogrevision(extra=_defaultextra)

        self = super(changelogrevision, cls).__new__(cls)
        # We could return here and implement the following as an __init__.
        # But doing it here is equivalent and saves an extra function call.

        # format used:
        # nodeid\n        : manifest node in ascii
        # user\n          : user, no \n or \r allowed
        # time tz extra\n : date (time is int or float, timezone is int)
        #                 : extra is metadata, encoded and separated by '\0'
        #                 : older versions ignore it
        # files\n\n       : files modified by the cset, no \n or \r allowed
        # (.*)            : comment (free text, ideally utf-8)
        #
        # changelog v0 doesn't use extra

        nl1 = text.index(b'\n')
        nl2 = text.index(b'\n', nl1 + 1)
        nl3 = text.index(b'\n', nl2 + 1)

        # The list of files may be empty. Which means nl3 is the first of the
        # double newline that precedes the description.
        if text[nl3 + 1 : nl3 + 2] == b'\n':
            doublenl = nl3
        else:
            doublenl = text.index(b'\n\n', nl3 + 1)

        self._offsets = (nl1, nl2, nl3, doublenl)
        self._text = text
        self._sidedata = sidedata
        self._cpsd = cpsd

        return self

    @property
    def manifest(self):
        return bin(self._text[0 : self._offsets[0]])

    @property
    def user(self):
        off = self._offsets
        return encoding.tolocal(self._text[off[0] + 1 : off[1]])

    @property
    def _rawdate(self):
        off = self._offsets
        dateextra = self._text[off[1] + 1 : off[2]]
        return dateextra.split(b' ', 2)[0:2]

    @property
    def _rawextra(self):
        off = self._offsets
        dateextra = self._text[off[1] + 1 : off[2]]
        fields = dateextra.split(b' ', 2)
        if len(fields) != 3:
            return None

        return fields[2]

    @property
    def date(self):
        raw = self._rawdate
        time = float(raw[0])
        # Various tools did silly things with the timezone.
        try:
            timezone = int(raw[1])
        except ValueError:
            timezone = 0

        return time, timezone

    @property
    def extra(self):
        raw = self._rawextra
        if raw is None:
            return _defaultextra

        return decodeextra(raw)

    @property
    def files(self):
        off = self._offsets
        if off[2] == off[3]:
            return []

        return self._text[off[2] + 1 : off[3]].split(b'\n')

    @property
    def filesadded(self):
        if self._cpsd:
            rawindices = self._sidedata.get(sidedatamod.SD_FILESADDED)
            if not rawindices:
                return []
        else:
            rawindices = self.extra.get(b'filesadded')
        if rawindices is None:
            return None
        return metadata.decodefileindices(self.files, rawindices)

    @property
    def filesremoved(self):
        if self._cpsd:
            rawindices = self._sidedata.get(sidedatamod.SD_FILESREMOVED)
            if not rawindices:
                return []
        else:
            rawindices = self.extra.get(b'filesremoved')
        if rawindices is None:
            return None
        return metadata.decodefileindices(self.files, rawindices)

    @property
    def p1copies(self):
        if self._cpsd:
            rawcopies = self._sidedata.get(sidedatamod.SD_P1COPIES)
            if not rawcopies:
                return {}
        else:
            rawcopies = self.extra.get(b'p1copies')
        if rawcopies is None:
            return None
        return metadata.decodecopies(self.files, rawcopies)

    @property
    def p2copies(self):
        if self._cpsd:
            rawcopies = self._sidedata.get(sidedatamod.SD_P2COPIES)
            if not rawcopies:
                return {}
        else:
            rawcopies = self.extra.get(b'p2copies')
        if rawcopies is None:
            return None
        return metadata.decodecopies(self.files, rawcopies)

    @property
    def description(self):
        return encoding.tolocal(self._text[self._offsets[3] + 2 :])


class changelog(revlog.revlog):
    def __init__(self, opener, trypending=False):
        """Load a changelog revlog using an opener.

        If ``trypending`` is true, we attempt to load the index from a
        ``00changelog.i.a`` file instead of the default ``00changelog.i``.
        The ``00changelog.i.a`` file contains index (and possibly inline
        revision) data for a transaction that hasn't been finalized yet.
        It exists in a separate file to facilitate readers (such as
        hooks processes) accessing data before a transaction is finalized.
        """
        if trypending and opener.exists(b'00changelog.i.a'):
            indexfile = b'00changelog.i.a'
        else:
            indexfile = b'00changelog.i'

        datafile = b'00changelog.d'
        revlog.revlog.__init__(
            self,
            opener,
            indexfile,
            datafile=datafile,
            checkambig=True,
            mmaplargeindex=True,
            persistentnodemap=opener.options.get(b'persistent-nodemap', False),
        )

        if self._initempty and (self.version & 0xFFFF == revlog.REVLOGV1):
            # changelogs don't benefit from generaldelta.

            self.version &= ~revlog.FLAG_GENERALDELTA
            self._generaldelta = False

        # Delta chains for changelogs tend to be very small because entries
        # tend to be small and don't delta well with each other. So disable
        # delta chains.
        self._storedeltachains = False

        self._realopener = opener
        self._delayed = False
        self._delaybuf = None
        self._divert = False
        self.filteredrevs = frozenset()
        self._copiesstorage = opener.options.get(b'copies-storage')

    def delayupdate(self, tr):
        """delay visibility of index updates to other readers"""

        if not self._delayed:
            if len(self) == 0:
                self._divert = True
                if self._realopener.exists(self.indexfile + b'.a'):
                    self._realopener.unlink(self.indexfile + b'.a')
                self.opener = _divertopener(self._realopener, self.indexfile)
            else:
                self._delaybuf = []
                self.opener = _delayopener(
                    self._realopener, self.indexfile, self._delaybuf
                )
        self._delayed = True
        tr.addpending(b'cl-%i' % id(self), self._writepending)
        tr.addfinalize(b'cl-%i' % id(self), self._finalize)

    def _finalize(self, tr):
        """finalize index updates"""
        self._delayed = False
        self.opener = self._realopener
        # move redirected index data back into place
        if self._divert:
            assert not self._delaybuf
            tmpname = self.indexfile + b".a"
            nfile = self.opener.open(tmpname)
            nfile.close()
            self.opener.rename(tmpname, self.indexfile, checkambig=True)
        elif self._delaybuf:
            fp = self.opener(self.indexfile, b'a', checkambig=True)
            fp.write(b"".join(self._delaybuf))
            fp.close()
            self._delaybuf = None
        self._divert = False
        # split when we're done
        self._enforceinlinesize(tr)

    def _writepending(self, tr):
        """create a file containing the unfinalized state for
        pretxnchangegroup"""
        if self._delaybuf:
            # make a temporary copy of the index
            fp1 = self._realopener(self.indexfile)
            pendingfilename = self.indexfile + b".a"
            # register as a temp file to ensure cleanup on failure
            tr.registertmp(pendingfilename)
            # write existing data
            fp2 = self._realopener(pendingfilename, b"w")
            fp2.write(fp1.read())
            # add pending data
            fp2.write(b"".join(self._delaybuf))
            fp2.close()
            # switch modes so finalize can simply rename
            self._delaybuf = None
            self._divert = True
            self.opener = _divertopener(self._realopener, self.indexfile)

        if self._divert:
            return True

        return False

    def _enforceinlinesize(self, tr, fp=None):
        if not self._delayed:
            revlog.revlog._enforceinlinesize(self, tr, fp)

    def read(self, node):
        """Obtain data from a parsed changelog revision.

        Returns a 6-tuple of:

        - manifest node in binary
        - author/user as a localstr
        - date as a 2-tuple of (time, timezone)
        - list of files
        - commit message as a localstr
        - dict of extra metadata

        Unless you need to access all fields, consider calling
        ``changelogrevision`` instead, as it is faster for partial object
        access.
        """
        d, s = self._revisiondata(node)
        c = changelogrevision(
            d, s, self._copiesstorage == b'changeset-sidedata'
        )
        return (c.manifest, c.user, c.date, c.files, c.description, c.extra)

    def changelogrevision(self, nodeorrev):
        """Obtain a ``changelogrevision`` for a node or revision."""
        text, sidedata = self._revisiondata(nodeorrev)
        return changelogrevision(
            text, sidedata, self._copiesstorage == b'changeset-sidedata'
        )

    def readfiles(self, node):
        """
        short version of read that only returns the files modified by the cset
        """
        text = self.revision(node)
        if not text:
            return []
        last = text.index(b"\n\n")
        l = text[:last].split(b'\n')
        return l[3:]

    def add(
        self,
        manifest,
        files,
        desc,
        transaction,
        p1,
        p2,
        user,
        date=None,
        extra=None,
-        p1copies=None,
-        p2copies=None,
-        filesadded=None,
-        filesremoved=None,
    ):
        # Convert to UTF-8 encoded bytestrings as the very first
        # thing: calling any method on a localstr object will turn it
        # into a str object and the cached UTF-8 string is thus lost.
        user, desc = encoding.fromlocal(user), encoding.fromlocal(desc)

        user = user.strip()
        # An empty username or a username with a "\n" will make the
        # revision text contain two "\n\n" sequences -> corrupt
        # repository since read cannot unpack the revision.
        if not user:
            raise error.StorageError(_(b"empty username"))
        if b"\n" in user:
            raise error.StorageError(
                _(b"username %r contains a newline") % pycompat.bytestr(user)
            )

        desc = stripdesc(desc)

        if date:
            parseddate = b"%d %d" % dateutil.parsedate(date)
        else:
            parseddate = b"%d %d" % dateutil.makedate()
        if extra:
            branch = extra.get(b"branch")
            if branch in (b"default", b""):
                del extra[b"branch"]
            elif branch in (b".", b"null", b"tip"):
                raise error.StorageError(
                    _(b'the name \'%s\' is reserved') % branch
                )
-        sortedfiles = sorted(files)
+        sortedfiles = sorted(files.touched)
        sidedata = None
        if self._copiesstorage == b'changeset-sidedata':
            sidedata = {}
+            p1copies = files.copied_from_p1
            if p1copies:
                p1copies = metadata.encodecopies(sortedfiles, p1copies)
                sidedata[sidedatamod.SD_P1COPIES] = p1copies
+            p2copies = files.copied_from_p2
            if p2copies:
                p2copies = metadata.encodecopies(sortedfiles, p2copies)
                sidedata[sidedatamod.SD_P2COPIES] = p2copies
+            filesadded = files.added
            if filesadded:
                filesadded = metadata.encodefileindices(sortedfiles, filesadded)
                sidedata[sidedatamod.SD_FILESADDED] = filesadded
+            filesremoved = files.removed
            if filesremoved:
                filesremoved = metadata.encodefileindices(
                    sortedfiles, filesremoved
                )
                sidedata[sidedatamod.SD_FILESREMOVED] = filesremoved
            if not sidedata:
                sidedata = None

        if extra:
            extra = encodeextra(extra)
            parseddate = b"%s %s" % (parseddate, extra)
        l = [hex(manifest), user, parseddate] + sortedfiles + [b"", desc]
        text = b"\n".join(l)
        return self.addrevision(
            text, transaction, len(self), p1, p2, sidedata=sidedata
        )

    def branchinfo(self, rev):
        """return the branch name and open/close state of a revision

        This function exists because creating a changectx object
        just to access this is costly."""
        extra = self.read(rev)[5]
        return encoding.tolocal(extra.get(b"branch")), b'close' in extra

    def _nodeduplicatecallback(self, transaction, node):
        # keep track of revisions that got "re-added", eg: unbundle of known rev.
        #
        # We track them in a list to preserve their order from the source bundle
        duplicates = transaction.changes.setdefault(b'revduplicates', [])
        duplicates.append(self.rev(node))
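Before the commit.py hunk, it may help to see both halves of the new interface in one place: `_prepare_files()` fills a `ChangingFiles` object through its `update_*` methods, and `changelog.add()` reads the same data back as properties. Below is a self-contained sketch of that round trip, assuming a deliberately simplified model; the real class (`mercurial.metadata.ChangingFiles`) derives `touched` and validates its inputs differently:

    # Hypothetical minimal model of the ChangingFiles round trip.
    class ChangingFiles(object):
        def __init__(self):
            self._touched = set()
            self._added = set()
            self._removed = set()
            self._p1_copies = {}
            self._p2_copies = {}

        # writer side, used by commit.py's _prepare_files
        def update_touched(self, files):
            self._touched.update(files)

        def update_added(self, files):
            self._added.update(files)
            self._touched.update(files)

        def update_removed(self, files):
            self._removed.update(files)
            self._touched.update(files)

        def update_copies_from_p1(self, copies):
            self._p1_copies.update(copies)

        def update_copies_from_p2(self, copies):
            self._p2_copies.update(copies)

        # reader side, used by changelog.add after this changeset
        @property
        def touched(self):
            return self._touched

        @property
        def added(self):
            return self._added

        @property
        def removed(self):
            return self._removed

        @property
        def copied_from_p1(self):
            return self._p1_copies

        @property
        def copied_from_p2(self):
            return self._p2_copies

    # usage mirroring _prepare_files, with made-up file names
    files = ChangingFiles()
    files.update_touched([b'a.txt', b'b.txt'])
    files.update_added([b'b.txt'])
    files.update_copies_from_p1({b'b.txt': b'a.txt'})
    assert sorted(files.touched) == [b'a.txt', b'b.txt']
    assert files.added == {b'b.txt'}
    assert files.copied_from_p1 == {b'b.txt': b'a.txt'}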
@@ -1,456 +1,452 b''
1 # commit.py - fonction to perform commit
1 # commit.py - fonction to perform commit
2 #
2 #
3 # This software may be used and distributed according to the terms of the
3 # This software may be used and distributed according to the terms of the
4 # GNU General Public License version 2 or any later version.
4 # GNU General Public License version 2 or any later version.
5
5
6 from __future__ import absolute_import
6 from __future__ import absolute_import
7
7
8 import errno
8 import errno
9
9
10 from .i18n import _
10 from .i18n import _
11 from .node import (
11 from .node import (
12 hex,
12 hex,
13 nullid,
13 nullid,
14 nullrev,
14 nullrev,
15 )
15 )
16
16
17 from . import (
17 from . import (
18 context,
18 context,
19 mergestate,
19 mergestate,
20 metadata,
20 metadata,
21 phases,
21 phases,
22 scmutil,
22 scmutil,
23 subrepoutil,
23 subrepoutil,
24 )
24 )
25
25
26
26
27 def _write_copy_meta(repo):
27 def _write_copy_meta(repo):
28 """return a (changelog, filelog) boolean tuple
28 """return a (changelog, filelog) boolean tuple
29
29
30 changelog: copy related information should be stored in the changeset
30 changelog: copy related information should be stored in the changeset
31 filelof: copy related information should be written in the file revision
31 filelof: copy related information should be written in the file revision
32 """
32 """
33 if repo.filecopiesmode == b'changeset-sidedata':
33 if repo.filecopiesmode == b'changeset-sidedata':
34 writechangesetcopy = True
34 writechangesetcopy = True
35 writefilecopymeta = True
35 writefilecopymeta = True
36 else:
36 else:
37 writecopiesto = repo.ui.config(b'experimental', b'copies.write-to')
37 writecopiesto = repo.ui.config(b'experimental', b'copies.write-to')
38 writefilecopymeta = writecopiesto != b'changeset-only'
38 writefilecopymeta = writecopiesto != b'changeset-only'
39 writechangesetcopy = writecopiesto in (
39 writechangesetcopy = writecopiesto in (
40 b'changeset-only',
40 b'changeset-only',
41 b'compatibility',
41 b'compatibility',
42 )
42 )
43 return writechangesetcopy, writefilecopymeta
43 return writechangesetcopy, writefilecopymeta
44
44
45
45
46 def commitctx(repo, ctx, error=False, origctx=None):
46 def commitctx(repo, ctx, error=False, origctx=None):
47 """Add a new revision to the target repository.
47 """Add a new revision to the target repository.
48 Revision information is passed via the context argument.
48 Revision information is passed via the context argument.
49
49
50 ctx.files() should list all files involved in this commit, i.e.
50 ctx.files() should list all files involved in this commit, i.e.
51 modified/added/removed files. On merge, it may be wider than the
51 modified/added/removed files. On merge, it may be wider than the
52 ctx.files() to be committed, since any file nodes derived directly
52 ctx.files() to be committed, since any file nodes derived directly
53 from p1 or p2 are excluded from the committed ctx.files().
53 from p1 or p2 are excluded from the committed ctx.files().
54
54
55 origctx is for convert to work around the problem that bug
55 origctx is for convert to work around the problem that bug
56 fixes to the files list in changesets change hashes. For
56 fixes to the files list in changesets change hashes. For
57 convert to be the identity, it can pass an origctx and this
57 convert to be the identity, it can pass an origctx and this
58 function will use the same files list when it makes sense to
58 function will use the same files list when it makes sense to
59 do so.
59 do so.
60 """
60 """
61 repo = repo.unfiltered()
61 repo = repo.unfiltered()
62
62
63 p1, p2 = ctx.p1(), ctx.p2()
63 p1, p2 = ctx.p1(), ctx.p2()
64 user = ctx.user()
64 user = ctx.user()
65
65
66 with repo.lock(), repo.transaction(b"commit") as tr:
66 with repo.lock(), repo.transaction(b"commit") as tr:
67 mn, files = _prepare_files(tr, ctx, error=error, origctx=origctx)
67 mn, files = _prepare_files(tr, ctx, error=error, origctx=origctx)
68
68
69 extra = ctx.extra().copy()
69 extra = ctx.extra().copy()
70
70
71 if extra is not None:
71 if extra is not None:
72 for name in (
72 for name in (
73 b'p1copies',
73 b'p1copies',
74 b'p2copies',
74 b'p2copies',
75 b'filesadded',
75 b'filesadded',
76 b'filesremoved',
76 b'filesremoved',
77 ):
77 ):
78 extra.pop(name, None)
78 extra.pop(name, None)
79 if repo.changelog._copiesstorage == b'extra':
79 if repo.changelog._copiesstorage == b'extra':
80 extra = _extra_with_copies(repo, extra, files)
80 extra = _extra_with_copies(repo, extra, files)
81
81
82 # update changelog
82 # update changelog
83 repo.ui.note(_(b"committing changelog\n"))
83 repo.ui.note(_(b"committing changelog\n"))
84 repo.changelog.delayupdate(tr)
84 repo.changelog.delayupdate(tr)
85 n = repo.changelog.add(
85 n = repo.changelog.add(
86 mn,
86 mn,
87 files.touched,
87 files,
88 ctx.description(),
88 ctx.description(),
89 tr,
89 tr,
90 p1.node(),
90 p1.node(),
91 p2.node(),
91 p2.node(),
92 user,
92 user,
93 ctx.date(),
93 ctx.date(),
94 extra,
94 extra,
95 files.copied_from_p1,
96 files.copied_from_p2,
97 files.added,
98 files.removed,
99 )
95 )
100 xp1, xp2 = p1.hex(), p2 and p2.hex() or b''
96 xp1, xp2 = p1.hex(), p2 and p2.hex() or b''
101 repo.hook(
97 repo.hook(
102 b'pretxncommit', throw=True, node=hex(n), parent1=xp1, parent2=xp2,
98 b'pretxncommit', throw=True, node=hex(n), parent1=xp1, parent2=xp2,
103 )
99 )
104 # set the new commit is proper phase
100 # set the new commit is proper phase
105 targetphase = subrepoutil.newcommitphase(repo.ui, ctx)
101 targetphase = subrepoutil.newcommitphase(repo.ui, ctx)
106 if targetphase:
102 if targetphase:
107 # retract boundary do not alter parent changeset.
103 # retract boundary do not alter parent changeset.
108 # if a parent have higher the resulting phase will
104 # if a parent have higher the resulting phase will
109 # be compliant anyway
105 # be compliant anyway
110 #
106 #
111 # if minimal phase was 0 we don't need to retract anything
107 # if minimal phase was 0 we don't need to retract anything
112 phases.registernew(repo, tr, targetphase, [n])
108 phases.registernew(repo, tr, targetphase, [n])
113 return n
109 return n
114
110
115
111
116 def _prepare_files(tr, ctx, error=False, origctx=None):
112 def _prepare_files(tr, ctx, error=False, origctx=None):
117 repo = ctx.repo()
113 repo = ctx.repo()
118 p1 = ctx.p1()
114 p1 = ctx.p1()
119
115
120 writechangesetcopy, writefilecopymeta = _write_copy_meta(repo)
116 writechangesetcopy, writefilecopymeta = _write_copy_meta(repo)
121
117
122 p1copies, p2copies = None, None
118 p1copies, p2copies = None, None
123 if writechangesetcopy:
119 if writechangesetcopy:
124 p1copies = ctx.p1copies()
120 p1copies = ctx.p1copies()
125 p2copies = ctx.p2copies()
121 p2copies = ctx.p2copies()
126 filesadded, filesremoved = None, None
122 filesadded, filesremoved = None, None
127 if ctx.manifestnode():
123 if ctx.manifestnode():
128 # reuse an existing manifest revision
124 # reuse an existing manifest revision
129 repo.ui.debug(b'reusing known manifest\n')
125 repo.ui.debug(b'reusing known manifest\n')
130 mn = ctx.manifestnode()
126 mn = ctx.manifestnode()
131 touched = ctx.files()
127 touched = ctx.files()
132 if writechangesetcopy:
128 if writechangesetcopy:
133 filesadded = ctx.filesadded()
129 filesadded = ctx.filesadded()
134 filesremoved = ctx.filesremoved()
130 filesremoved = ctx.filesremoved()
135 elif not ctx.files():
131 elif not ctx.files():
136 repo.ui.debug(b'reusing manifest from p1 (no file change)\n')
132 repo.ui.debug(b'reusing manifest from p1 (no file change)\n')
137 mn = p1.manifestnode()
133 mn = p1.manifestnode()
138 touched = []
134 touched = []
139 else:
135 else:
140 mn, touched, added, removed = _process_files(tr, ctx, error=error)
136 mn, touched, added, removed = _process_files(tr, ctx, error=error)
141 if writechangesetcopy:
137 if writechangesetcopy:
142 filesremoved = removed
138 filesremoved = removed
143 filesadded = added
139 filesadded = added
144
140
145 if origctx and origctx.manifestnode() == mn:
141 if origctx and origctx.manifestnode() == mn:
146 touched = origctx.files()
142 touched = origctx.files()
147
143
148 files = metadata.ChangingFiles()
144 files = metadata.ChangingFiles()
149 if touched:
145 if touched:
150 files.update_touched(touched)
146 files.update_touched(touched)
151 if p1copies:
147 if p1copies:
152 files.update_copies_from_p1(p1copies)
148 files.update_copies_from_p1(p1copies)
153 if p2copies:
149 if p2copies:
154 files.update_copies_from_p2(p2copies)
150 files.update_copies_from_p2(p2copies)
155 if filesadded:
151 if filesadded:
156 files.update_added(filesadded)
152 files.update_added(filesadded)
157 if filesremoved:
153 if filesremoved:
158 files.update_removed(filesremoved)
154 files.update_removed(filesremoved)
159
155
160 return mn, files
156 return mn, files
161
157
162
158
163 def _process_files(tr, ctx, error=False):
159 def _process_files(tr, ctx, error=False):
164 repo = ctx.repo()
160 repo = ctx.repo()
165 p1 = ctx.p1()
161 p1 = ctx.p1()
166 p2 = ctx.p2()
162 p2 = ctx.p2()
167
163
168 writechangesetcopy, writefilecopymeta = _write_copy_meta(repo)
164 writechangesetcopy, writefilecopymeta = _write_copy_meta(repo)
169
165
170 m1ctx = p1.manifestctx()
166 m1ctx = p1.manifestctx()
171 m2ctx = p2.manifestctx()
167 m2ctx = p2.manifestctx()
172 mctx = m1ctx.copy()
168 mctx = m1ctx.copy()
173
169
174 m = mctx.read()
170 m = mctx.read()
175 m1 = m1ctx.read()
171 m1 = m1ctx.read()
176 m2 = m2ctx.read()
172 m2 = m2ctx.read()
177
173
178 # check in files
174 # check in files
179 added = []
175 added = []
180 filesadded = []
176 filesadded = []
181 removed = list(ctx.removed())
177 removed = list(ctx.removed())
182 touched = []
178 touched = []
183 linkrev = len(repo)
179 linkrev = len(repo)
184 repo.ui.note(_(b"committing files:\n"))
180 repo.ui.note(_(b"committing files:\n"))
185 uipathfn = scmutil.getuipathfn(repo)
181 uipathfn = scmutil.getuipathfn(repo)
186 for f in sorted(ctx.modified() + ctx.added()):
182 for f in sorted(ctx.modified() + ctx.added()):
187 repo.ui.note(uipathfn(f) + b"\n")
183 repo.ui.note(uipathfn(f) + b"\n")
188 try:
184 try:
189 fctx = ctx[f]
185 fctx = ctx[f]
190 if fctx is None:
186 if fctx is None:
191 removed.append(f)
187 removed.append(f)
192 else:
188 else:
193 added.append(f)
189 added.append(f)
194 m[f], is_touched = _filecommit(
190 m[f], is_touched = _filecommit(
195 repo, fctx, m1, m2, linkrev, tr, writefilecopymeta,
191 repo, fctx, m1, m2, linkrev, tr, writefilecopymeta,
196 )
192 )
197 if is_touched:
193 if is_touched:
198 touched.append(f)
194 touched.append(f)
199 if is_touched == 'added':
195 if is_touched == 'added':
200 filesadded.append(f)
196 filesadded.append(f)
201 m.setflag(f, fctx.flags())
197 m.setflag(f, fctx.flags())
202 except OSError:
198 except OSError:
203 repo.ui.warn(_(b"trouble committing %s!\n") % uipathfn(f))
199 repo.ui.warn(_(b"trouble committing %s!\n") % uipathfn(f))
204 raise
200 raise
205 except IOError as inst:
201 except IOError as inst:
206 errcode = getattr(inst, 'errno', errno.ENOENT)
202 errcode = getattr(inst, 'errno', errno.ENOENT)
207 if error or errcode and errcode != errno.ENOENT:
203 if error or errcode and errcode != errno.ENOENT:
208 repo.ui.warn(_(b"trouble committing %s!\n") % uipathfn(f))
204 repo.ui.warn(_(b"trouble committing %s!\n") % uipathfn(f))
209 raise
205 raise
210
206
211 # update manifest
207 # update manifest
212 removed = [f for f in removed if f in m1 or f in m2]
208 removed = [f for f in removed if f in m1 or f in m2]
213 drop = sorted([f for f in removed if f in m])
209 drop = sorted([f for f in removed if f in m])
214 for f in drop:
210 for f in drop:
215 del m[f]
211 del m[f]
216 if p2.rev() != nullrev:
212 if p2.rev() != nullrev:
217 rf = metadata.get_removal_filter(ctx, (p1, p2, m1, m2))
213 rf = metadata.get_removal_filter(ctx, (p1, p2, m1, m2))
218 removed = [f for f in removed if not rf(f)]
214 removed = [f for f in removed if not rf(f)]
219
215
220 touched.extend(removed)
216 touched.extend(removed)
221
217
222 files = touched
218 files = touched
223 mn = _commit_manifest(tr, linkrev, ctx, mctx, m, files, added, drop)
219 mn = _commit_manifest(tr, linkrev, ctx, mctx, m, files, added, drop)
224
220
225 return mn, files, filesadded, removed
221 return mn, files, filesadded, removed
226
222
227
223
228 def _filecommit(
224 def _filecommit(
229 repo, fctx, manifest1, manifest2, linkrev, tr, includecopymeta,
225 repo, fctx, manifest1, manifest2, linkrev, tr, includecopymeta,
230 ):
226 ):
231 """
227 """
232 commit an individual file as part of a larger transaction
228 commit an individual file as part of a larger transaction
233
229
234 input:
230 input:
235
231
236 fctx: a file context with the content we are trying to commit
232 fctx: a file context with the content we are trying to commit
237 manifest1: manifest of changeset first parent
233 manifest1: manifest of changeset first parent
238 manifest2: manifest of changeset second parent
234 manifest2: manifest of changeset second parent
239 linkrev: revision number of the changeset being created
235 linkrev: revision number of the changeset being created
240 tr: current transation
236 tr: current transation
241 individual: boolean, set to False to skip storing the copy data
237 individual: boolean, set to False to skip storing the copy data
242 (only used by the Google specific feature of using
238 (only used by the Google specific feature of using
243 changeset extra as copy source of truth).
239 changeset extra as copy source of truth).
244
240
245 output: (filenode, touched)
241 output: (filenode, touched)
246
242
247 filenode: the filenode that should be used by this changeset
243 filenode: the filenode that should be used by this changeset
248 touched: one of: None (mean untouched), 'added' or 'modified'
244 touched: one of: None (mean untouched), 'added' or 'modified'
249 """
245 """
250
246
251 fname = fctx.path()
247 fname = fctx.path()
252 fparent1 = manifest1.get(fname, nullid)
248 fparent1 = manifest1.get(fname, nullid)
253 fparent2 = manifest2.get(fname, nullid)
249 fparent2 = manifest2.get(fname, nullid)
254 touched = None
250 touched = None
255 if fparent1 == fparent2 == nullid:
251 if fparent1 == fparent2 == nullid:
256 touched = 'added'
252 touched = 'added'
257
253
258 if isinstance(fctx, context.filectx):
254 if isinstance(fctx, context.filectx):
259 # This block fast path most comparisons which are usually done. It
255 # This block fast path most comparisons which are usually done. It
260 # assumes that bare filectx is used and no merge happened, hence no
256 # assumes that bare filectx is used and no merge happened, hence no
261 # need to create a new file revision in this case.
257 # need to create a new file revision in this case.
262 node = fctx.filenode()
258 node = fctx.filenode()
263 if node in [fparent1, fparent2]:
259 if node in [fparent1, fparent2]:
264 repo.ui.debug(b'reusing %s filelog entry\n' % fname)
260 repo.ui.debug(b'reusing %s filelog entry\n' % fname)
265 if (
261 if (
266 fparent1 != nullid and manifest1.flags(fname) != fctx.flags()
262 fparent1 != nullid and manifest1.flags(fname) != fctx.flags()
267 ) or (
263 ) or (
268 fparent2 != nullid and manifest2.flags(fname) != fctx.flags()
264 fparent2 != nullid and manifest2.flags(fname) != fctx.flags()
269 ):
265 ):
270 touched = 'modified'
266 touched = 'modified'
271 return node, touched
267 return node, touched
272
268
273 flog = repo.file(fname)
269 flog = repo.file(fname)
274 meta = {}
270 meta = {}
275 cfname = fctx.copysource()
271 cfname = fctx.copysource()
276 fnode = None
272 fnode = None
277
273
278 if cfname and cfname != fname:
274 if cfname and cfname != fname:
279 # Mark the new revision of this file as a copy of another
275 # Mark the new revision of this file as a copy of another
280 # file. This copy data will effectively act as a parent
276 # file. This copy data will effectively act as a parent
281 # of this new revision. If this is a merge, the first
277 # of this new revision. If this is a merge, the first
282 # parent will be the nullid (meaning "look up the copy data")
278 # parent will be the nullid (meaning "look up the copy data")
283 # and the second one will be the other parent. For example:
279 # and the second one will be the other parent. For example:
284 #
280 #
285 # 0 --- 1 --- 3 rev1 changes file foo
281 # 0 --- 1 --- 3 rev1 changes file foo
286 # \ / rev2 renames foo to bar and changes it
282 # \ / rev2 renames foo to bar and changes it
287 # \- 2 -/ rev3 should have bar with all changes and
283 # \- 2 -/ rev3 should have bar with all changes and
288 # should record that bar descends from
284 # should record that bar descends from
289 # bar in rev2 and foo in rev1
285 # bar in rev2 and foo in rev1
290 #
286 #
291 # this allows this merge to succeed:
287 # this allows this merge to succeed:
292 #
288 #
293 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
289 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
294 # \ / merging rev3 and rev4 should use bar@rev2
290 # \ / merging rev3 and rev4 should use bar@rev2
295 # \- 2 --- 4 as the merge base
291 # \- 2 --- 4 as the merge base
296 #
292 #
297
293
298 cnode = manifest1.get(cfname)
294 cnode = manifest1.get(cfname)
299 newfparent = fparent2
295 newfparent = fparent2
300
296
301 if manifest2: # branch merge
297 if manifest2: # branch merge
302 if fparent2 == nullid or cnode is None: # copied on remote side
298 if fparent2 == nullid or cnode is None: # copied on remote side
303 if cfname in manifest2:
299 if cfname in manifest2:
304 cnode = manifest2[cfname]
300 cnode = manifest2[cfname]
305 newfparent = fparent1
301 newfparent = fparent1
306
302
307 # Here, we used to search backwards through history to try to find
303 # Here, we used to search backwards through history to try to find
308 # where the file copy came from if the source of a copy was not in
304 # where the file copy came from if the source of a copy was not in
309 # the parent directory. However, this doesn't actually make sense to
305 # the parent directory. However, this doesn't actually make sense to
310 # do (what does a copy from something not in your working copy even
306 # do (what does a copy from something not in your working copy even
311 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
307 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
312 # the user that copy information was dropped, so if they didn't
308 # the user that copy information was dropped, so if they didn't
313 # expect this outcome it can be fixed, but this is the correct
309 # expect this outcome it can be fixed, but this is the correct
314 # behavior in this circumstance.
310 # behavior in this circumstance.
315
311
316 if cnode:
312 if cnode:
317 repo.ui.debug(b" %s: copy %s:%s\n" % (fname, cfname, hex(cnode)))
313 repo.ui.debug(b" %s: copy %s:%s\n" % (fname, cfname, hex(cnode)))
318 if includecopymeta:
314 if includecopymeta:
319 meta[b"copy"] = cfname
315 meta[b"copy"] = cfname
320 meta[b"copyrev"] = hex(cnode)
316 meta[b"copyrev"] = hex(cnode)
321 fparent1, fparent2 = nullid, newfparent
317 fparent1, fparent2 = nullid, newfparent
322 else:
318 else:
323 repo.ui.warn(
319 repo.ui.warn(
324 _(
320 _(
325 b"warning: can't find ancestor for '%s' "
321 b"warning: can't find ancestor for '%s' "
326 b"copied from '%s'!\n"
322 b"copied from '%s'!\n"
327 )
323 )
328 % (fname, cfname)
324 % (fname, cfname)
329 )
325 )
330
326
331 elif fparent1 == nullid:
327 elif fparent1 == nullid:
332 fparent1, fparent2 = fparent2, nullid
328 fparent1, fparent2 = fparent2, nullid
333 elif fparent2 != nullid:
329 elif fparent2 != nullid:
334 # is one parent an ancestor of the other?
330 # is one parent an ancestor of the other?
335 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
331 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
336 if fparent1 in fparentancestors:
332 if fparent1 in fparentancestors:
337 fparent1, fparent2 = fparent2, nullid
333 fparent1, fparent2 = fparent2, nullid
338 elif fparent2 in fparentancestors:
334 elif fparent2 in fparentancestors:
339 fparent2 = nullid
335 fparent2 = nullid
340 elif not fparentancestors:
336 elif not fparentancestors:
341 # TODO: this whole if-else might be simplified much more
337 # TODO: this whole if-else might be simplified much more
342 ms = mergestate.mergestate.read(repo)
338 ms = mergestate.mergestate.read(repo)
343 if (
339 if (
344 fname in ms
340 fname in ms
345 and ms[fname] == mergestate.MERGE_RECORD_MERGED_OTHER
341 and ms[fname] == mergestate.MERGE_RECORD_MERGED_OTHER
346 ):
342 ):
347 fparent1, fparent2 = fparent2, nullid
343 fparent1, fparent2 = fparent2, nullid
348
344
349 # is the file changed?
345 # is the file changed?
350 text = fctx.data()
346 text = fctx.data()
351 if fparent2 != nullid or meta or flog.cmp(fparent1, text):
347 if fparent2 != nullid or meta or flog.cmp(fparent1, text):
352 if touched is None: # do not overwrite added
348 if touched is None: # do not overwrite added
353 touched = 'modified'
349 touched = 'modified'
354 fnode = flog.add(text, meta, tr, linkrev, fparent1, fparent2)
350 fnode = flog.add(text, meta, tr, linkrev, fparent1, fparent2)
355 # are just the flags changed during merge?
351 # are just the flags changed during merge?
356 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
352 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
357 touched = 'modified'
353 touched = 'modified'
358 fnode = fparent1
354 fnode = fparent1
359 else:
355 else:
360 fnode = fparent1
356 fnode = fparent1
361 return fnode, touched
357 return fnode, touched
362
358
363
359
def _commit_manifest(tr, linkrev, ctx, mctx, manifest, files, added, drop):
    """make a new manifest entry (or reuse an existing one)

    given an initialised manifest context and precomputed list of
    - files: files affected by the commit
    - added: new entries in the manifest
    - drop: entries present in parents but absent from this one

    Create a new manifest revision, reuse existing ones if possible.

    Return the nodeid of the manifest revision.
    """
    repo = ctx.repo()

    md = None

    # all this is cached, so it is fine to get them all from the ctx.
    p1 = ctx.p1()
    p2 = ctx.p2()
    m1ctx = p1.manifestctx()

    m1 = m1ctx.read()

    if not files:
        # if no "files" actually changed in terms of the changelog,
        # try hard to detect unmodified manifest entry so that the
        # exact same commit can be reproduced later on convert.
        md = m1.diff(manifest, scmutil.matchfiles(repo, ctx.files()))
    if not files and md:
        repo.ui.debug(
            b'not reusing manifest (no file change in '
            b'changelog, but manifest differs)\n'
        )
    if files or md:
        repo.ui.note(_(b"committing manifest\n"))
        # we're using narrowmatch here since it's already applied at
        # other stages (such as dirstate.walk), so we're already
        # ignoring things outside of narrowspec in most cases. The
        # one case where we might have files outside the narrowspec
        # at this point is merges, and we already error out in the
        # case where the merge has files outside of the narrowspec,
        # so this is safe.
        mn = mctx.write(
            tr,
            linkrev,
            p1.manifestnode(),
            p2.manifestnode(),
            added,
            drop,
            match=repo.narrowmatch(),
        )
    else:
        repo.ui.debug(
            b'reusing manifest from p1 (listed files actually unchanged)\n'
        )
        mn = p1.manifestnode()

    return mn


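# Illustration (not part of the upstream file): the reuse check above
# relies on manifestdict.diff(). A minimal sketch, assuming `m1` and
# `m2` are manifestdict instances:
#
#     md = m1.diff(m2)
#     # md maps path -> ((node1, flag1), (node2, flag2)); a non-empty
#     # result means the manifests differ even though the changelog
#     # files list is empty, so p1's manifest cannot be reused.

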
def _extra_with_copies(repo, extra, files):
    """encode copy information into an `extra` dictionary"""
    p1copies = files.copied_from_p1
    p2copies = files.copied_from_p2
    filesadded = files.added
    filesremoved = files.removed
    files = sorted(files.touched)
    if not _write_copy_meta(repo)[1]:
        # If writing only to changeset extras, use None to indicate that
        # no entry should be written. If writing to both, write an empty
        # entry to prevent the reader from falling back to reading
        # filelogs.
        p1copies = p1copies or None
        p2copies = p2copies or None
        filesadded = filesadded or None
        filesremoved = filesremoved or None

    extrasentries = p1copies, p2copies, filesadded, filesremoved
    if extra is None and any(x is not None for x in extrasentries):
        extra = {}
    if p1copies is not None:
        p1copies = metadata.encodecopies(files, p1copies)
        extra[b'p1copies'] = p1copies
    if p2copies is not None:
        p2copies = metadata.encodecopies(files, p2copies)
        extra[b'p2copies'] = p2copies
    if filesadded is not None:
        filesadded = metadata.encodefileindices(files, filesadded)
        extra[b'filesadded'] = filesadded
    if filesremoved is not None:
        filesremoved = metadata.encodefileindices(files, filesremoved)
        extra[b'filesremoved'] = filesremoved
    return extra
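

# Illustration (not part of the upstream file): with changeset-side copy
# storage enabled, a commit renaming a -> b could end up with extras
# along these lines (the exact values are the compact encodings produced
# by metadata.encodecopies / metadata.encodefileindices):
#
#     extra = {
#         b'branch': b'default',
#         b'p1copies': ...,    # maps b back to a, index-encoded
#         b'filesadded': ...,  # indices into the sorted touched list
#     }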
@@ -1,40 +1,41 @@
Testing that convert.hg.preserve-hash=true can be used to make hg
convert from hg repo to hg repo preserve hashes, even if the
computation of the files list in commits changes slightly between hg
versions.

  $ cat <<'EOF' >> "$HGRCPATH"
  > [extensions]
  > convert =
  > EOF
  $ cat <<'EOF' > changefileslist.py
-  > from mercurial import (changelog, extensions)
+  > from mercurial import (changelog, extensions, metadata)
  > def wrap(orig, clog, manifest, files, *args, **kwargs):
-  >     return orig(clog, manifest, [b"a"], *args, **kwargs)
+  >     files = metadata.ChangingFiles(touched=[b"a"])
+  >     return orig(clog, manifest, files, *args, **kwargs)
  > def extsetup(ui):
  >     extensions.wrapfunction(changelog.changelog, 'add', wrap)
  > EOF
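
The wrapped changelog.add above simulates an older hg by forcing the
recorded files list to just ["a"], regardless of what the commit
really touched.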

  $ hg init repo
  $ cd repo
  $ echo a > a; hg commit -qAm a
  $ echo b > a; hg commit -qAm b
  $ hg up -qr 0; echo c > c; hg commit -qAm c
  $ hg merge -qr 1
  $ hg commit -m_ --config extensions.x=../changefileslist.py
  $ hg log -r . -T '{node|short} {files|json}\n'
  c085bbe93d59 ["a"]

Now that we have a commit with a files list that's not what the
current hg version would create, check that convert either fixes it or
keeps it depending on config:

  $ hg convert -q . ../convert
  $ hg --cwd ../convert log -r tip -T '{node|short} {files|json}\n'
  b7c4d4bbacd3 []
  $ rm -rf ../convert

  $ hg convert -q . ../convert --config convert.hg.preserve-hash=true
  $ hg --cwd ../convert log -r tip -T '{node|short} {files|json}\n'
  c085bbe93d59 ["a"]
  $ rm -rf ../convert