commitctx: extract copy information encoding into extra into commit.py
marmoute
r45809:b3040b67 default
diff --git a/mercurial/changelog.py b/mercurial/changelog.py
@@ -1,627 +1,605 @@
# changelog.py - changelog class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

from .i18n import _
from .node import (
    bin,
    hex,
    nullid,
)
from .thirdparty import attr

from . import (
    encoding,
    error,
    metadata,
    pycompat,
    revlog,
)
from .utils import (
    dateutil,
    stringutil,
)

from .revlogutils import sidedata as sidedatamod

_defaultextra = {b'branch': b'default'}


def _string_escape(text):
    """
    >>> from .pycompat import bytechr as chr
    >>> d = {b'nl': chr(10), b'bs': chr(92), b'cr': chr(13), b'nul': chr(0)}
    >>> s = b"ab%(nl)scd%(bs)s%(bs)sn%(nul)s12ab%(cr)scd%(bs)s%(nl)s" % d
    >>> s
    'ab\\ncd\\\\\\\\n\\x0012ab\\rcd\\\\\\n'
    >>> res = _string_escape(s)
    >>> s == _string_unescape(res)
    True
    """
    # subset of the string_escape codec
    text = (
        text.replace(b'\\', b'\\\\')
        .replace(b'\n', b'\\n')
        .replace(b'\r', b'\\r')
    )
    return text.replace(b'\0', b'\\0')


def _string_unescape(text):
    if b'\\0' in text:
        # fix up \0 without getting into trouble with \\0
        text = text.replace(b'\\\\', b'\\\\\n')
        text = text.replace(b'\\0', b'\0')
        text = text.replace(b'\n', b'')
    return stringutil.unescapestr(text)


def decodeextra(text):
    """
    >>> from .pycompat import bytechr as chr
    >>> sorted(decodeextra(encodeextra({b'foo': b'bar', b'baz': chr(0) + b'2'})
    ...                    ).items())
    [('baz', '\\x002'), ('branch', 'default'), ('foo', 'bar')]
    >>> sorted(decodeextra(encodeextra({b'foo': b'bar',
    ...                                 b'baz': chr(92) + chr(0) + b'2'})
    ...                    ).items())
    [('baz', '\\\\\\x002'), ('branch', 'default'), ('foo', 'bar')]
    """
    extra = _defaultextra.copy()
    for l in text.split(b'\0'):
        if l:
            k, v = _string_unescape(l).split(b':', 1)
            extra[k] = v
    return extra


def encodeextra(d):
    # keys must be sorted to produce a deterministic changelog entry
    items = [_string_escape(b'%s:%s' % (k, d[k])) for k in sorted(d)]
    return b"\0".join(items)
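
# An illustrative round-trip of the two helpers above (hypothetical values,
# not part of the original source):
#
#   >>> encodeextra({b'branch': b'stable', b'close': b'1'})
#   b'branch:stable\x00close:1'
#   >>> decodeextra(b'branch:stable\x00close:1')
#   {b'branch': b'stable', b'close': b'1'}
#
# decodeextra() starts from _defaultextra, so input with no 'branch' entry
# decodes to b'branch': b'default'.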
def stripdesc(desc):
    """strip trailing whitespace and leading and trailing empty lines"""
    return b'\n'.join([l.rstrip() for l in desc.splitlines()]).strip(b'\n')


class appender(object):
    '''the changelog index must be updated last on disk, so we use this class
    to delay writes to it'''

    def __init__(self, vfs, name, mode, buf):
        self.data = buf
        fp = vfs(name, mode)
        self.fp = fp
        self.offset = fp.tell()
        self.size = vfs.fstat(fp).st_size
        self._end = self.size

    def end(self):
        return self._end

    def tell(self):
        return self.offset

    def flush(self):
        pass

    @property
    def closed(self):
        return self.fp.closed

    def close(self):
        self.fp.close()

    def seek(self, offset, whence=0):
        '''virtual file offset spans real file and data'''
        if whence == 0:
            self.offset = offset
        elif whence == 1:
            self.offset += offset
        elif whence == 2:
            self.offset = self.end() + offset
        if self.offset < self.size:
            self.fp.seek(self.offset)

    def read(self, count=-1):
        '''only trick here is reads that span real file and data'''
        ret = b""
        if self.offset < self.size:
            s = self.fp.read(count)
            ret = s
            self.offset += len(s)
            if count > 0:
                count -= len(s)
        if count != 0:
            doff = self.offset - self.size
            self.data.insert(0, b"".join(self.data))
            del self.data[1:]
            s = self.data[0][doff : doff + count]
            self.offset += len(s)
            ret += s
        return ret

    def write(self, s):
        self.data.append(bytes(s))
        self.offset += len(s)
        self._end += len(s)

    def __enter__(self):
        self.fp.__enter__()
        return self

    def __exit__(self, *args):
        return self.fp.__exit__(*args)
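
# An illustration of appender.read() spanning disk and buffer (hypothetical
# sizes, not part of the original source): with a 10-byte file on disk and
# buf == [b'wxyz'], an appender seek()ed to offset 8 that read()s 4 bytes
# returns the file's last two bytes followed by b'wx' from the in-memory
# buffer.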
class _divertopener(object):
    def __init__(self, opener, target):
        self._opener = opener
        self._target = target

    def __call__(self, name, mode=b'r', checkambig=False, **kwargs):
        if name != self._target:
            return self._opener(name, mode, **kwargs)
        return self._opener(name + b".a", mode, **kwargs)

    def __getattr__(self, attr):
        return getattr(self._opener, attr)


def _delayopener(opener, target, buf):
    """build an opener that stores chunks in 'buf' instead of 'target'"""

    def _delay(name, mode=b'r', checkambig=False, **kwargs):
        if name != target:
            return opener(name, mode, **kwargs)
        assert not kwargs
        return appender(opener, name, mode, buf)

    return _delay


@attr.s
class _changelogrevision(object):
    # Extensions might modify _defaultextra, so let the constructor below pass
    # it in
    extra = attr.ib()
    manifest = attr.ib(default=nullid)
    user = attr.ib(default=b'')
    date = attr.ib(default=(0, 0))
    files = attr.ib(default=attr.Factory(list))
    filesadded = attr.ib(default=None)
    filesremoved = attr.ib(default=None)
    p1copies = attr.ib(default=None)
    p2copies = attr.ib(default=None)
    description = attr.ib(default=b'')


class changelogrevision(object):
    """Holds results of a parsed changelog revision.

    Changelog revisions consist of multiple pieces of data, including
    the manifest node, user, and date. This object exposes a view into
    the parsed object.
    """

    __slots__ = (
        '_offsets',
        '_text',
        '_sidedata',
        '_cpsd',
    )

    def __new__(cls, text, sidedata, cpsd):
        if not text:
            return _changelogrevision(extra=_defaultextra)

        self = super(changelogrevision, cls).__new__(cls)
        # We could return here and implement the following as an __init__.
        # But doing it here is equivalent and saves an extra function call.

        # format used:
        # nodeid\n        : manifest node in ascii
        # user\n          : user, no \n or \r allowed
        # time tz extra\n : date (time is int or float, timezone is int)
        #                 : extra is metadata, encoded and separated by '\0'
        #                 : older versions ignore it
        # files\n\n       : files modified by the cset, no \n or \r allowed
        # (.*)            : comment (free text, ideally utf-8)
        #
        # changelog v0 doesn't use extra
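
        # For instance (hypothetical values, not part of the original
        # source), a raw entry under this format could look like:
        #
        #     5b5c0c4536b9a09...\n
        #     Jane Doe <jane@example.com>\n
        #     1591000000 -7200 branch:stable\x00close:1\n
        #     src/a.py\n
        #     src/b.py\n
        #     \n
        #     commit message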
        nl1 = text.index(b'\n')
        nl2 = text.index(b'\n', nl1 + 1)
        nl3 = text.index(b'\n', nl2 + 1)

        # The list of files may be empty. Which means nl3 is the first of the
        # double newline that precedes the description.
        if text[nl3 + 1 : nl3 + 2] == b'\n':
            doublenl = nl3
        else:
            doublenl = text.index(b'\n\n', nl3 + 1)

        self._offsets = (nl1, nl2, nl3, doublenl)
        self._text = text
        self._sidedata = sidedata
        self._cpsd = cpsd

        return self

    @property
    def manifest(self):
        return bin(self._text[0 : self._offsets[0]])

    @property
    def user(self):
        off = self._offsets
        return encoding.tolocal(self._text[off[0] + 1 : off[1]])

    @property
    def _rawdate(self):
        off = self._offsets
        dateextra = self._text[off[1] + 1 : off[2]]
        return dateextra.split(b' ', 2)[0:2]

    @property
    def _rawextra(self):
        off = self._offsets
        dateextra = self._text[off[1] + 1 : off[2]]
        fields = dateextra.split(b' ', 2)
        if len(fields) != 3:
            return None

        return fields[2]

    @property
    def date(self):
        raw = self._rawdate
        time = float(raw[0])
        # Various tools did silly things with the timezone.
        try:
            timezone = int(raw[1])
        except ValueError:
            timezone = 0

        return time, timezone

    @property
    def extra(self):
        raw = self._rawextra
        if raw is None:
            return _defaultextra

        return decodeextra(raw)

    @property
    def files(self):
        off = self._offsets
        if off[2] == off[3]:
            return []

        return self._text[off[2] + 1 : off[3]].split(b'\n')

    @property
    def filesadded(self):
        if self._cpsd:
            rawindices = self._sidedata.get(sidedatamod.SD_FILESADDED)
            if not rawindices:
                return []
        else:
            rawindices = self.extra.get(b'filesadded')
        if rawindices is None:
            return None
        return metadata.decodefileindices(self.files, rawindices)

    @property
    def filesremoved(self):
        if self._cpsd:
            rawindices = self._sidedata.get(sidedatamod.SD_FILESREMOVED)
            if not rawindices:
                return []
        else:
            rawindices = self.extra.get(b'filesremoved')
        if rawindices is None:
            return None
        return metadata.decodefileindices(self.files, rawindices)

    @property
    def p1copies(self):
        if self._cpsd:
            rawcopies = self._sidedata.get(sidedatamod.SD_P1COPIES)
            if not rawcopies:
                return {}
        else:
            rawcopies = self.extra.get(b'p1copies')
        if rawcopies is None:
            return None
        return metadata.decodecopies(self.files, rawcopies)

    @property
    def p2copies(self):
        if self._cpsd:
            rawcopies = self._sidedata.get(sidedatamod.SD_P2COPIES)
            if not rawcopies:
                return {}
        else:
            rawcopies = self.extra.get(b'p2copies')
        if rawcopies is None:
            return None
        return metadata.decodecopies(self.files, rawcopies)

    @property
    def description(self):
        return encoding.tolocal(self._text[self._offsets[3] + 2 :])


class changelog(revlog.revlog):
    def __init__(self, opener, trypending=False):
        """Load a changelog revlog using an opener.

        If ``trypending`` is true, we attempt to load the index from a
        ``00changelog.i.a`` file instead of the default ``00changelog.i``.
        The ``00changelog.i.a`` file contains index (and possibly inline
        revision) data for a transaction that hasn't been finalized yet.
        It exists in a separate file to facilitate readers (such as
        hooks processes) accessing data before a transaction is finalized.
        """
        if trypending and opener.exists(b'00changelog.i.a'):
            indexfile = b'00changelog.i.a'
        else:
            indexfile = b'00changelog.i'

        datafile = b'00changelog.d'
        revlog.revlog.__init__(
            self,
            opener,
            indexfile,
            datafile=datafile,
            checkambig=True,
            mmaplargeindex=True,
            persistentnodemap=opener.options.get(b'persistent-nodemap', False),
        )

        if self._initempty and (self.version & 0xFFFF == revlog.REVLOGV1):
            # changelogs don't benefit from generaldelta.

            self.version &= ~revlog.FLAG_GENERALDELTA
            self._generaldelta = False

        # Delta chains for changelogs tend to be very small because entries
        # tend to be small and don't delta well with each other. So disable
        # delta chains.
        self._storedeltachains = False

        self._realopener = opener
        self._delayed = False
        self._delaybuf = None
        self._divert = False
        self.filteredrevs = frozenset()
        self._copiesstorage = opener.options.get(b'copies-storage')

    def delayupdate(self, tr):
        """delay visibility of index updates to other readers"""

        if not self._delayed:
            if len(self) == 0:
                self._divert = True
                if self._realopener.exists(self.indexfile + b'.a'):
                    self._realopener.unlink(self.indexfile + b'.a')
                self.opener = _divertopener(self._realopener, self.indexfile)
            else:
                self._delaybuf = []
                self.opener = _delayopener(
                    self._realopener, self.indexfile, self._delaybuf
                )
        self._delayed = True
        tr.addpending(b'cl-%i' % id(self), self._writepending)
        tr.addfinalize(b'cl-%i' % id(self), self._finalize)
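
    # Two delaying strategies are used above: on an empty changelog, writes
    # are diverted to a separate '00changelog.i.a' file that _finalize()
    # later renames into place; on a non-empty one, writes are buffered in
    # memory (_delaybuf) and appended to the real index file by _finalize().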
    def _finalize(self, tr):
        """finalize index updates"""
        self._delayed = False
        self.opener = self._realopener
        # move redirected index data back into place
        if self._divert:
            assert not self._delaybuf
            tmpname = self.indexfile + b".a"
            nfile = self.opener.open(tmpname)
            nfile.close()
            self.opener.rename(tmpname, self.indexfile, checkambig=True)
        elif self._delaybuf:
            fp = self.opener(self.indexfile, b'a', checkambig=True)
            fp.write(b"".join(self._delaybuf))
            fp.close()
        self._delaybuf = None
        self._divert = False
        # split when we're done
        self._enforceinlinesize(tr)

    def _writepending(self, tr):
        """create a file containing the unfinalized state for
        pretxnchangegroup"""
        if self._delaybuf:
            # make a temporary copy of the index
            fp1 = self._realopener(self.indexfile)
            pendingfilename = self.indexfile + b".a"
            # register as a temp file to ensure cleanup on failure
            tr.registertmp(pendingfilename)
            # write existing data
            fp2 = self._realopener(pendingfilename, b"w")
            fp2.write(fp1.read())
            # add pending data
            fp2.write(b"".join(self._delaybuf))
            fp2.close()
            # switch modes so finalize can simply rename
            self._delaybuf = None
            self._divert = True
            self.opener = _divertopener(self._realopener, self.indexfile)

        if self._divert:
            return True

        return False

    def _enforceinlinesize(self, tr, fp=None):
        if not self._delayed:
            revlog.revlog._enforceinlinesize(self, tr, fp)

    def read(self, node):
        """Obtain data from a parsed changelog revision.

        Returns a 6-tuple of:

        - manifest node in binary
        - author/user as a localstr
        - date as a 2-tuple of (time, timezone)
        - list of files
        - commit message as a localstr
        - dict of extra metadata

        Unless you need to access all fields, consider calling
        ``changelogrevision`` instead, as it is faster for partial object
        access.
        """
        d, s = self._revisiondata(node)
        c = changelogrevision(
            d, s, self._copiesstorage == b'changeset-sidedata'
        )
        return (c.manifest, c.user, c.date, c.files, c.description, c.extra)

    def changelogrevision(self, nodeorrev):
        """Obtain a ``changelogrevision`` for a node or revision."""
        text, sidedata = self._revisiondata(nodeorrev)
        return changelogrevision(
            text, sidedata, self._copiesstorage == b'changeset-sidedata'
        )

    def readfiles(self, node):
        """
        short version of read that only returns the files modified by the cset
        """
        text = self.revision(node)
        if not text:
            return []
        last = text.index(b"\n\n")
        l = text[:last].split(b'\n')
        return l[3:]
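
    # (The slice above works because, per the entry format documented in
    # changelogrevision.__new__, the first three lines are the manifest
    # node, the user and the date line; the remaining lines up to the
    # blank line are the file list.)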
    def add(
        self,
        manifest,
        files,
        desc,
        transaction,
        p1,
        p2,
        user,
        date=None,
        extra=None,
        p1copies=None,
        p2copies=None,
        filesadded=None,
        filesremoved=None,
    ):
        # Convert to UTF-8 encoded bytestrings as the very first
        # thing: calling any method on a localstr object will turn it
        # into a str object and the cached UTF-8 string is thus lost.
        user, desc = encoding.fromlocal(user), encoding.fromlocal(desc)

        user = user.strip()
        # An empty username or a username with a "\n" will make the
        # revision text contain two "\n\n" sequences -> corrupt
        # repository since read cannot unpack the revision.
        if not user:
            raise error.StorageError(_(b"empty username"))
        if b"\n" in user:
            raise error.StorageError(
                _(b"username %r contains a newline") % pycompat.bytestr(user)
            )

        desc = stripdesc(desc)

        if date:
            parseddate = b"%d %d" % dateutil.parsedate(date)
        else:
            parseddate = b"%d %d" % dateutil.makedate()
        if extra:
            branch = extra.get(b"branch")
            if branch in (b"default", b""):
                del extra[b"branch"]
            elif branch in (b".", b"null", b"tip"):
                raise error.StorageError(
                    _(b'the name \'%s\' is reserved') % branch
                )
        sortedfiles = sorted(files)
        sidedata = None
-        if extra is not None:
-            for name in (
-                b'p1copies',
-                b'p2copies',
-                b'filesadded',
-                b'filesremoved',
-            ):
-                extra.pop(name, None)
-        if p1copies is not None:
-            p1copies = metadata.encodecopies(sortedfiles, p1copies)
-        if p2copies is not None:
-            p2copies = metadata.encodecopies(sortedfiles, p2copies)
-        if filesadded is not None:
-            filesadded = metadata.encodefileindices(sortedfiles, filesadded)
-        if filesremoved is not None:
-            filesremoved = metadata.encodefileindices(sortedfiles, filesremoved)
-        if self._copiesstorage == b'extra':
-            extrasentries = p1copies, p2copies, filesadded, filesremoved
-            if extra is None and any(x is not None for x in extrasentries):
-                extra = {}
-            if p1copies is not None:
-                extra[b'p1copies'] = p1copies
-            if p2copies is not None:
-                extra[b'p2copies'] = p2copies
-            if filesadded is not None:
-                extra[b'filesadded'] = filesadded
-            if filesremoved is not None:
-                extra[b'filesremoved'] = filesremoved
-        elif self._copiesstorage == b'changeset-sidedata':
+        if self._copiesstorage == b'changeset-sidedata':
             sidedata = {}
             if p1copies:
+                p1copies = metadata.encodecopies(sortedfiles, p1copies)
                 sidedata[sidedatamod.SD_P1COPIES] = p1copies
             if p2copies:
+                p2copies = metadata.encodecopies(sortedfiles, p2copies)
                 sidedata[sidedatamod.SD_P2COPIES] = p2copies
             if filesadded:
+                filesadded = metadata.encodefileindices(sortedfiles, filesadded)
                 sidedata[sidedatamod.SD_FILESADDED] = filesadded
             if filesremoved:
+                filesremoved = metadata.encodefileindices(
+                    sortedfiles, filesremoved
+                )
                 sidedata[sidedatamod.SD_FILESREMOVED] = filesremoved
             if not sidedata:
                 sidedata = None
        if extra:
            extra = encodeextra(extra)
            parseddate = b"%s %s" % (parseddate, extra)
        l = [hex(manifest), user, parseddate] + sortedfiles + [b"", desc]
        text = b"\n".join(l)
        return self.addrevision(
            text, transaction, len(self), p1, p2, sidedata=sidedata
        )

    def branchinfo(self, rev):
        """return the branch name and open/close state of a revision

        This function exists because creating a changectx object
        just to access this is costly."""
        extra = self.read(rev)[5]
        return encoding.tolocal(extra.get(b"branch")), b'close' in extra

    def _nodeduplicatecallback(self, transaction, node):
        # keep track of revisions that got "re-added", e.g. an unbundle of a
        # known rev.
        #
        # We track them in a list to preserve their order from the source
        # bundle
        duplicates = transaction.changes.setdefault(b'revduplicates', [])
        duplicates.append(self.rev(node))
diff --git a/mercurial/commit.py b/mercurial/commit.py
@@ -1,409 +1,445 @@
# commit.py - function to perform commit
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno

from .i18n import _
from .node import (
    hex,
    nullid,
    nullrev,
)

from . import (
    context,
    mergestate,
    metadata,
    phases,
    scmutil,
    subrepoutil,
)


def _write_copy_meta(repo):
    """return a (changelog, filelog) boolean tuple

    changelog: copy related information should be stored in the changeset
    filelog: copy related information should be written in the file revision
    """
    if repo.filecopiesmode == b'changeset-sidedata':
        writechangesetcopy = True
        writefilecopymeta = True
    else:
        writecopiesto = repo.ui.config(b'experimental', b'copies.write-to')
        writefilecopymeta = writecopiesto != b'changeset-only'
        writechangesetcopy = writecopiesto in (
            b'changeset-only',
            b'compatibility',
        )
    return writechangesetcopy, writefilecopymeta
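
# For illustration, the mapping implemented above as
# (writechangesetcopy, writefilecopymeta); b'filelog-only' is assumed as
# the remaining 'copies.write-to' value:
#
#   filecopiesmode == b'changeset-sidedata'  -> (True, True)
#   copies.write-to == b'changeset-only'     -> (True, False)
#   copies.write-to == b'compatibility'      -> (True, True)
#   copies.write-to == b'filelog-only'       -> (False, True)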
def commitctx(repo, ctx, error=False, origctx=None):
    """Add a new revision to the target repository.
    Revision information is passed via the context argument.

    ctx.files() should list all files involved in this commit, i.e.
    modified/added/removed files. On merge, it may be wider than the
    ctx.files() to be committed, since any file nodes derived directly
    from p1 or p2 are excluded from the committed ctx.files().

    origctx is for convert to work around the problem that bug
    fixes to the files list in changesets change hashes. For
    convert to be the identity, it can pass an origctx and this
    function will use the same files list when it makes sense to
    do so.
    """
    repo = repo.unfiltered()

    p1, p2 = ctx.p1(), ctx.p2()
    user = ctx.user()

    with repo.lock(), repo.transaction(b"commit") as tr:
        r = _prepare_files(tr, ctx, error=error, origctx=origctx)
        mn, files, p1copies, p2copies, filesadded, filesremoved = r

        extra = ctx.extra().copy()
+        files = sorted(files)
+        if extra is not None:
+            for name in (
+                b'p1copies',
+                b'p2copies',
+                b'filesadded',
+                b'filesremoved',
+            ):
+                extra.pop(name, None)
+        if repo.changelog._copiesstorage == b'extra':
+            extra = _extra_with_copies(
+                repo, extra, files, p1copies, p2copies, filesadded, filesremoved
+            )
+
        # update changelog
        repo.ui.note(_(b"committing changelog\n"))
        repo.changelog.delayupdate(tr)
        n = repo.changelog.add(
            mn,
            files,
            ctx.description(),
            tr,
            p1.node(),
            p2.node(),
            user,
            ctx.date(),
            extra,
            p1copies,
            p2copies,
            filesadded,
            filesremoved,
        )
        xp1, xp2 = p1.hex(), p2 and p2.hex() or b''
        repo.hook(
            b'pretxncommit', throw=True, node=hex(n), parent1=xp1, parent2=xp2,
        )
        # set the new commit in its proper phase
        targetphase = subrepoutil.newcommitphase(repo.ui, ctx)
        if targetphase:
            # retracting the phase boundary does not alter the parent
            # changesets; if a parent has a higher phase, the resulting
            # phase will be compliant anyway
            #
            # if the minimal phase was 0 we don't need to retract anything
            phases.registernew(repo, tr, targetphase, [n])
        return n


def _prepare_files(tr, ctx, error=False, origctx=None):
    repo = ctx.repo()
    p1 = ctx.p1()

    writechangesetcopy, writefilecopymeta = _write_copy_meta(repo)

    p1copies, p2copies = None, None
    if writechangesetcopy:
        p1copies = ctx.p1copies()
        p2copies = ctx.p2copies()
    filesadded, filesremoved = None, None
    if ctx.manifestnode():
        # reuse an existing manifest revision
        repo.ui.debug(b'reusing known manifest\n')
        mn = ctx.manifestnode()
        files = ctx.files()
        if writechangesetcopy:
            filesadded = ctx.filesadded()
            filesremoved = ctx.filesremoved()
    elif not ctx.files():
        repo.ui.debug(b'reusing manifest from p1 (no file change)\n')
        mn = p1.manifestnode()
        files = []
    else:
        mn, files, added, removed = _process_files(tr, ctx, error=error)
        if writechangesetcopy:
            filesremoved = removed
            filesadded = added

    if origctx and origctx.manifestnode() == mn:
        files = origctx.files()

    if not writefilecopymeta:
        # If writing only to changeset extras, use None to indicate that
        # no entry should be written. If writing to both, write an empty
        # entry to prevent the reader from falling back to reading
        # filelogs.
        p1copies = p1copies or None
        p2copies = p2copies or None
        filesadded = filesadded or None
        filesremoved = filesremoved or None

    return mn, files, p1copies, p2copies, filesadded, filesremoved


def _process_files(tr, ctx, error=False):
    repo = ctx.repo()
    p1 = ctx.p1()
    p2 = ctx.p2()

    writechangesetcopy, writefilecopymeta = _write_copy_meta(repo)

    m1ctx = p1.manifestctx()
    m2ctx = p2.manifestctx()
    mctx = m1ctx.copy()

    m = mctx.read()
    m1 = m1ctx.read()
    m2 = m2ctx.read()

    # check in files
    added = []
    filesadded = []
    removed = list(ctx.removed())
    touched = []
    linkrev = len(repo)
    repo.ui.note(_(b"committing files:\n"))
    uipathfn = scmutil.getuipathfn(repo)
    for f in sorted(ctx.modified() + ctx.added()):
        repo.ui.note(uipathfn(f) + b"\n")
        try:
            fctx = ctx[f]
            if fctx is None:
                removed.append(f)
            else:
                added.append(f)
                m[f], is_touched = _filecommit(
                    repo, fctx, m1, m2, linkrev, tr, writefilecopymeta,
                )
                if is_touched:
                    touched.append(f)
                    if is_touched == 'added':
                        filesadded.append(f)
                m.setflag(f, fctx.flags())
        except OSError:
            repo.ui.warn(_(b"trouble committing %s!\n") % uipathfn(f))
            raise
        except IOError as inst:
            errcode = getattr(inst, 'errno', errno.ENOENT)
            if error or errcode and errcode != errno.ENOENT:
                repo.ui.warn(_(b"trouble committing %s!\n") % uipathfn(f))
            raise

    # update manifest
    removed = [f for f in removed if f in m1 or f in m2]
    drop = sorted([f for f in removed if f in m])
    for f in drop:
        del m[f]
    if p2.rev() != nullrev:
        rf = metadata.get_removal_filter(ctx, (p1, p2, m1, m2))
        removed = [f for f in removed if not rf(f)]

    touched.extend(removed)

    files = touched
    mn = _commit_manifest(tr, linkrev, ctx, mctx, m, files, added, drop)

    return mn, files, filesadded, removed


def _filecommit(
    repo, fctx, manifest1, manifest2, linkrev, tr, includecopymeta,
):
    """
    commit an individual file as part of a larger transaction

    input:

        fctx:            a file context with the content we are trying to
                         commit
        manifest1:       manifest of changeset first parent
        manifest2:       manifest of changeset second parent
        linkrev:         revision number of the changeset being created
        tr:              current transaction
        includecopymeta: boolean, set to False to skip storing the copy data
                         (only used by the Google specific feature of using
                         changeset extra as copy source of truth).

    output: (filenode, touched)

        filenode: the filenode that should be used by this changeset
        touched:  one of: None (meaning untouched), 'added' or 'modified'
    """

    fname = fctx.path()
    fparent1 = manifest1.get(fname, nullid)
    fparent2 = manifest2.get(fname, nullid)
    touched = None
    if fparent1 == fparent2 == nullid:
        touched = 'added'

    if isinstance(fctx, context.filectx):
        # This block fast-paths the most common comparisons. It assumes
        # that a bare filectx is used and no merge happened, hence no need
        # to create a new file revision in this case.
        node = fctx.filenode()
        if node in [fparent1, fparent2]:
            repo.ui.debug(b'reusing %s filelog entry\n' % fname)
            if (
                fparent1 != nullid and manifest1.flags(fname) != fctx.flags()
            ) or (
                fparent2 != nullid and manifest2.flags(fname) != fctx.flags()
            ):
                touched = 'modified'
            return node, touched

    flog = repo.file(fname)
    meta = {}
    cfname = fctx.copysource()
    fnode = None

    if cfname and cfname != fname:
        # Mark the new revision of this file as a copy of another
        # file. This copy data will effectively act as a parent
        # of this new revision. If this is a merge, the first
        # parent will be the nullid (meaning "look up the copy data")
        # and the second one will be the other parent. For example:
        #
        # 0 --- 1 --- 3   rev1 changes file foo
        #   \       /     rev2 renames foo to bar and changes it
        #    \- 2 -/      rev3 should have bar with all changes and
        #                      should record that bar descends from
        #                      bar in rev2 and foo in rev1
        #
        # this allows this merge to succeed:
        #
        # 0 --- 1 --- 3   rev4 reverts the content change from rev2
        #   \       /     merging rev3 and rev4 should use bar@rev2
        #    \- 2 --- 4   as the merge base
        #

        cnode = manifest1.get(cfname)
        newfparent = fparent2

        if manifest2:  # branch merge
            if fparent2 == nullid or cnode is None:  # copied on remote side
                if cfname in manifest2:
                    cnode = manifest2[cfname]
                    newfparent = fparent1

        # Here, we used to search backwards through history to try to find
        # where the file copy came from if the source of a copy was not in
        # the parent directory. However, this doesn't actually make sense to
        # do (what does a copy from something not in your working copy even
        # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
        # the user that copy information was dropped, so if they didn't
        # expect this outcome it can be fixed, but this is the correct
        # behavior in this circumstance.

        if cnode:
            repo.ui.debug(b" %s: copy %s:%s\n" % (fname, cfname, hex(cnode)))
            if includecopymeta:
                meta[b"copy"] = cfname
                meta[b"copyrev"] = hex(cnode)
            fparent1, fparent2 = nullid, newfparent
        else:
            repo.ui.warn(
                _(
                    b"warning: can't find ancestor for '%s' "
                    b"copied from '%s'!\n"
                )
                % (fname, cfname)
            )

    elif fparent1 == nullid:
        fparent1, fparent2 = fparent2, nullid
    elif fparent2 != nullid:
        # is one parent an ancestor of the other?
        fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
        if fparent1 in fparentancestors:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 in fparentancestors:
            fparent2 = nullid
        elif not fparentancestors:
            # TODO: this whole if-else might be simplified much more
            ms = mergestate.mergestate.read(repo)
            if (
                fname in ms
                and ms[fname] == mergestate.MERGE_RECORD_MERGED_OTHER
            ):
                fparent1, fparent2 = fparent2, nullid

    # is the file changed?
    text = fctx.data()
    if fparent2 != nullid or meta or flog.cmp(fparent1, text):
        if touched is None:  # do not overwrite added
            touched = 'modified'
        fnode = flog.add(text, meta, tr, linkrev, fparent1, fparent2)
    # are just the flags changed during merge?
    elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
        touched = 'modified'
        fnode = fparent1
    else:
        fnode = fparent1
    return fnode, touched


def _commit_manifest(tr, linkrev, ctx, mctx, manifest, files, added, drop):
    """make a new manifest entry (or reuse an existing one)

    given an initialised manifest context and precomputed list of
    - files: files affected by the commit
    - added: new entries in the manifest
    - drop: entries present in parents but absent from this one

    Create a new manifest revision, reuse existing ones if possible.

    Return the nodeid of the manifest revision.
    """
    repo = ctx.repo()

    md = None

    # all of this is cached, so it is fine to get them all from the ctx.
    p1 = ctx.p1()
    p2 = ctx.p2()
    m1ctx = p1.manifestctx()

    m1 = m1ctx.read()

    if not files:
        # if no "files" actually changed in terms of the changelog,
        # try hard to detect unmodified manifest entry so that the
        # exact same commit can be reproduced later on convert.
        md = m1.diff(manifest, scmutil.matchfiles(repo, ctx.files()))
    if not files and md:
        repo.ui.debug(
            b'not reusing manifest (no file change in '
            b'changelog, but manifest differs)\n'
        )
    if files or md:
        repo.ui.note(_(b"committing manifest\n"))
        # we're using narrowmatch here since it's already applied at
        # other stages (such as dirstate.walk), so we're already
        # ignoring things outside of narrowspec in most cases. The
        # one case where we might have files outside the narrowspec
        # at this point is merges, and we already error out in the
        # case where the merge has files outside of the narrowspec,
        # so this is safe.
        mn = mctx.write(
            tr,
            linkrev,
            p1.manifestnode(),
            p2.manifestnode(),
            added,
            drop,
            match=repo.narrowmatch(),
        )
    else:
        repo.ui.debug(
            b'reusing manifest from p1 (listed files actually unchanged)\n'
        )
        mn = p1.manifestnode()

    return mn
+
+
+def _extra_with_copies(
+    repo, extra, files, p1copies, p2copies, filesadded, filesremoved
+):
+    """encode copy information into an `extra` dictionary"""
+    extrasentries = p1copies, p2copies, filesadded, filesremoved
+    if extra is None and any(x is not None for x in extrasentries):
+        extra = {}
+    if p1copies is not None:
+        p1copies = metadata.encodecopies(files, p1copies)
+        extra[b'p1copies'] = p1copies
+    if p2copies is not None:
+        p2copies = metadata.encodecopies(files, p2copies)
+        extra[b'p2copies'] = p2copies
+    if filesadded is not None:
+        filesadded = metadata.encodefileindices(files, filesadded)
+        extra[b'filesadded'] = filesadded
+    if filesremoved is not None:
+        filesremoved = metadata.encodefileindices(files, filesremoved)
+        extra[b'filesremoved'] = filesremoved
+    return extra
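
A quick sketch of the resulting flow (illustrative, reusing the names above):
with the copies-storage mode set to b'extra', commitctx() now strips any
stale b'p1copies'/b'p2copies'/b'filesadded'/b'filesremoved' keys from the
changeset extra and delegates the encoding:

    extra = _extra_with_copies(
        repo, extra, files, p1copies, p2copies, filesadded, filesremoved
    )

so all "copies in extra" encoding lives in commit.py, while changelog.add()
keeps only the changeset-sidedata encoding.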