##// END OF EJS Templates
sidedatacopies: read rename information from sidedata...
marmoute -
r43416:0171483b default
parent child Browse files
Show More
@@ -1,754 +1,766 b''
1 # changelog.py - changelog class for mercurial
1 # changelog.py - changelog class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 from .i18n import _
10 from .i18n import _
11 from .node import (
11 from .node import (
12 bin,
12 bin,
13 hex,
13 hex,
14 nullid,
14 nullid,
15 )
15 )
16 from .thirdparty import attr
16 from .thirdparty import attr
17
17
18 from . import (
18 from . import (
19 encoding,
19 encoding,
20 error,
20 error,
21 pycompat,
21 pycompat,
22 revlog,
22 revlog,
23 util,
23 util,
24 )
24 )
25 from .utils import (
25 from .utils import (
26 dateutil,
26 dateutil,
27 stringutil,
27 stringutil,
28 )
28 )
29
29
30 from .revlogutils import sidedata as sidedatamod
30 from .revlogutils import sidedata as sidedatamod
31
31
32 _defaultextra = {b'branch': b'default'}
32 _defaultextra = {b'branch': b'default'}
33
33
34
34
35 def _string_escape(text):
35 def _string_escape(text):
36 """
36 """
37 >>> from .pycompat import bytechr as chr
37 >>> from .pycompat import bytechr as chr
38 >>> d = {b'nl': chr(10), b'bs': chr(92), b'cr': chr(13), b'nul': chr(0)}
38 >>> d = {b'nl': chr(10), b'bs': chr(92), b'cr': chr(13), b'nul': chr(0)}
39 >>> s = b"ab%(nl)scd%(bs)s%(bs)sn%(nul)s12ab%(cr)scd%(bs)s%(nl)s" % d
39 >>> s = b"ab%(nl)scd%(bs)s%(bs)sn%(nul)s12ab%(cr)scd%(bs)s%(nl)s" % d
40 >>> s
40 >>> s
41 'ab\\ncd\\\\\\\\n\\x0012ab\\rcd\\\\\\n'
41 'ab\\ncd\\\\\\\\n\\x0012ab\\rcd\\\\\\n'
42 >>> res = _string_escape(s)
42 >>> res = _string_escape(s)
43 >>> s == _string_unescape(res)
43 >>> s == _string_unescape(res)
44 True
44 True
45 """
45 """
46 # subset of the string_escape codec
46 # subset of the string_escape codec
47 text = (
47 text = (
48 text.replace(b'\\', b'\\\\')
48 text.replace(b'\\', b'\\\\')
49 .replace(b'\n', b'\\n')
49 .replace(b'\n', b'\\n')
50 .replace(b'\r', b'\\r')
50 .replace(b'\r', b'\\r')
51 )
51 )
52 return text.replace(b'\0', b'\\0')
52 return text.replace(b'\0', b'\\0')
53
53
54
54
def _string_unescape(text):
    """Reverse the transformation performed by ``_string_escape``."""
    if b'\\0' in text:
        # Fix up \0 without getting into trouble with \\0: tag every
        # literal double backslash with a newline marker, substitute the
        # escaped NULs, then drop the markers again.
        text = (
            text.replace(b'\\\\', b'\\\\\n')
            .replace(b'\\0', b'\0')
            .replace(b'\n', b'')
        )
    return stringutil.unescapestr(text)
62
62
63
63
def decodeextra(text):
    """Decode an encoded 'extra' blob into a dict.

    The input is a ``\\0``-separated sequence of escaped ``key:value``
    entries; keys absent from the blob keep the defaults from
    ``_defaultextra``.

    >>> from .pycompat import bytechr as chr
    >>> sorted(decodeextra(encodeextra({b'foo': b'bar', b'baz': chr(0) + b'2'})
    ...                    ).items())
    [('baz', '\\x002'), ('branch', 'default'), ('foo', 'bar')]
    >>> sorted(decodeextra(encodeextra({b'foo': b'bar',
    ...                                 b'baz': chr(92) + chr(0) + b'2'})
    ...                    ).items())
    [('baz', '\\\\\\x002'), ('branch', 'default'), ('foo', 'bar')]
    """
    extra = _defaultextra.copy()
    for entry in text.split(b'\0'):
        if not entry:
            continue
        key, value = _string_unescape(entry).split(b':', 1)
        extra[key] = value
    return extra
81
81
82
82
def encodeextra(d):
    """Serialize the extras dict *d* into its changelog representation."""
    # keys must be sorted to produce a deterministic changelog entry
    entries = []
    for key in sorted(d):
        entry = b'%s:%s' % (key, pycompat.bytestr(d[key]))
        entries.append(_string_escape(entry))
    return b"\0".join(entries)
90
90
91
91
def encodecopies(files, copies):
    """Encode the *copies* dict as index/source pairs relative to *files*.

    Each copied destination becomes ``<index>\\0<source>``; entries are
    joined with newlines.  Raises ``error.ProgrammingError`` when a copy
    target does not appear in *files*.
    """
    items = [
        b'%d\0%s' % (index, copies[dst])
        for index, dst in enumerate(files)
        if dst in copies
    ]
    if len(items) != len(copies):
        raise error.ProgrammingError(
            b'some copy targets missing from file list'
        )
    return b"\n".join(items)
102
102
103
103
def decodecopies(files, data):
    """Decode a copies blob produced by ``encodecopies``.

    Returns a dict mapping each destination file to its copy source, or
    ``None`` when the payload does not parse — e.g. when an extension
    reused the same extras key (such as "p1copies") with different
    syntax.
    """
    if not data:
        return {}
    copies = {}
    try:
        for entry in data.split(b'\n'):
            strindex, src = entry.split(b'\0')
            copies[files[int(strindex)]] = src
    except (ValueError, IndexError):
        return None
    return copies
119
119
120
120
def encodefileindices(files, subset):
    """Encode *subset* as newline-separated decimal indices into *files*."""
    members = set(subset)
    return b'\n'.join(
        b'%d' % index
        for index, name in enumerate(files)
        if name in members
    )
128
128
129
129
def decodefileindices(files, data):
    """Decode a blob of newline-separated indices into file names.

    Returns the selected subset of *files*, or ``None`` when the data is
    malformed or an index falls outside the list — e.g. when an
    extension reused the same extras key (such as "added") with
    different syntax.
    """
    if not data:
        return []
    subset = []
    try:
        for strindex in data.split(b'\n'):
            index = int(strindex)
            if not (0 <= index < len(files)):
                return None
            subset.append(files[index])
    except (ValueError, IndexError):
        return None
    return subset
145
145
146
146
def stripdesc(desc):
    """strip trailing whitespace and leading and trailing empty lines"""
    cleaned = [line.rstrip() for line in desc.splitlines()]
    return b'\n'.join(cleaned).strip(b'\n')
150
150
151
151
class appender(object):
    '''the changelog index must be updated last on disk, so we use this class
    to delay writes to it'''

    def __init__(self, vfs, name, mode, buf):
        # buf is a shared list collecting the chunks written while the
        # delay is active; the real file is only ever read through this
        # wrapper, never written.
        self.data = buf
        fp = vfs(name, mode)
        self.fp = fp
        self.offset = fp.tell()
        # size is the length of the real on-disk file; _end additionally
        # accounts for the buffered, not-yet-flushed bytes.
        self.size = vfs.fstat(fp).st_size
        self._end = self.size

    def end(self):
        # virtual end of file: on-disk size plus buffered data
        return self._end

    def tell(self):
        return self.offset

    def flush(self):
        # writes only land in the in-memory buffer, so there is nothing
        # to flush to disk here
        pass

    @property
    def closed(self):
        return self.fp.closed

    def close(self):
        self.fp.close()

    def seek(self, offset, whence=0):
        '''virtual file offset spans real file and data'''
        if whence == 0:
            self.offset = offset
        elif whence == 1:
            self.offset += offset
        elif whence == 2:
            self.offset = self.end() + offset
        if self.offset < self.size:
            # only reposition the real file when the virtual offset lands
            # inside it; offsets at or past self.size live in the buffer
            self.fp.seek(self.offset)

    def read(self, count=-1):
        '''only trick here is reads that span real file and data'''
        ret = b""
        if self.offset < self.size:
            # start by reading from the real file
            s = self.fp.read(count)
            ret = s
            self.offset += len(s)
            if count > 0:
                count -= len(s)
        if count != 0:
            # the remainder (or the whole read) comes from the buffer;
            # first collapse the buffered chunks into a single bytes
            # object so it can be sliced by offset
            doff = self.offset - self.size
            self.data.insert(0, b"".join(self.data))
            del self.data[1:]
            s = self.data[0][doff : doff + count]
            self.offset += len(s)
            ret += s
        return ret

    def write(self, s):
        # append to the delay buffer and grow the virtual file
        self.data.append(bytes(s))
        self.offset += len(s)
        self._end += len(s)

    def __enter__(self):
        self.fp.__enter__()
        return self

    def __exit__(self, *args):
        return self.fp.__exit__(*args)
220
220
221
221
222 def _divertopener(opener, target):
222 def _divertopener(opener, target):
223 """build an opener that writes in 'target.a' instead of 'target'"""
223 """build an opener that writes in 'target.a' instead of 'target'"""
224
224
225 def _divert(name, mode=b'r', checkambig=False):
225 def _divert(name, mode=b'r', checkambig=False):
226 if name != target:
226 if name != target:
227 return opener(name, mode)
227 return opener(name, mode)
228 return opener(name + b".a", mode)
228 return opener(name + b".a", mode)
229
229
230 return _divert
230 return _divert
231
231
232
232
def _delayopener(opener, target, buf):
    """build an opener that stores chunks in 'buf' instead of 'target'"""

    def _delay(name, mode=b'r', checkambig=False):
        # wrap only the watched file in an appender; everything else
        # opens normally
        if name == target:
            return appender(opener, name, mode, buf)
        return opener(name, mode)

    return _delay
242
242
243
243
@attr.s
class _changelogrevision(object):
    # Value object returned for an empty/parsed-out changelog revision;
    # mirrors the attribute surface of changelogrevision below.

    # Extensions might modify _defaultextra, so let the constructor below pass
    # it in
    extra = attr.ib()
    # binary manifest node of the revision
    manifest = attr.ib(default=nullid)
    # committer name/email bytes
    user = attr.ib(default=b'')
    # (timestamp, timezone offset) pair
    date = attr.ib(default=(0, 0))
    # files touched by the changeset
    files = attr.ib(default=attr.Factory(list))
    # added/removed file lists; None when the metadata was not recorded
    filesadded = attr.ib(default=None)
    filesremoved = attr.ib(default=None)
    # copy information relative to p1/p2; None when not recorded
    p1copies = attr.ib(default=None)
    p2copies = attr.ib(default=None)
    # commit message
    description = attr.ib(default=b'')
258
258
259
259
class changelogrevision(object):
    """Holds results of a parsed changelog revision.

    Changelog revisions consist of multiple pieces of data, including
    the manifest node, user, and date. This object exposes a view into
    the parsed object.
    """

    __slots__ = (
        r'_offsets',
        r'_text',
        r'_sidedata',
    )

    def __new__(cls, text, sidedata):
        if not text:
            # empty revision (e.g. the null revision): fall back to the
            # plain value object with default extras
            return _changelogrevision(extra=_defaultextra)

        self = super(changelogrevision, cls).__new__(cls)
        # We could return here and implement the following as an __init__.
        # But doing it here is equivalent and saves an extra function call.

        # format used:
        # nodeid\n        : manifest node in ascii
        # user\n          : user, no \n or \r allowed
        # time tz extra\n : date (time is int or float, timezone is int)
        #                 : extra is metadata, encoded and separated by '\0'
        #                 : older versions ignore it
        # files\n\n       : files modified by the cset, no \n or \r allowed
        # (.*)            : comment (free text, ideally utf-8)
        #
        # changelog v0 doesn't use extra

        nl1 = text.index(b'\n')
        nl2 = text.index(b'\n', nl1 + 1)
        nl3 = text.index(b'\n', nl2 + 1)

        # The list of files may be empty. Which means nl3 is the first of the
        # double newline that precedes the description.
        if text[nl3 + 1 : nl3 + 2] == b'\n':
            doublenl = nl3
        else:
            doublenl = text.index(b'\n\n', nl3 + 1)

        # remember the field boundaries; properties below slice the raw
        # text lazily instead of parsing everything up front
        self._offsets = (nl1, nl2, nl3, doublenl)
        self._text = text
        self._sidedata = sidedata

        return self

    @property
    def manifest(self):
        return bin(self._text[0 : self._offsets[0]])

    @property
    def user(self):
        off = self._offsets
        return encoding.tolocal(self._text[off[0] + 1 : off[1]])

    @property
    def _rawdate(self):
        # the (time, tz) prefix of the "time tz extra" line
        off = self._offsets
        dateextra = self._text[off[1] + 1 : off[2]]
        return dateextra.split(b' ', 2)[0:2]

    @property
    def _rawextra(self):
        # the still-encoded extra blob, or None if the line only carried
        # the date
        off = self._offsets
        dateextra = self._text[off[1] + 1 : off[2]]
        fields = dateextra.split(b' ', 2)
        if len(fields) != 3:
            return None

        return fields[2]

    @property
    def date(self):
        raw = self._rawdate
        time = float(raw[0])
        # Various tools did silly things with the timezone.
        try:
            timezone = int(raw[1])
        except ValueError:
            timezone = 0

        return time, timezone

    @property
    def extra(self):
        raw = self._rawextra
        if raw is None:
            return _defaultextra

        return decodeextra(raw)

    @property
    def files(self):
        off = self._offsets
        if off[2] == off[3]:
            return []

        return self._text[off[2] + 1 : off[3]].split(b'\n')

    @property
    def filesadded(self):
        # sidedata, when present for this key, takes precedence over the
        # legacy extras-based storage
        if sidedatamod.SD_FILESADDED in self._sidedata:
            rawindices = self._sidedata.get(sidedatamod.SD_FILESADDED)
        else:
            rawindices = self.extra.get(b'filesadded')
        if rawindices is None:
            return None
        return decodefileindices(self.files, rawindices)

    @property
    def filesremoved(self):
        # same sidedata-over-extras precedence as filesadded
        if sidedatamod.SD_FILESREMOVED in self._sidedata:
            rawindices = self._sidedata.get(sidedatamod.SD_FILESREMOVED)
        else:
            rawindices = self.extra.get(b'filesremoved')
        if rawindices is None:
            return None
        return decodefileindices(self.files, rawindices)

    @property
    def p1copies(self):
        # copies relative to the first parent; sidedata wins over extras
        if sidedatamod.SD_P1COPIES in self._sidedata:
            rawcopies = self._sidedata.get(sidedatamod.SD_P1COPIES)
        else:
            rawcopies = self.extra.get(b'p1copies')
        if rawcopies is None:
            return None
        return decodecopies(self.files, rawcopies)

    @property
    def p2copies(self):
        # copies relative to the second parent; sidedata wins over extras
        if sidedatamod.SD_P2COPIES in self._sidedata:
            rawcopies = self._sidedata.get(sidedatamod.SD_P2COPIES)
        else:
            rawcopies = self.extra.get(b'p2copies')
        if rawcopies is None:
            return None
        return decodecopies(self.files, rawcopies)

    @property
    def description(self):
        # skip the double newline that separates files from the comment
        return encoding.tolocal(self._text[self._offsets[3] + 2 :])
394
406
395
407
396 class changelog(revlog.revlog):
408 class changelog(revlog.revlog):
    def __init__(self, opener, trypending=False):
        """Load a changelog revlog using an opener.

        If ``trypending`` is true, we attempt to load the index from a
        ``00changelog.i.a`` file instead of the default ``00changelog.i``.
        The ``00changelog.i.a`` file contains index (and possibly inline
        revision) data for a transaction that hasn't been finalized yet.
        It exists in a separate file to facilitate readers (such as
        hooks processes) accessing data before a transaction is finalized.
        """
        if trypending and opener.exists(b'00changelog.i.a'):
            indexfile = b'00changelog.i.a'
        else:
            indexfile = b'00changelog.i'

        datafile = b'00changelog.d'
        revlog.revlog.__init__(
            self,
            opener,
            indexfile,
            datafile=datafile,
            checkambig=True,
            mmaplargeindex=True,
        )

        if self._initempty and (self.version & 0xFFFF == revlog.REVLOGV1):
            # changelogs don't benefit from generaldelta.

            self.version &= ~revlog.FLAG_GENERALDELTA
            self._generaldelta = False

        # Delta chains for changelogs tend to be very small because entries
        # tend to be small and don't delta well with each. So disable delta
        # chains.
        self._storedeltachains = False

        self._realopener = opener
        # delayed-write state, managed by delayupdate/_finalize below
        self._delayed = False
        self._delaybuf = None
        self._divert = False
        self.filteredrevs = frozenset()
        # copy-tracing storage policy, configured via the opener options
        # set up by the repository
        self._copiesstorage = opener.options.get(b'copies-storage')
439
451
440 def tiprev(self):
452 def tiprev(self):
441 for i in pycompat.xrange(len(self) - 1, -2, -1):
453 for i in pycompat.xrange(len(self) - 1, -2, -1):
442 if i not in self.filteredrevs:
454 if i not in self.filteredrevs:
443 return i
455 return i
444
456
445 def tip(self):
457 def tip(self):
446 """filtered version of revlog.tip"""
458 """filtered version of revlog.tip"""
447 return self.node(self.tiprev())
459 return self.node(self.tiprev())
448
460
449 def __contains__(self, rev):
461 def __contains__(self, rev):
450 """filtered version of revlog.__contains__"""
462 """filtered version of revlog.__contains__"""
451 return 0 <= rev < len(self) and rev not in self.filteredrevs
463 return 0 <= rev < len(self) and rev not in self.filteredrevs
452
464
453 def __iter__(self):
465 def __iter__(self):
454 """filtered version of revlog.__iter__"""
466 """filtered version of revlog.__iter__"""
455 if len(self.filteredrevs) == 0:
467 if len(self.filteredrevs) == 0:
456 return revlog.revlog.__iter__(self)
468 return revlog.revlog.__iter__(self)
457
469
458 def filterediter():
470 def filterediter():
459 for i in pycompat.xrange(len(self)):
471 for i in pycompat.xrange(len(self)):
460 if i not in self.filteredrevs:
472 if i not in self.filteredrevs:
461 yield i
473 yield i
462
474
463 return filterediter()
475 return filterediter()
464
476
465 def revs(self, start=0, stop=None):
477 def revs(self, start=0, stop=None):
466 """filtered version of revlog.revs"""
478 """filtered version of revlog.revs"""
467 for i in super(changelog, self).revs(start, stop):
479 for i in super(changelog, self).revs(start, stop):
468 if i not in self.filteredrevs:
480 if i not in self.filteredrevs:
469 yield i
481 yield i
470
482
471 def _checknofilteredinrevs(self, revs):
483 def _checknofilteredinrevs(self, revs):
472 """raise the appropriate error if 'revs' contains a filtered revision
484 """raise the appropriate error if 'revs' contains a filtered revision
473
485
474 This returns a version of 'revs' to be used thereafter by the caller.
486 This returns a version of 'revs' to be used thereafter by the caller.
475 In particular, if revs is an iterator, it is converted into a set.
487 In particular, if revs is an iterator, it is converted into a set.
476 """
488 """
477 safehasattr = util.safehasattr
489 safehasattr = util.safehasattr
478 if safehasattr(revs, '__next__'):
490 if safehasattr(revs, '__next__'):
479 # Note that inspect.isgenerator() is not true for iterators,
491 # Note that inspect.isgenerator() is not true for iterators,
480 revs = set(revs)
492 revs = set(revs)
481
493
482 filteredrevs = self.filteredrevs
494 filteredrevs = self.filteredrevs
483 if safehasattr(revs, 'first'): # smartset
495 if safehasattr(revs, 'first'): # smartset
484 offenders = revs & filteredrevs
496 offenders = revs & filteredrevs
485 else:
497 else:
486 offenders = filteredrevs.intersection(revs)
498 offenders = filteredrevs.intersection(revs)
487
499
488 for rev in offenders:
500 for rev in offenders:
489 raise error.FilteredIndexError(rev)
501 raise error.FilteredIndexError(rev)
490 return revs
502 return revs
491
503
    def headrevs(self, revs=None):
        """filtered version of revlog.headrevs"""
        if revs is None and self.filteredrevs:
            try:
                return self.index.headrevsfiltered(self.filteredrevs)
            # AttributeError covers non-c-extension environments and
            # old c extensions without filter handling.
            except AttributeError:
                return self._headrevs()

        if self.filteredrevs:
            # reject (and normalize) any filtered revision in the input
            revs = self._checknofilteredinrevs(revs)
        return super(changelog, self).headrevs(revs)
504
516
    def strip(self, *args, **kwargs):
        """filtered-aware passthrough to revlog.strip"""
        # XXX make something better than assert
        # We can't expect proper strip behavior if we are filtered.
        assert not self.filteredrevs
        super(changelog, self).strip(*args, **kwargs)
510
522
511 def rev(self, node):
523 def rev(self, node):
512 """filtered version of revlog.rev"""
524 """filtered version of revlog.rev"""
513 r = super(changelog, self).rev(node)
525 r = super(changelog, self).rev(node)
514 if r in self.filteredrevs:
526 if r in self.filteredrevs:
515 raise error.FilteredLookupError(
527 raise error.FilteredLookupError(
516 hex(node), self.indexfile, _(b'filtered node')
528 hex(node), self.indexfile, _(b'filtered node')
517 )
529 )
518 return r
530 return r
519
531
520 def node(self, rev):
532 def node(self, rev):
521 """filtered version of revlog.node"""
533 """filtered version of revlog.node"""
522 if rev in self.filteredrevs:
534 if rev in self.filteredrevs:
523 raise error.FilteredIndexError(rev)
535 raise error.FilteredIndexError(rev)
524 return super(changelog, self).node(rev)
536 return super(changelog, self).node(rev)
525
537
526 def linkrev(self, rev):
538 def linkrev(self, rev):
527 """filtered version of revlog.linkrev"""
539 """filtered version of revlog.linkrev"""
528 if rev in self.filteredrevs:
540 if rev in self.filteredrevs:
529 raise error.FilteredIndexError(rev)
541 raise error.FilteredIndexError(rev)
530 return super(changelog, self).linkrev(rev)
542 return super(changelog, self).linkrev(rev)
531
543
532 def parentrevs(self, rev):
544 def parentrevs(self, rev):
533 """filtered version of revlog.parentrevs"""
545 """filtered version of revlog.parentrevs"""
534 if rev in self.filteredrevs:
546 if rev in self.filteredrevs:
535 raise error.FilteredIndexError(rev)
547 raise error.FilteredIndexError(rev)
536 return super(changelog, self).parentrevs(rev)
548 return super(changelog, self).parentrevs(rev)
537
549
538 def flags(self, rev):
550 def flags(self, rev):
539 """filtered version of revlog.flags"""
551 """filtered version of revlog.flags"""
540 if rev in self.filteredrevs:
552 if rev in self.filteredrevs:
541 raise error.FilteredIndexError(rev)
553 raise error.FilteredIndexError(rev)
542 return super(changelog, self).flags(rev)
554 return super(changelog, self).flags(rev)
543
555
    def delayupdate(self, tr):
        b"delay visibility of index updates to other readers"

        if not self._delayed:
            if len(self) == 0:
                # the changelog is empty: divert all writes into a
                # '00changelog.i.a' file that _finalize will rename into
                # place atomically
                self._divert = True
                if self._realopener.exists(self.indexfile + b'.a'):
                    # leftover from an aborted transaction
                    self._realopener.unlink(self.indexfile + b'.a')
                self.opener = _divertopener(self._realopener, self.indexfile)
            else:
                # otherwise buffer the appended data in memory until the
                # transaction is finalized
                self._delaybuf = []
                self.opener = _delayopener(
                    self._realopener, self.indexfile, self._delaybuf
                )
        self._delayed = True
        tr.addpending(b'cl-%i' % id(self), self._writepending)
        tr.addfinalize(b'cl-%i' % id(self), self._finalize)
561
573
    def _finalize(self, tr):
        b"finalize index updates"
        self._delayed = False
        self.opener = self._realopener
        # move redirected index data back into place
        if self._divert:
            assert not self._delaybuf
            tmpname = self.indexfile + b".a"
            # open/close before renaming — presumably to flush/validate
            # the diverted file; TODO confirm intent
            nfile = self.opener.open(tmpname)
            nfile.close()
            self.opener.rename(tmpname, self.indexfile, checkambig=True)
        elif self._delaybuf:
            # append the in-memory buffered entries to the real index
            fp = self.opener(self.indexfile, b'a', checkambig=True)
            fp.write(b"".join(self._delaybuf))
            fp.close()
            self._delaybuf = None
        self._divert = False
        # split when we're done
        self._enforceinlinesize(tr)
581
593
582 def _writepending(self, tr):
594 def _writepending(self, tr):
583 b"create a file containing the unfinalized state for pretxnchangegroup"
595 b"create a file containing the unfinalized state for pretxnchangegroup"
584 if self._delaybuf:
596 if self._delaybuf:
585 # make a temporary copy of the index
597 # make a temporary copy of the index
586 fp1 = self._realopener(self.indexfile)
598 fp1 = self._realopener(self.indexfile)
587 pendingfilename = self.indexfile + b".a"
599 pendingfilename = self.indexfile + b".a"
588 # register as a temp file to ensure cleanup on failure
600 # register as a temp file to ensure cleanup on failure
589 tr.registertmp(pendingfilename)
601 tr.registertmp(pendingfilename)
590 # write existing data
602 # write existing data
591 fp2 = self._realopener(pendingfilename, b"w")
603 fp2 = self._realopener(pendingfilename, b"w")
592 fp2.write(fp1.read())
604 fp2.write(fp1.read())
593 # add pending data
605 # add pending data
594 fp2.write(b"".join(self._delaybuf))
606 fp2.write(b"".join(self._delaybuf))
595 fp2.close()
607 fp2.close()
596 # switch modes so finalize can simply rename
608 # switch modes so finalize can simply rename
597 self._delaybuf = None
609 self._delaybuf = None
598 self._divert = True
610 self._divert = True
599 self.opener = _divertopener(self._realopener, self.indexfile)
611 self.opener = _divertopener(self._realopener, self.indexfile)
600
612
601 if self._divert:
613 if self._divert:
602 return True
614 return True
603
615
604 return False
616 return False
605
617
606 def _enforceinlinesize(self, tr, fp=None):
618 def _enforceinlinesize(self, tr, fp=None):
607 if not self._delayed:
619 if not self._delayed:
608 revlog.revlog._enforceinlinesize(self, tr, fp)
620 revlog.revlog._enforceinlinesize(self, tr, fp)
609
621
610 def read(self, node):
622 def read(self, node):
611 """Obtain data from a parsed changelog revision.
623 """Obtain data from a parsed changelog revision.
612
624
613 Returns a 6-tuple of:
625 Returns a 6-tuple of:
614
626
615 - manifest node in binary
627 - manifest node in binary
616 - author/user as a localstr
628 - author/user as a localstr
617 - date as a 2-tuple of (time, timezone)
629 - date as a 2-tuple of (time, timezone)
618 - list of files
630 - list of files
619 - commit message as a localstr
631 - commit message as a localstr
620 - dict of extra metadata
632 - dict of extra metadata
621
633
622 Unless you need to access all fields, consider calling
634 Unless you need to access all fields, consider calling
623 ``changelogrevision`` instead, as it is faster for partial object
635 ``changelogrevision`` instead, as it is faster for partial object
624 access.
636 access.
625 """
637 """
626 c = changelogrevision(*self._revisiondata(node))
638 c = changelogrevision(*self._revisiondata(node))
627 return (c.manifest, c.user, c.date, c.files, c.description, c.extra)
639 return (c.manifest, c.user, c.date, c.files, c.description, c.extra)
628
640
629 def changelogrevision(self, nodeorrev):
641 def changelogrevision(self, nodeorrev):
630 """Obtain a ``changelogrevision`` for a node or revision."""
642 """Obtain a ``changelogrevision`` for a node or revision."""
631 text, sidedata = self._revisiondata(nodeorrev)
643 text, sidedata = self._revisiondata(nodeorrev)
632 return changelogrevision(text, sidedata)
644 return changelogrevision(text, sidedata)
633
645
634 def readfiles(self, node):
646 def readfiles(self, node):
635 """
647 """
636 short version of read that only returns the files modified by the cset
648 short version of read that only returns the files modified by the cset
637 """
649 """
638 text = self.revision(node)
650 text = self.revision(node)
639 if not text:
651 if not text:
640 return []
652 return []
641 last = text.index(b"\n\n")
653 last = text.index(b"\n\n")
642 l = text[:last].split(b'\n')
654 l = text[:last].split(b'\n')
643 return l[3:]
655 return l[3:]
644
656
645 def add(
657 def add(
646 self,
658 self,
647 manifest,
659 manifest,
648 files,
660 files,
649 desc,
661 desc,
650 transaction,
662 transaction,
651 p1,
663 p1,
652 p2,
664 p2,
653 user,
665 user,
654 date=None,
666 date=None,
655 extra=None,
667 extra=None,
656 p1copies=None,
668 p1copies=None,
657 p2copies=None,
669 p2copies=None,
658 filesadded=None,
670 filesadded=None,
659 filesremoved=None,
671 filesremoved=None,
660 ):
672 ):
661 # Convert to UTF-8 encoded bytestrings as the very first
673 # Convert to UTF-8 encoded bytestrings as the very first
662 # thing: calling any method on a localstr object will turn it
674 # thing: calling any method on a localstr object will turn it
663 # into a str object and the cached UTF-8 string is thus lost.
675 # into a str object and the cached UTF-8 string is thus lost.
664 user, desc = encoding.fromlocal(user), encoding.fromlocal(desc)
676 user, desc = encoding.fromlocal(user), encoding.fromlocal(desc)
665
677
666 user = user.strip()
678 user = user.strip()
667 # An empty username or a username with a "\n" will make the
679 # An empty username or a username with a "\n" will make the
668 # revision text contain two "\n\n" sequences -> corrupt
680 # revision text contain two "\n\n" sequences -> corrupt
669 # repository since read cannot unpack the revision.
681 # repository since read cannot unpack the revision.
670 if not user:
682 if not user:
671 raise error.StorageError(_(b"empty username"))
683 raise error.StorageError(_(b"empty username"))
672 if b"\n" in user:
684 if b"\n" in user:
673 raise error.StorageError(
685 raise error.StorageError(
674 _(b"username %r contains a newline") % pycompat.bytestr(user)
686 _(b"username %r contains a newline") % pycompat.bytestr(user)
675 )
687 )
676
688
677 desc = stripdesc(desc)
689 desc = stripdesc(desc)
678
690
679 if date:
691 if date:
680 parseddate = b"%d %d" % dateutil.parsedate(date)
692 parseddate = b"%d %d" % dateutil.parsedate(date)
681 else:
693 else:
682 parseddate = b"%d %d" % dateutil.makedate()
694 parseddate = b"%d %d" % dateutil.makedate()
683 if extra:
695 if extra:
684 branch = extra.get(b"branch")
696 branch = extra.get(b"branch")
685 if branch in (b"default", b""):
697 if branch in (b"default", b""):
686 del extra[b"branch"]
698 del extra[b"branch"]
687 elif branch in (b".", b"null", b"tip"):
699 elif branch in (b".", b"null", b"tip"):
688 raise error.StorageError(
700 raise error.StorageError(
689 _(b'the name \'%s\' is reserved') % branch
701 _(b'the name \'%s\' is reserved') % branch
690 )
702 )
691 sortedfiles = sorted(files)
703 sortedfiles = sorted(files)
692 sidedata = None
704 sidedata = None
693 if extra is not None:
705 if extra is not None:
694 for name in (
706 for name in (
695 b'p1copies',
707 b'p1copies',
696 b'p2copies',
708 b'p2copies',
697 b'filesadded',
709 b'filesadded',
698 b'filesremoved',
710 b'filesremoved',
699 ):
711 ):
700 extra.pop(name, None)
712 extra.pop(name, None)
701 if p1copies is not None:
713 if p1copies is not None:
702 p1copies = encodecopies(sortedfiles, p1copies)
714 p1copies = encodecopies(sortedfiles, p1copies)
703 if p2copies is not None:
715 if p2copies is not None:
704 p2copies = encodecopies(sortedfiles, p2copies)
716 p2copies = encodecopies(sortedfiles, p2copies)
705 if filesadded is not None:
717 if filesadded is not None:
706 filesadded = encodefileindices(sortedfiles, filesadded)
718 filesadded = encodefileindices(sortedfiles, filesadded)
707 if filesremoved is not None:
719 if filesremoved is not None:
708 filesremoved = encodefileindices(sortedfiles, filesremoved)
720 filesremoved = encodefileindices(sortedfiles, filesremoved)
709 if self._copiesstorage == b'extra':
721 if self._copiesstorage == b'extra':
710 extrasentries = p1copies, p2copies, filesadded, filesremoved
722 extrasentries = p1copies, p2copies, filesadded, filesremoved
711 if extra is None and any(x is not None for x in extrasentries):
723 if extra is None and any(x is not None for x in extrasentries):
712 extra = {}
724 extra = {}
713 if p1copies is not None:
725 if p1copies is not None:
714 extra[b'p1copies'] = p1copies
726 extra[b'p1copies'] = p1copies
715 if p2copies is not None:
727 if p2copies is not None:
716 extra[b'p2copies'] = p2copies
728 extra[b'p2copies'] = p2copies
717 if filesadded is not None:
729 if filesadded is not None:
718 extra[b'filesadded'] = filesadded
730 extra[b'filesadded'] = filesadded
719 if filesremoved is not None:
731 if filesremoved is not None:
720 extra[b'filesremoved'] = filesremoved
732 extra[b'filesremoved'] = filesremoved
721 elif self._copiesstorage == b'changeset-sidedata':
733 elif self._copiesstorage == b'changeset-sidedata':
722 sidedata = {}
734 sidedata = {}
723 if p1copies is not None:
735 if p1copies is not None:
724 sidedata[sidedatamod.SD_P1COPIES] = p1copies
736 sidedata[sidedatamod.SD_P1COPIES] = p1copies
725 if p2copies is not None:
737 if p2copies is not None:
726 sidedata[sidedatamod.SD_P2COPIES] = p2copies
738 sidedata[sidedatamod.SD_P2COPIES] = p2copies
727 if filesadded is not None:
739 if filesadded is not None:
728 sidedata[sidedatamod.SD_FILESADDED] = filesadded
740 sidedata[sidedatamod.SD_FILESADDED] = filesadded
729 if filesremoved is not None:
741 if filesremoved is not None:
730 sidedata[sidedatamod.SD_FILESREMOVED] = filesremoved
742 sidedata[sidedatamod.SD_FILESREMOVED] = filesremoved
731
743
732 if extra:
744 if extra:
733 extra = encodeextra(extra)
745 extra = encodeextra(extra)
734 parseddate = b"%s %s" % (parseddate, extra)
746 parseddate = b"%s %s" % (parseddate, extra)
735 l = [hex(manifest), user, parseddate] + sortedfiles + [b"", desc]
747 l = [hex(manifest), user, parseddate] + sortedfiles + [b"", desc]
736 text = b"\n".join(l)
748 text = b"\n".join(l)
737 return self.addrevision(
749 return self.addrevision(
738 text, transaction, len(self), p1, p2, sidedata=sidedata
750 text, transaction, len(self), p1, p2, sidedata=sidedata
739 )
751 )
740
752
741 def branchinfo(self, rev):
753 def branchinfo(self, rev):
742 """return the branch name and open/close state of a revision
754 """return the branch name and open/close state of a revision
743
755
744 This function exists because creating a changectx object
756 This function exists because creating a changectx object
745 just to access this is costly."""
757 just to access this is costly."""
746 extra = self.read(rev)[5]
758 extra = self.read(rev)[5]
747 return encoding.tolocal(extra.get(b"branch")), b'close' in extra
759 return encoding.tolocal(extra.get(b"branch")), b'close' in extra
748
760
749 def _nodeduplicatecallback(self, transaction, node):
761 def _nodeduplicatecallback(self, transaction, node):
750 # keep track of revisions that got "re-added", eg: unbunde of know rev.
762 # keep track of revisions that got "re-added", eg: unbunde of know rev.
751 #
763 #
752 # We track them in a list to preserve their order from the source bundle
764 # We track them in a list to preserve their order from the source bundle
753 duplicates = transaction.changes.setdefault(b'revduplicates', [])
765 duplicates = transaction.changes.setdefault(b'revduplicates', [])
754 duplicates.append(self.rev(node))
766 duplicates.append(self.rev(node))
@@ -1,2966 +1,2987 b''
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import filecmp
11 import filecmp
12 import os
12 import os
13 import stat
13 import stat
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 addednodeid,
17 addednodeid,
18 hex,
18 hex,
19 modifiednodeid,
19 modifiednodeid,
20 nullid,
20 nullid,
21 nullrev,
21 nullrev,
22 short,
22 short,
23 wdirfilenodeids,
23 wdirfilenodeids,
24 wdirhex,
24 wdirhex,
25 )
25 )
26 from .pycompat import (
26 from .pycompat import (
27 getattr,
27 getattr,
28 open,
28 open,
29 )
29 )
30 from . import (
30 from . import (
31 copies,
31 copies,
32 dagop,
32 dagop,
33 encoding,
33 encoding,
34 error,
34 error,
35 fileset,
35 fileset,
36 match as matchmod,
36 match as matchmod,
37 obsolete as obsmod,
37 obsolete as obsmod,
38 patch,
38 patch,
39 pathutil,
39 pathutil,
40 phases,
40 phases,
41 pycompat,
41 pycompat,
42 repoview,
42 repoview,
43 scmutil,
43 scmutil,
44 sparse,
44 sparse,
45 subrepo,
45 subrepo,
46 subrepoutil,
46 subrepoutil,
47 util,
47 util,
48 )
48 )
49 from .utils import (
49 from .utils import (
50 dateutil,
50 dateutil,
51 stringutil,
51 stringutil,
52 )
52 )
53
53
54 propertycache = util.propertycache
54 propertycache = util.propertycache
55
55
56
56
57 class basectx(object):
57 class basectx(object):
58 """A basectx object represents the common logic for its children:
58 """A basectx object represents the common logic for its children:
59 changectx: read-only context that is already present in the repo,
59 changectx: read-only context that is already present in the repo,
60 workingctx: a context that represents the working directory and can
60 workingctx: a context that represents the working directory and can
61 be committed,
61 be committed,
62 memctx: a context that represents changes in-memory and can also
62 memctx: a context that represents changes in-memory and can also
63 be committed."""
63 be committed."""
64
64
65 def __init__(self, repo):
65 def __init__(self, repo):
66 self._repo = repo
66 self._repo = repo
67
67
68 def __bytes__(self):
68 def __bytes__(self):
69 return short(self.node())
69 return short(self.node())
70
70
71 __str__ = encoding.strmethod(__bytes__)
71 __str__ = encoding.strmethod(__bytes__)
72
72
73 def __repr__(self):
73 def __repr__(self):
74 return r"<%s %s>" % (type(self).__name__, str(self))
74 return r"<%s %s>" % (type(self).__name__, str(self))
75
75
76 def __eq__(self, other):
76 def __eq__(self, other):
77 try:
77 try:
78 return type(self) == type(other) and self._rev == other._rev
78 return type(self) == type(other) and self._rev == other._rev
79 except AttributeError:
79 except AttributeError:
80 return False
80 return False
81
81
82 def __ne__(self, other):
82 def __ne__(self, other):
83 return not (self == other)
83 return not (self == other)
84
84
85 def __contains__(self, key):
85 def __contains__(self, key):
86 return key in self._manifest
86 return key in self._manifest
87
87
88 def __getitem__(self, key):
88 def __getitem__(self, key):
89 return self.filectx(key)
89 return self.filectx(key)
90
90
91 def __iter__(self):
91 def __iter__(self):
92 return iter(self._manifest)
92 return iter(self._manifest)
93
93
94 def _buildstatusmanifest(self, status):
94 def _buildstatusmanifest(self, status):
95 """Builds a manifest that includes the given status results, if this is
95 """Builds a manifest that includes the given status results, if this is
96 a working copy context. For non-working copy contexts, it just returns
96 a working copy context. For non-working copy contexts, it just returns
97 the normal manifest."""
97 the normal manifest."""
98 return self.manifest()
98 return self.manifest()
99
99
100 def _matchstatus(self, other, match):
100 def _matchstatus(self, other, match):
101 """This internal method provides a way for child objects to override the
101 """This internal method provides a way for child objects to override the
102 match operator.
102 match operator.
103 """
103 """
104 return match
104 return match
105
105
106 def _buildstatus(
106 def _buildstatus(
107 self, other, s, match, listignored, listclean, listunknown
107 self, other, s, match, listignored, listclean, listunknown
108 ):
108 ):
109 """build a status with respect to another context"""
109 """build a status with respect to another context"""
110 # Load earliest manifest first for caching reasons. More specifically,
110 # Load earliest manifest first for caching reasons. More specifically,
111 # if you have revisions 1000 and 1001, 1001 is probably stored as a
111 # if you have revisions 1000 and 1001, 1001 is probably stored as a
112 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
112 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
113 # 1000 and cache it so that when you read 1001, we just need to apply a
113 # 1000 and cache it so that when you read 1001, we just need to apply a
114 # delta to what's in the cache. So that's one full reconstruction + one
114 # delta to what's in the cache. So that's one full reconstruction + one
115 # delta application.
115 # delta application.
116 mf2 = None
116 mf2 = None
117 if self.rev() is not None and self.rev() < other.rev():
117 if self.rev() is not None and self.rev() < other.rev():
118 mf2 = self._buildstatusmanifest(s)
118 mf2 = self._buildstatusmanifest(s)
119 mf1 = other._buildstatusmanifest(s)
119 mf1 = other._buildstatusmanifest(s)
120 if mf2 is None:
120 if mf2 is None:
121 mf2 = self._buildstatusmanifest(s)
121 mf2 = self._buildstatusmanifest(s)
122
122
123 modified, added = [], []
123 modified, added = [], []
124 removed = []
124 removed = []
125 clean = []
125 clean = []
126 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
126 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
127 deletedset = set(deleted)
127 deletedset = set(deleted)
128 d = mf1.diff(mf2, match=match, clean=listclean)
128 d = mf1.diff(mf2, match=match, clean=listclean)
129 for fn, value in pycompat.iteritems(d):
129 for fn, value in pycompat.iteritems(d):
130 if fn in deletedset:
130 if fn in deletedset:
131 continue
131 continue
132 if value is None:
132 if value is None:
133 clean.append(fn)
133 clean.append(fn)
134 continue
134 continue
135 (node1, flag1), (node2, flag2) = value
135 (node1, flag1), (node2, flag2) = value
136 if node1 is None:
136 if node1 is None:
137 added.append(fn)
137 added.append(fn)
138 elif node2 is None:
138 elif node2 is None:
139 removed.append(fn)
139 removed.append(fn)
140 elif flag1 != flag2:
140 elif flag1 != flag2:
141 modified.append(fn)
141 modified.append(fn)
142 elif node2 not in wdirfilenodeids:
142 elif node2 not in wdirfilenodeids:
143 # When comparing files between two commits, we save time by
143 # When comparing files between two commits, we save time by
144 # not comparing the file contents when the nodeids differ.
144 # not comparing the file contents when the nodeids differ.
145 # Note that this means we incorrectly report a reverted change
145 # Note that this means we incorrectly report a reverted change
146 # to a file as a modification.
146 # to a file as a modification.
147 modified.append(fn)
147 modified.append(fn)
148 elif self[fn].cmp(other[fn]):
148 elif self[fn].cmp(other[fn]):
149 modified.append(fn)
149 modified.append(fn)
150 else:
150 else:
151 clean.append(fn)
151 clean.append(fn)
152
152
153 if removed:
153 if removed:
154 # need to filter files if they are already reported as removed
154 # need to filter files if they are already reported as removed
155 unknown = [
155 unknown = [
156 fn
156 fn
157 for fn in unknown
157 for fn in unknown
158 if fn not in mf1 and (not match or match(fn))
158 if fn not in mf1 and (not match or match(fn))
159 ]
159 ]
160 ignored = [
160 ignored = [
161 fn
161 fn
162 for fn in ignored
162 for fn in ignored
163 if fn not in mf1 and (not match or match(fn))
163 if fn not in mf1 and (not match or match(fn))
164 ]
164 ]
165 # if they're deleted, don't report them as removed
165 # if they're deleted, don't report them as removed
166 removed = [fn for fn in removed if fn not in deletedset]
166 removed = [fn for fn in removed if fn not in deletedset]
167
167
168 return scmutil.status(
168 return scmutil.status(
169 modified, added, removed, deleted, unknown, ignored, clean
169 modified, added, removed, deleted, unknown, ignored, clean
170 )
170 )
171
171
172 @propertycache
172 @propertycache
173 def substate(self):
173 def substate(self):
174 return subrepoutil.state(self, self._repo.ui)
174 return subrepoutil.state(self, self._repo.ui)
175
175
176 def subrev(self, subpath):
176 def subrev(self, subpath):
177 return self.substate[subpath][1]
177 return self.substate[subpath][1]
178
178
179 def rev(self):
179 def rev(self):
180 return self._rev
180 return self._rev
181
181
182 def node(self):
182 def node(self):
183 return self._node
183 return self._node
184
184
185 def hex(self):
185 def hex(self):
186 return hex(self.node())
186 return hex(self.node())
187
187
188 def manifest(self):
188 def manifest(self):
189 return self._manifest
189 return self._manifest
190
190
191 def manifestctx(self):
191 def manifestctx(self):
192 return self._manifestctx
192 return self._manifestctx
193
193
194 def repo(self):
194 def repo(self):
195 return self._repo
195 return self._repo
196
196
197 def phasestr(self):
197 def phasestr(self):
198 return phases.phasenames[self.phase()]
198 return phases.phasenames[self.phase()]
199
199
200 def mutable(self):
200 def mutable(self):
201 return self.phase() > phases.public
201 return self.phase() > phases.public
202
202
203 def matchfileset(self, expr, badfn=None):
203 def matchfileset(self, expr, badfn=None):
204 return fileset.match(self, expr, badfn=badfn)
204 return fileset.match(self, expr, badfn=badfn)
205
205
206 def obsolete(self):
206 def obsolete(self):
207 """True if the changeset is obsolete"""
207 """True if the changeset is obsolete"""
208 return self.rev() in obsmod.getrevs(self._repo, b'obsolete')
208 return self.rev() in obsmod.getrevs(self._repo, b'obsolete')
209
209
210 def extinct(self):
210 def extinct(self):
211 """True if the changeset is extinct"""
211 """True if the changeset is extinct"""
212 return self.rev() in obsmod.getrevs(self._repo, b'extinct')
212 return self.rev() in obsmod.getrevs(self._repo, b'extinct')
213
213
214 def orphan(self):
214 def orphan(self):
215 """True if the changeset is not obsolete, but its ancestor is"""
215 """True if the changeset is not obsolete, but its ancestor is"""
216 return self.rev() in obsmod.getrevs(self._repo, b'orphan')
216 return self.rev() in obsmod.getrevs(self._repo, b'orphan')
217
217
218 def phasedivergent(self):
218 def phasedivergent(self):
219 """True if the changeset tries to be a successor of a public changeset
219 """True if the changeset tries to be a successor of a public changeset
220
220
221 Only non-public and non-obsolete changesets may be phase-divergent.
221 Only non-public and non-obsolete changesets may be phase-divergent.
222 """
222 """
223 return self.rev() in obsmod.getrevs(self._repo, b'phasedivergent')
223 return self.rev() in obsmod.getrevs(self._repo, b'phasedivergent')
224
224
225 def contentdivergent(self):
225 def contentdivergent(self):
226 """Is a successor of a changeset with multiple possible successor sets
226 """Is a successor of a changeset with multiple possible successor sets
227
227
228 Only non-public and non-obsolete changesets may be content-divergent.
228 Only non-public and non-obsolete changesets may be content-divergent.
229 """
229 """
230 return self.rev() in obsmod.getrevs(self._repo, b'contentdivergent')
230 return self.rev() in obsmod.getrevs(self._repo, b'contentdivergent')
231
231
232 def isunstable(self):
232 def isunstable(self):
233 """True if the changeset is either orphan, phase-divergent or
233 """True if the changeset is either orphan, phase-divergent or
234 content-divergent"""
234 content-divergent"""
235 return self.orphan() or self.phasedivergent() or self.contentdivergent()
235 return self.orphan() or self.phasedivergent() or self.contentdivergent()
236
236
237 def instabilities(self):
237 def instabilities(self):
238 """return the list of instabilities affecting this changeset.
238 """return the list of instabilities affecting this changeset.
239
239
240 Instabilities are returned as strings. possible values are:
240 Instabilities are returned as strings. possible values are:
241 - orphan,
241 - orphan,
242 - phase-divergent,
242 - phase-divergent,
243 - content-divergent.
243 - content-divergent.
244 """
244 """
245 instabilities = []
245 instabilities = []
246 if self.orphan():
246 if self.orphan():
247 instabilities.append(b'orphan')
247 instabilities.append(b'orphan')
248 if self.phasedivergent():
248 if self.phasedivergent():
249 instabilities.append(b'phase-divergent')
249 instabilities.append(b'phase-divergent')
250 if self.contentdivergent():
250 if self.contentdivergent():
251 instabilities.append(b'content-divergent')
251 instabilities.append(b'content-divergent')
252 return instabilities
252 return instabilities
253
253
254 def parents(self):
254 def parents(self):
255 """return contexts for each parent changeset"""
255 """return contexts for each parent changeset"""
256 return self._parents
256 return self._parents
257
257
258 def p1(self):
258 def p1(self):
259 return self._parents[0]
259 return self._parents[0]
260
260
261 def p2(self):
261 def p2(self):
262 parents = self._parents
262 parents = self._parents
263 if len(parents) == 2:
263 if len(parents) == 2:
264 return parents[1]
264 return parents[1]
265 return self._repo[nullrev]
265 return self._repo[nullrev]
266
266
267 def _fileinfo(self, path):
267 def _fileinfo(self, path):
268 if r'_manifest' in self.__dict__:
268 if r'_manifest' in self.__dict__:
269 try:
269 try:
270 return self._manifest[path], self._manifest.flags(path)
270 return self._manifest[path], self._manifest.flags(path)
271 except KeyError:
271 except KeyError:
272 raise error.ManifestLookupError(
272 raise error.ManifestLookupError(
273 self._node, path, _(b'not found in manifest')
273 self._node, path, _(b'not found in manifest')
274 )
274 )
275 if r'_manifestdelta' in self.__dict__ or path in self.files():
275 if r'_manifestdelta' in self.__dict__ or path in self.files():
276 if path in self._manifestdelta:
276 if path in self._manifestdelta:
277 return (
277 return (
278 self._manifestdelta[path],
278 self._manifestdelta[path],
279 self._manifestdelta.flags(path),
279 self._manifestdelta.flags(path),
280 )
280 )
281 mfl = self._repo.manifestlog
281 mfl = self._repo.manifestlog
282 try:
282 try:
283 node, flag = mfl[self._changeset.manifest].find(path)
283 node, flag = mfl[self._changeset.manifest].find(path)
284 except KeyError:
284 except KeyError:
285 raise error.ManifestLookupError(
285 raise error.ManifestLookupError(
286 self._node, path, _(b'not found in manifest')
286 self._node, path, _(b'not found in manifest')
287 )
287 )
288
288
289 return node, flag
289 return node, flag
290
290
291 def filenode(self, path):
291 def filenode(self, path):
292 return self._fileinfo(path)[0]
292 return self._fileinfo(path)[0]
293
293
294 def flags(self, path):
294 def flags(self, path):
295 try:
295 try:
296 return self._fileinfo(path)[1]
296 return self._fileinfo(path)[1]
297 except error.LookupError:
297 except error.LookupError:
298 return b''
298 return b''
299
299
300 @propertycache
300 @propertycache
301 def _copies(self):
301 def _copies(self):
302 return copies.computechangesetcopies(self)
302 return copies.computechangesetcopies(self)
303
303
304 def p1copies(self):
304 def p1copies(self):
305 return self._copies[0]
305 return self._copies[0]
306
306
307 def p2copies(self):
307 def p2copies(self):
308 return self._copies[1]
308 return self._copies[1]
309
309
310 def sub(self, path, allowcreate=True):
310 def sub(self, path, allowcreate=True):
311 '''return a subrepo for the stored revision of path, never wdir()'''
311 '''return a subrepo for the stored revision of path, never wdir()'''
312 return subrepo.subrepo(self, path, allowcreate=allowcreate)
312 return subrepo.subrepo(self, path, allowcreate=allowcreate)
313
313
314 def nullsub(self, path, pctx):
314 def nullsub(self, path, pctx):
315 return subrepo.nullsubrepo(self, path, pctx)
315 return subrepo.nullsubrepo(self, path, pctx)
316
316
317 def workingsub(self, path):
317 def workingsub(self, path):
318 '''return a subrepo for the stored revision, or wdir if this is a wdir
318 '''return a subrepo for the stored revision, or wdir if this is a wdir
319 context.
319 context.
320 '''
320 '''
321 return subrepo.subrepo(self, path, allowwdir=True)
321 return subrepo.subrepo(self, path, allowwdir=True)
322
322
323 def match(
323 def match(
324 self,
324 self,
325 pats=None,
325 pats=None,
326 include=None,
326 include=None,
327 exclude=None,
327 exclude=None,
328 default=b'glob',
328 default=b'glob',
329 listsubrepos=False,
329 listsubrepos=False,
330 badfn=None,
330 badfn=None,
331 ):
331 ):
332 r = self._repo
332 r = self._repo
333 return matchmod.match(
333 return matchmod.match(
334 r.root,
334 r.root,
335 r.getcwd(),
335 r.getcwd(),
336 pats,
336 pats,
337 include,
337 include,
338 exclude,
338 exclude,
339 default,
339 default,
340 auditor=r.nofsauditor,
340 auditor=r.nofsauditor,
341 ctx=self,
341 ctx=self,
342 listsubrepos=listsubrepos,
342 listsubrepos=listsubrepos,
343 badfn=badfn,
343 badfn=badfn,
344 )
344 )
345
345
346 def diff(
346 def diff(
347 self,
347 self,
348 ctx2=None,
348 ctx2=None,
349 match=None,
349 match=None,
350 changes=None,
350 changes=None,
351 opts=None,
351 opts=None,
352 losedatafn=None,
352 losedatafn=None,
353 pathfn=None,
353 pathfn=None,
354 copy=None,
354 copy=None,
355 copysourcematch=None,
355 copysourcematch=None,
356 hunksfilterfn=None,
356 hunksfilterfn=None,
357 ):
357 ):
358 """Returns a diff generator for the given contexts and matcher"""
358 """Returns a diff generator for the given contexts and matcher"""
359 if ctx2 is None:
359 if ctx2 is None:
360 ctx2 = self.p1()
360 ctx2 = self.p1()
361 if ctx2 is not None:
361 if ctx2 is not None:
362 ctx2 = self._repo[ctx2]
362 ctx2 = self._repo[ctx2]
363 return patch.diff(
363 return patch.diff(
364 self._repo,
364 self._repo,
365 ctx2,
365 ctx2,
366 self,
366 self,
367 match=match,
367 match=match,
368 changes=changes,
368 changes=changes,
369 opts=opts,
369 opts=opts,
370 losedatafn=losedatafn,
370 losedatafn=losedatafn,
371 pathfn=pathfn,
371 pathfn=pathfn,
372 copy=copy,
372 copy=copy,
373 copysourcematch=copysourcematch,
373 copysourcematch=copysourcematch,
374 hunksfilterfn=hunksfilterfn,
374 hunksfilterfn=hunksfilterfn,
375 )
375 )
376
376
377 def dirs(self):
377 def dirs(self):
378 return self._manifest.dirs()
378 return self._manifest.dirs()
379
379
380 def hasdir(self, dir):
380 def hasdir(self, dir):
381 return self._manifest.hasdir(dir)
381 return self._manifest.hasdir(dir)
382
382
383 def status(
383 def status(
384 self,
384 self,
385 other=None,
385 other=None,
386 match=None,
386 match=None,
387 listignored=False,
387 listignored=False,
388 listclean=False,
388 listclean=False,
389 listunknown=False,
389 listunknown=False,
390 listsubrepos=False,
390 listsubrepos=False,
391 ):
391 ):
392 """return status of files between two nodes or node and working
392 """return status of files between two nodes or node and working
393 directory.
393 directory.
394
394
395 If other is None, compare this node with working directory.
395 If other is None, compare this node with working directory.
396
396
397 returns (modified, added, removed, deleted, unknown, ignored, clean)
397 returns (modified, added, removed, deleted, unknown, ignored, clean)
398 """
398 """
399
399
400 ctx1 = self
400 ctx1 = self
401 ctx2 = self._repo[other]
401 ctx2 = self._repo[other]
402
402
403 # This next code block is, admittedly, fragile logic that tests for
403 # This next code block is, admittedly, fragile logic that tests for
404 # reversing the contexts and wouldn't need to exist if it weren't for
404 # reversing the contexts and wouldn't need to exist if it weren't for
405 # the fast (and common) code path of comparing the working directory
405 # the fast (and common) code path of comparing the working directory
406 # with its first parent.
406 # with its first parent.
407 #
407 #
408 # What we're aiming for here is the ability to call:
408 # What we're aiming for here is the ability to call:
409 #
409 #
410 # workingctx.status(parentctx)
410 # workingctx.status(parentctx)
411 #
411 #
412 # If we always built the manifest for each context and compared those,
412 # If we always built the manifest for each context and compared those,
413 # then we'd be done. But the special case of the above call means we
413 # then we'd be done. But the special case of the above call means we
414 # just copy the manifest of the parent.
414 # just copy the manifest of the parent.
415 reversed = False
415 reversed = False
416 if not isinstance(ctx1, changectx) and isinstance(ctx2, changectx):
416 if not isinstance(ctx1, changectx) and isinstance(ctx2, changectx):
417 reversed = True
417 reversed = True
418 ctx1, ctx2 = ctx2, ctx1
418 ctx1, ctx2 = ctx2, ctx1
419
419
420 match = self._repo.narrowmatch(match)
420 match = self._repo.narrowmatch(match)
421 match = ctx2._matchstatus(ctx1, match)
421 match = ctx2._matchstatus(ctx1, match)
422 r = scmutil.status([], [], [], [], [], [], [])
422 r = scmutil.status([], [], [], [], [], [], [])
423 r = ctx2._buildstatus(
423 r = ctx2._buildstatus(
424 ctx1, r, match, listignored, listclean, listunknown
424 ctx1, r, match, listignored, listclean, listunknown
425 )
425 )
426
426
427 if reversed:
427 if reversed:
428 # Reverse added and removed. Clear deleted, unknown and ignored as
428 # Reverse added and removed. Clear deleted, unknown and ignored as
429 # these make no sense to reverse.
429 # these make no sense to reverse.
430 r = scmutil.status(
430 r = scmutil.status(
431 r.modified, r.removed, r.added, [], [], [], r.clean
431 r.modified, r.removed, r.added, [], [], [], r.clean
432 )
432 )
433
433
434 if listsubrepos:
434 if listsubrepos:
435 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
435 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
436 try:
436 try:
437 rev2 = ctx2.subrev(subpath)
437 rev2 = ctx2.subrev(subpath)
438 except KeyError:
438 except KeyError:
439 # A subrepo that existed in node1 was deleted between
439 # A subrepo that existed in node1 was deleted between
440 # node1 and node2 (inclusive). Thus, ctx2's substate
440 # node1 and node2 (inclusive). Thus, ctx2's substate
441 # won't contain that subpath. The best we can do ignore it.
441 # won't contain that subpath. The best we can do ignore it.
442 rev2 = None
442 rev2 = None
443 submatch = matchmod.subdirmatcher(subpath, match)
443 submatch = matchmod.subdirmatcher(subpath, match)
444 s = sub.status(
444 s = sub.status(
445 rev2,
445 rev2,
446 match=submatch,
446 match=submatch,
447 ignored=listignored,
447 ignored=listignored,
448 clean=listclean,
448 clean=listclean,
449 unknown=listunknown,
449 unknown=listunknown,
450 listsubrepos=True,
450 listsubrepos=True,
451 )
451 )
452 for rfiles, sfiles in zip(r, s):
452 for rfiles, sfiles in zip(r, s):
453 rfiles.extend(b"%s/%s" % (subpath, f) for f in sfiles)
453 rfiles.extend(b"%s/%s" % (subpath, f) for f in sfiles)
454
454
455 for l in r:
455 for l in r:
456 l.sort()
456 l.sort()
457
457
458 return r
458 return r
459
459
460
460
461 class changectx(basectx):
461 class changectx(basectx):
462 """A changecontext object makes access to data related to a particular
462 """A changecontext object makes access to data related to a particular
463 changeset convenient. It represents a read-only context already present in
463 changeset convenient. It represents a read-only context already present in
464 the repo."""
464 the repo."""
465
465
466 def __init__(self, repo, rev, node):
466 def __init__(self, repo, rev, node):
467 super(changectx, self).__init__(repo)
467 super(changectx, self).__init__(repo)
468 self._rev = rev
468 self._rev = rev
469 self._node = node
469 self._node = node
470
470
471 def __hash__(self):
471 def __hash__(self):
472 try:
472 try:
473 return hash(self._rev)
473 return hash(self._rev)
474 except AttributeError:
474 except AttributeError:
475 return id(self)
475 return id(self)
476
476
477 def __nonzero__(self):
477 def __nonzero__(self):
478 return self._rev != nullrev
478 return self._rev != nullrev
479
479
480 __bool__ = __nonzero__
480 __bool__ = __nonzero__
481
481
482 @propertycache
482 @propertycache
483 def _changeset(self):
483 def _changeset(self):
484 return self._repo.changelog.changelogrevision(self.rev())
484 return self._repo.changelog.changelogrevision(self.rev())
485
485
486 @propertycache
486 @propertycache
487 def _manifest(self):
487 def _manifest(self):
488 return self._manifestctx.read()
488 return self._manifestctx.read()
489
489
490 @property
490 @property
491 def _manifestctx(self):
491 def _manifestctx(self):
492 return self._repo.manifestlog[self._changeset.manifest]
492 return self._repo.manifestlog[self._changeset.manifest]
493
493
494 @propertycache
494 @propertycache
495 def _manifestdelta(self):
495 def _manifestdelta(self):
496 return self._manifestctx.readdelta()
496 return self._manifestctx.readdelta()
497
497
498 @propertycache
498 @propertycache
499 def _parents(self):
499 def _parents(self):
500 repo = self._repo
500 repo = self._repo
501 p1, p2 = repo.changelog.parentrevs(self._rev)
501 p1, p2 = repo.changelog.parentrevs(self._rev)
502 if p2 == nullrev:
502 if p2 == nullrev:
503 return [repo[p1]]
503 return [repo[p1]]
504 return [repo[p1], repo[p2]]
504 return [repo[p1], repo[p2]]
505
505
506 def changeset(self):
506 def changeset(self):
507 c = self._changeset
507 c = self._changeset
508 return (
508 return (
509 c.manifest,
509 c.manifest,
510 c.user,
510 c.user,
511 c.date,
511 c.date,
512 c.files,
512 c.files,
513 c.description,
513 c.description,
514 c.extra,
514 c.extra,
515 )
515 )
516
516
517 def manifestnode(self):
517 def manifestnode(self):
518 return self._changeset.manifest
518 return self._changeset.manifest
519
519
520 def user(self):
520 def user(self):
521 return self._changeset.user
521 return self._changeset.user
522
522
523 def date(self):
523 def date(self):
524 return self._changeset.date
524 return self._changeset.date
525
525
526 def files(self):
526 def files(self):
527 return self._changeset.files
527 return self._changeset.files
528
528
529 def filesmodified(self):
529 def filesmodified(self):
530 modified = set(self.files())
530 modified = set(self.files())
531 modified.difference_update(self.filesadded())
531 modified.difference_update(self.filesadded())
532 modified.difference_update(self.filesremoved())
532 modified.difference_update(self.filesremoved())
533 return sorted(modified)
533 return sorted(modified)
534
534
535 def filesadded(self):
535 def filesadded(self):
536 source = self._repo.ui.config(b'experimental', b'copies.read-from')
537 filesadded = self._changeset.filesadded
536 filesadded = self._changeset.filesadded
538 if source == b'changeset-only':
537 compute_on_none = True
539 if filesadded is None:
538 if self._repo.filecopiesmode == b'changeset-sidedata':
539 compute_on_none = False
540 else:
541 source = self._repo.ui.config(b'experimental', b'copies.read-from')
542 if source == b'changeset-only':
543 compute_on_none = False
544 elif source != b'compatibility':
545 # filelog mode, ignore any changelog content
546 filesadded = None
547 if filesadded is None:
548 if compute_on_none:
549 filesadded = scmutil.computechangesetfilesadded(self)
550 else:
540 filesadded = []
551 filesadded = []
541 elif source == b'compatibility':
542 if filesadded is None:
543 filesadded = scmutil.computechangesetfilesadded(self)
544 else:
545 filesadded = scmutil.computechangesetfilesadded(self)
546 return filesadded
552 return filesadded
547
553
548 def filesremoved(self):
554 def filesremoved(self):
549 source = self._repo.ui.config(b'experimental', b'copies.read-from')
550 filesremoved = self._changeset.filesremoved
555 filesremoved = self._changeset.filesremoved
551 if source == b'changeset-only':
556 compute_on_none = True
552 if filesremoved is None:
557 if self._repo.filecopiesmode == b'changeset-sidedata':
558 compute_on_none = False
559 else:
560 source = self._repo.ui.config(b'experimental', b'copies.read-from')
561 if source == b'changeset-only':
562 compute_on_none = False
563 elif source != b'compatibility':
564 # filelog mode, ignore any changelog content
565 filesremoved = None
566 if filesremoved is None:
567 if compute_on_none:
568 filesremoved = scmutil.computechangesetfilesremoved(self)
569 else:
553 filesremoved = []
570 filesremoved = []
554 elif source == b'compatibility':
555 if filesremoved is None:
556 filesremoved = scmutil.computechangesetfilesremoved(self)
557 else:
558 filesremoved = scmutil.computechangesetfilesremoved(self)
559 return filesremoved
571 return filesremoved
560
572
561 @propertycache
573 @propertycache
562 def _copies(self):
574 def _copies(self):
563 source = self._repo.ui.config(b'experimental', b'copies.read-from')
564 p1copies = self._changeset.p1copies
575 p1copies = self._changeset.p1copies
565 p2copies = self._changeset.p2copies
576 p2copies = self._changeset.p2copies
566 # If config says to get copy metadata only from changeset, then return
577 compute_on_none = True
567 # that, defaulting to {} if there was no copy metadata.
578 if self._repo.filecopiesmode == b'changeset-sidedata':
568 # In compatibility mode, we return copy data from the changeset if
579 compute_on_none = False
569 # it was recorded there, and otherwise we fall back to getting it from
580 else:
570 # the filelogs (below).
581 source = self._repo.ui.config(b'experimental', b'copies.read-from')
571 if source == b'changeset-only':
582 # If config says to get copy metadata only from changeset, then
572 if p1copies is None:
583 # return that, defaulting to {} if there was no copy metadata. In
573 p1copies = {}
584 # compatibility mode, we return copy data from the changeset if it
574 if p2copies is None:
585 # was recorded there, and otherwise we fall back to getting it from
575 p2copies = {}
586 # the filelogs (below).
576 elif source == b'compatibility':
587 #
577 if p1copies is None:
588 # If we are in compatiblity mode and there is not data in the
578 # we are in compatiblity mode and there is not data in the
589 # changeset), we get the copy metadata from the filelogs.
579 # changeset), we get the copy metadata from the filelogs.
590 #
591 # otherwise, when config said to read only from filelog, we get the
592 # copy metadata from the filelogs.
593 if source == b'changeset-only':
594 compute_on_none = False
595 elif source != b'compatibility':
596 # filelog mode, ignore any changelog content
597 p1copies = p2copies = None
598 if p1copies is None:
599 if compute_on_none:
580 p1copies, p2copies = super(changectx, self)._copies
600 p1copies, p2copies = super(changectx, self)._copies
581 else:
601 else:
582 # config said to read only from filelog, we get the copy metadata
602 if p1copies is None:
583 # from the filelogs.
603 p1copies = {}
584 p1copies, p2copies = super(changectx, self)._copies
604 if p2copies is None:
605 p2copies = {}
585 return p1copies, p2copies
606 return p1copies, p2copies
586
607
587 def description(self):
608 def description(self):
588 return self._changeset.description
609 return self._changeset.description
589
610
590 def branch(self):
611 def branch(self):
591 return encoding.tolocal(self._changeset.extra.get(b"branch"))
612 return encoding.tolocal(self._changeset.extra.get(b"branch"))
592
613
593 def closesbranch(self):
614 def closesbranch(self):
594 return b'close' in self._changeset.extra
615 return b'close' in self._changeset.extra
595
616
596 def extra(self):
617 def extra(self):
597 """Return a dict of extra information."""
618 """Return a dict of extra information."""
598 return self._changeset.extra
619 return self._changeset.extra
599
620
600 def tags(self):
621 def tags(self):
601 """Return a list of byte tag names"""
622 """Return a list of byte tag names"""
602 return self._repo.nodetags(self._node)
623 return self._repo.nodetags(self._node)
603
624
604 def bookmarks(self):
625 def bookmarks(self):
605 """Return a list of byte bookmark names."""
626 """Return a list of byte bookmark names."""
606 return self._repo.nodebookmarks(self._node)
627 return self._repo.nodebookmarks(self._node)
607
628
608 def phase(self):
629 def phase(self):
609 return self._repo._phasecache.phase(self._repo, self._rev)
630 return self._repo._phasecache.phase(self._repo, self._rev)
610
631
611 def hidden(self):
632 def hidden(self):
612 return self._rev in repoview.filterrevs(self._repo, b'visible')
633 return self._rev in repoview.filterrevs(self._repo, b'visible')
613
634
614 def isinmemory(self):
635 def isinmemory(self):
615 return False
636 return False
616
637
617 def children(self):
638 def children(self):
618 """return list of changectx contexts for each child changeset.
639 """return list of changectx contexts for each child changeset.
619
640
620 This returns only the immediate child changesets. Use descendants() to
641 This returns only the immediate child changesets. Use descendants() to
621 recursively walk children.
642 recursively walk children.
622 """
643 """
623 c = self._repo.changelog.children(self._node)
644 c = self._repo.changelog.children(self._node)
624 return [self._repo[x] for x in c]
645 return [self._repo[x] for x in c]
625
646
626 def ancestors(self):
647 def ancestors(self):
627 for a in self._repo.changelog.ancestors([self._rev]):
648 for a in self._repo.changelog.ancestors([self._rev]):
628 yield self._repo[a]
649 yield self._repo[a]
629
650
630 def descendants(self):
651 def descendants(self):
631 """Recursively yield all children of the changeset.
652 """Recursively yield all children of the changeset.
632
653
633 For just the immediate children, use children()
654 For just the immediate children, use children()
634 """
655 """
635 for d in self._repo.changelog.descendants([self._rev]):
656 for d in self._repo.changelog.descendants([self._rev]):
636 yield self._repo[d]
657 yield self._repo[d]
637
658
638 def filectx(self, path, fileid=None, filelog=None):
659 def filectx(self, path, fileid=None, filelog=None):
639 """get a file context from this changeset"""
660 """get a file context from this changeset"""
640 if fileid is None:
661 if fileid is None:
641 fileid = self.filenode(path)
662 fileid = self.filenode(path)
642 return filectx(
663 return filectx(
643 self._repo, path, fileid=fileid, changectx=self, filelog=filelog
664 self._repo, path, fileid=fileid, changectx=self, filelog=filelog
644 )
665 )
645
666
646 def ancestor(self, c2, warn=False):
667 def ancestor(self, c2, warn=False):
647 """return the "best" ancestor context of self and c2
668 """return the "best" ancestor context of self and c2
648
669
649 If there are multiple candidates, it will show a message and check
670 If there are multiple candidates, it will show a message and check
650 merge.preferancestor configuration before falling back to the
671 merge.preferancestor configuration before falling back to the
651 revlog ancestor."""
672 revlog ancestor."""
652 # deal with workingctxs
673 # deal with workingctxs
653 n2 = c2._node
674 n2 = c2._node
654 if n2 is None:
675 if n2 is None:
655 n2 = c2._parents[0]._node
676 n2 = c2._parents[0]._node
656 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
677 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
657 if not cahs:
678 if not cahs:
658 anc = nullid
679 anc = nullid
659 elif len(cahs) == 1:
680 elif len(cahs) == 1:
660 anc = cahs[0]
681 anc = cahs[0]
661 else:
682 else:
662 # experimental config: merge.preferancestor
683 # experimental config: merge.preferancestor
663 for r in self._repo.ui.configlist(b'merge', b'preferancestor'):
684 for r in self._repo.ui.configlist(b'merge', b'preferancestor'):
664 try:
685 try:
665 ctx = scmutil.revsymbol(self._repo, r)
686 ctx = scmutil.revsymbol(self._repo, r)
666 except error.RepoLookupError:
687 except error.RepoLookupError:
667 continue
688 continue
668 anc = ctx.node()
689 anc = ctx.node()
669 if anc in cahs:
690 if anc in cahs:
670 break
691 break
671 else:
692 else:
672 anc = self._repo.changelog.ancestor(self._node, n2)
693 anc = self._repo.changelog.ancestor(self._node, n2)
673 if warn:
694 if warn:
674 self._repo.ui.status(
695 self._repo.ui.status(
675 (
696 (
676 _(b"note: using %s as ancestor of %s and %s\n")
697 _(b"note: using %s as ancestor of %s and %s\n")
677 % (short(anc), short(self._node), short(n2))
698 % (short(anc), short(self._node), short(n2))
678 )
699 )
679 + b''.join(
700 + b''.join(
680 _(
701 _(
681 b" alternatively, use --config "
702 b" alternatively, use --config "
682 b"merge.preferancestor=%s\n"
703 b"merge.preferancestor=%s\n"
683 )
704 )
684 % short(n)
705 % short(n)
685 for n in sorted(cahs)
706 for n in sorted(cahs)
686 if n != anc
707 if n != anc
687 )
708 )
688 )
709 )
689 return self._repo[anc]
710 return self._repo[anc]
690
711
691 def isancestorof(self, other):
712 def isancestorof(self, other):
692 """True if this changeset is an ancestor of other"""
713 """True if this changeset is an ancestor of other"""
693 return self._repo.changelog.isancestorrev(self._rev, other._rev)
714 return self._repo.changelog.isancestorrev(self._rev, other._rev)
694
715
695 def walk(self, match):
716 def walk(self, match):
696 '''Generates matching file names.'''
717 '''Generates matching file names.'''
697
718
698 # Wrap match.bad method to have message with nodeid
719 # Wrap match.bad method to have message with nodeid
699 def bad(fn, msg):
720 def bad(fn, msg):
700 # The manifest doesn't know about subrepos, so don't complain about
721 # The manifest doesn't know about subrepos, so don't complain about
701 # paths into valid subrepos.
722 # paths into valid subrepos.
702 if any(fn == s or fn.startswith(s + b'/') for s in self.substate):
723 if any(fn == s or fn.startswith(s + b'/') for s in self.substate):
703 return
724 return
704 match.bad(fn, _(b'no such file in rev %s') % self)
725 match.bad(fn, _(b'no such file in rev %s') % self)
705
726
706 m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
727 m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
707 return self._manifest.walk(m)
728 return self._manifest.walk(m)
708
729
709 def matches(self, match):
730 def matches(self, match):
710 return self.walk(match)
731 return self.walk(match)
711
732
712
733
713 class basefilectx(object):
734 class basefilectx(object):
714 """A filecontext object represents the common logic for its children:
735 """A filecontext object represents the common logic for its children:
715 filectx: read-only access to a filerevision that is already present
736 filectx: read-only access to a filerevision that is already present
716 in the repo,
737 in the repo,
717 workingfilectx: a filecontext that represents files from the working
738 workingfilectx: a filecontext that represents files from the working
718 directory,
739 directory,
719 memfilectx: a filecontext that represents files in-memory,
740 memfilectx: a filecontext that represents files in-memory,
720 """
741 """
721
742
722 @propertycache
743 @propertycache
723 def _filelog(self):
744 def _filelog(self):
724 return self._repo.file(self._path)
745 return self._repo.file(self._path)
725
746
726 @propertycache
747 @propertycache
727 def _changeid(self):
748 def _changeid(self):
728 if r'_changectx' in self.__dict__:
749 if r'_changectx' in self.__dict__:
729 return self._changectx.rev()
750 return self._changectx.rev()
730 elif r'_descendantrev' in self.__dict__:
751 elif r'_descendantrev' in self.__dict__:
731 # this file context was created from a revision with a known
752 # this file context was created from a revision with a known
732 # descendant, we can (lazily) correct for linkrev aliases
753 # descendant, we can (lazily) correct for linkrev aliases
733 return self._adjustlinkrev(self._descendantrev)
754 return self._adjustlinkrev(self._descendantrev)
734 else:
755 else:
735 return self._filelog.linkrev(self._filerev)
756 return self._filelog.linkrev(self._filerev)
736
757
737 @propertycache
758 @propertycache
738 def _filenode(self):
759 def _filenode(self):
739 if r'_fileid' in self.__dict__:
760 if r'_fileid' in self.__dict__:
740 return self._filelog.lookup(self._fileid)
761 return self._filelog.lookup(self._fileid)
741 else:
762 else:
742 return self._changectx.filenode(self._path)
763 return self._changectx.filenode(self._path)
743
764
744 @propertycache
765 @propertycache
745 def _filerev(self):
766 def _filerev(self):
746 return self._filelog.rev(self._filenode)
767 return self._filelog.rev(self._filenode)
747
768
748 @propertycache
769 @propertycache
749 def _repopath(self):
770 def _repopath(self):
750 return self._path
771 return self._path
751
772
752 def __nonzero__(self):
773 def __nonzero__(self):
753 try:
774 try:
754 self._filenode
775 self._filenode
755 return True
776 return True
756 except error.LookupError:
777 except error.LookupError:
757 # file is missing
778 # file is missing
758 return False
779 return False
759
780
760 __bool__ = __nonzero__
781 __bool__ = __nonzero__
761
782
762 def __bytes__(self):
783 def __bytes__(self):
763 try:
784 try:
764 return b"%s@%s" % (self.path(), self._changectx)
785 return b"%s@%s" % (self.path(), self._changectx)
765 except error.LookupError:
786 except error.LookupError:
766 return b"%s@???" % self.path()
787 return b"%s@???" % self.path()
767
788
768 __str__ = encoding.strmethod(__bytes__)
789 __str__ = encoding.strmethod(__bytes__)
769
790
770 def __repr__(self):
791 def __repr__(self):
771 return r"<%s %s>" % (type(self).__name__, str(self))
792 return r"<%s %s>" % (type(self).__name__, str(self))
772
793
773 def __hash__(self):
794 def __hash__(self):
774 try:
795 try:
775 return hash((self._path, self._filenode))
796 return hash((self._path, self._filenode))
776 except AttributeError:
797 except AttributeError:
777 return id(self)
798 return id(self)
778
799
779 def __eq__(self, other):
800 def __eq__(self, other):
780 try:
801 try:
781 return (
802 return (
782 type(self) == type(other)
803 type(self) == type(other)
783 and self._path == other._path
804 and self._path == other._path
784 and self._filenode == other._filenode
805 and self._filenode == other._filenode
785 )
806 )
786 except AttributeError:
807 except AttributeError:
787 return False
808 return False
788
809
789 def __ne__(self, other):
810 def __ne__(self, other):
790 return not (self == other)
811 return not (self == other)
791
812
792 def filerev(self):
813 def filerev(self):
793 return self._filerev
814 return self._filerev
794
815
795 def filenode(self):
816 def filenode(self):
796 return self._filenode
817 return self._filenode
797
818
798 @propertycache
819 @propertycache
799 def _flags(self):
820 def _flags(self):
800 return self._changectx.flags(self._path)
821 return self._changectx.flags(self._path)
801
822
802 def flags(self):
823 def flags(self):
803 return self._flags
824 return self._flags
804
825
805 def filelog(self):
826 def filelog(self):
806 return self._filelog
827 return self._filelog
807
828
808 def rev(self):
829 def rev(self):
809 return self._changeid
830 return self._changeid
810
831
811 def linkrev(self):
832 def linkrev(self):
812 return self._filelog.linkrev(self._filerev)
833 return self._filelog.linkrev(self._filerev)
813
834
814 def node(self):
835 def node(self):
815 return self._changectx.node()
836 return self._changectx.node()
816
837
817 def hex(self):
838 def hex(self):
818 return self._changectx.hex()
839 return self._changectx.hex()
819
840
820 def user(self):
841 def user(self):
821 return self._changectx.user()
842 return self._changectx.user()
822
843
823 def date(self):
844 def date(self):
824 return self._changectx.date()
845 return self._changectx.date()
825
846
826 def files(self):
847 def files(self):
827 return self._changectx.files()
848 return self._changectx.files()
828
849
829 def description(self):
850 def description(self):
830 return self._changectx.description()
851 return self._changectx.description()
831
852
832 def branch(self):
853 def branch(self):
833 return self._changectx.branch()
854 return self._changectx.branch()
834
855
835 def extra(self):
856 def extra(self):
836 return self._changectx.extra()
857 return self._changectx.extra()
837
858
838 def phase(self):
859 def phase(self):
839 return self._changectx.phase()
860 return self._changectx.phase()
840
861
841 def phasestr(self):
862 def phasestr(self):
842 return self._changectx.phasestr()
863 return self._changectx.phasestr()
843
864
844 def obsolete(self):
865 def obsolete(self):
845 return self._changectx.obsolete()
866 return self._changectx.obsolete()
846
867
847 def instabilities(self):
868 def instabilities(self):
848 return self._changectx.instabilities()
869 return self._changectx.instabilities()
849
870
850 def manifest(self):
871 def manifest(self):
851 return self._changectx.manifest()
872 return self._changectx.manifest()
852
873
853 def changectx(self):
874 def changectx(self):
854 return self._changectx
875 return self._changectx
855
876
856 def renamed(self):
877 def renamed(self):
857 return self._copied
878 return self._copied
858
879
859 def copysource(self):
880 def copysource(self):
860 return self._copied and self._copied[0]
881 return self._copied and self._copied[0]
861
882
862 def repo(self):
883 def repo(self):
863 return self._repo
884 return self._repo
864
885
865 def size(self):
886 def size(self):
866 return len(self.data())
887 return len(self.data())
867
888
868 def path(self):
889 def path(self):
869 return self._path
890 return self._path
870
891
871 def isbinary(self):
892 def isbinary(self):
872 try:
893 try:
873 return stringutil.binary(self.data())
894 return stringutil.binary(self.data())
874 except IOError:
895 except IOError:
875 return False
896 return False
876
897
877 def isexec(self):
898 def isexec(self):
878 return b'x' in self.flags()
899 return b'x' in self.flags()
879
900
880 def islink(self):
901 def islink(self):
881 return b'l' in self.flags()
902 return b'l' in self.flags()
882
903
883 def isabsent(self):
904 def isabsent(self):
884 """whether this filectx represents a file not in self._changectx
905 """whether this filectx represents a file not in self._changectx
885
906
886 This is mainly for merge code to detect change/delete conflicts. This is
907 This is mainly for merge code to detect change/delete conflicts. This is
887 expected to be True for all subclasses of basectx."""
908 expected to be True for all subclasses of basectx."""
888 return False
909 return False
889
910
890 _customcmp = False
911 _customcmp = False
891
912
892 def cmp(self, fctx):
913 def cmp(self, fctx):
893 """compare with other file context
914 """compare with other file context
894
915
895 returns True if different than fctx.
916 returns True if different than fctx.
896 """
917 """
897 if fctx._customcmp:
918 if fctx._customcmp:
898 return fctx.cmp(self)
919 return fctx.cmp(self)
899
920
900 if self._filenode is None:
921 if self._filenode is None:
901 raise error.ProgrammingError(
922 raise error.ProgrammingError(
902 b'filectx.cmp() must be reimplemented if not backed by revlog'
923 b'filectx.cmp() must be reimplemented if not backed by revlog'
903 )
924 )
904
925
905 if fctx._filenode is None:
926 if fctx._filenode is None:
906 if self._repo._encodefilterpats:
927 if self._repo._encodefilterpats:
907 # can't rely on size() because wdir content may be decoded
928 # can't rely on size() because wdir content may be decoded
908 return self._filelog.cmp(self._filenode, fctx.data())
929 return self._filelog.cmp(self._filenode, fctx.data())
909 if self.size() - 4 == fctx.size():
930 if self.size() - 4 == fctx.size():
910 # size() can match:
931 # size() can match:
911 # if file data starts with '\1\n', empty metadata block is
932 # if file data starts with '\1\n', empty metadata block is
912 # prepended, which adds 4 bytes to filelog.size().
933 # prepended, which adds 4 bytes to filelog.size().
913 return self._filelog.cmp(self._filenode, fctx.data())
934 return self._filelog.cmp(self._filenode, fctx.data())
914 if self.size() == fctx.size():
935 if self.size() == fctx.size():
915 # size() matches: need to compare content
936 # size() matches: need to compare content
916 return self._filelog.cmp(self._filenode, fctx.data())
937 return self._filelog.cmp(self._filenode, fctx.data())
917
938
918 # size() differs
939 # size() differs
919 return True
940 return True
920
941
921 def _adjustlinkrev(self, srcrev, inclusive=False, stoprev=None):
942 def _adjustlinkrev(self, srcrev, inclusive=False, stoprev=None):
922 """return the first ancestor of <srcrev> introducing <fnode>
943 """return the first ancestor of <srcrev> introducing <fnode>
923
944
924 If the linkrev of the file revision does not point to an ancestor of
945 If the linkrev of the file revision does not point to an ancestor of
925 srcrev, we'll walk down the ancestors until we find one introducing
946 srcrev, we'll walk down the ancestors until we find one introducing
926 this file revision.
947 this file revision.
927
948
928 :srcrev: the changeset revision we search ancestors from
949 :srcrev: the changeset revision we search ancestors from
929 :inclusive: if true, the src revision will also be checked
950 :inclusive: if true, the src revision will also be checked
930 :stoprev: an optional revision to stop the walk at. If no introduction
951 :stoprev: an optional revision to stop the walk at. If no introduction
931 of this file content could be found before this floor
952 of this file content could be found before this floor
932 revision, the function will returns "None" and stops its
953 revision, the function will returns "None" and stops its
933 iteration.
954 iteration.
934 """
955 """
935 repo = self._repo
956 repo = self._repo
936 cl = repo.unfiltered().changelog
957 cl = repo.unfiltered().changelog
937 mfl = repo.manifestlog
958 mfl = repo.manifestlog
938 # fetch the linkrev
959 # fetch the linkrev
939 lkr = self.linkrev()
960 lkr = self.linkrev()
940 if srcrev == lkr:
961 if srcrev == lkr:
941 return lkr
962 return lkr
942 # hack to reuse ancestor computation when searching for renames
963 # hack to reuse ancestor computation when searching for renames
943 memberanc = getattr(self, '_ancestrycontext', None)
964 memberanc = getattr(self, '_ancestrycontext', None)
944 iteranc = None
965 iteranc = None
945 if srcrev is None:
966 if srcrev is None:
946 # wctx case, used by workingfilectx during mergecopy
967 # wctx case, used by workingfilectx during mergecopy
947 revs = [p.rev() for p in self._repo[None].parents()]
968 revs = [p.rev() for p in self._repo[None].parents()]
948 inclusive = True # we skipped the real (revless) source
969 inclusive = True # we skipped the real (revless) source
949 else:
970 else:
950 revs = [srcrev]
971 revs = [srcrev]
951 if memberanc is None:
972 if memberanc is None:
952 memberanc = iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
973 memberanc = iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
953 # check if this linkrev is an ancestor of srcrev
974 # check if this linkrev is an ancestor of srcrev
954 if lkr not in memberanc:
975 if lkr not in memberanc:
955 if iteranc is None:
976 if iteranc is None:
956 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
977 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
957 fnode = self._filenode
978 fnode = self._filenode
958 path = self._path
979 path = self._path
959 for a in iteranc:
980 for a in iteranc:
960 if stoprev is not None and a < stoprev:
981 if stoprev is not None and a < stoprev:
961 return None
982 return None
962 ac = cl.read(a) # get changeset data (we avoid object creation)
983 ac = cl.read(a) # get changeset data (we avoid object creation)
963 if path in ac[3]: # checking the 'files' field.
984 if path in ac[3]: # checking the 'files' field.
964 # The file has been touched, check if the content is
985 # The file has been touched, check if the content is
965 # similar to the one we search for.
986 # similar to the one we search for.
966 if fnode == mfl[ac[0]].readfast().get(path):
987 if fnode == mfl[ac[0]].readfast().get(path):
967 return a
988 return a
968 # In theory, we should never get out of that loop without a result.
989 # In theory, we should never get out of that loop without a result.
969 # But if manifest uses a buggy file revision (not children of the
990 # But if manifest uses a buggy file revision (not children of the
970 # one it replaces) we could. Such a buggy situation will likely
991 # one it replaces) we could. Such a buggy situation will likely
971 # result is crash somewhere else at to some point.
992 # result is crash somewhere else at to some point.
972 return lkr
993 return lkr
973
994
974 def isintroducedafter(self, changelogrev):
995 def isintroducedafter(self, changelogrev):
975 """True if a filectx has been introduced after a given floor revision
996 """True if a filectx has been introduced after a given floor revision
976 """
997 """
977 if self.linkrev() >= changelogrev:
998 if self.linkrev() >= changelogrev:
978 return True
999 return True
979 introrev = self._introrev(stoprev=changelogrev)
1000 introrev = self._introrev(stoprev=changelogrev)
980 if introrev is None:
1001 if introrev is None:
981 return False
1002 return False
982 return introrev >= changelogrev
1003 return introrev >= changelogrev
983
1004
984 def introrev(self):
1005 def introrev(self):
985 """return the rev of the changeset which introduced this file revision
1006 """return the rev of the changeset which introduced this file revision
986
1007
987 This method is different from linkrev because it take into account the
1008 This method is different from linkrev because it take into account the
988 changeset the filectx was created from. It ensures the returned
1009 changeset the filectx was created from. It ensures the returned
989 revision is one of its ancestors. This prevents bugs from
1010 revision is one of its ancestors. This prevents bugs from
990 'linkrev-shadowing' when a file revision is used by multiple
1011 'linkrev-shadowing' when a file revision is used by multiple
991 changesets.
1012 changesets.
992 """
1013 """
993 return self._introrev()
1014 return self._introrev()
994
1015
995 def _introrev(self, stoprev=None):
1016 def _introrev(self, stoprev=None):
996 """
1017 """
997 Same as `introrev` but, with an extra argument to limit changelog
1018 Same as `introrev` but, with an extra argument to limit changelog
998 iteration range in some internal usecase.
1019 iteration range in some internal usecase.
999
1020
1000 If `stoprev` is set, the `introrev` will not be searched past that
1021 If `stoprev` is set, the `introrev` will not be searched past that
1001 `stoprev` revision and "None" might be returned. This is useful to
1022 `stoprev` revision and "None" might be returned. This is useful to
1002 limit the iteration range.
1023 limit the iteration range.
1003 """
1024 """
1004 toprev = None
1025 toprev = None
1005 attrs = vars(self)
1026 attrs = vars(self)
1006 if r'_changeid' in attrs:
1027 if r'_changeid' in attrs:
1007 # We have a cached value already
1028 # We have a cached value already
1008 toprev = self._changeid
1029 toprev = self._changeid
1009 elif r'_changectx' in attrs:
1030 elif r'_changectx' in attrs:
1010 # We know which changelog entry we are coming from
1031 # We know which changelog entry we are coming from
1011 toprev = self._changectx.rev()
1032 toprev = self._changectx.rev()
1012
1033
1013 if toprev is not None:
1034 if toprev is not None:
1014 return self._adjustlinkrev(toprev, inclusive=True, stoprev=stoprev)
1035 return self._adjustlinkrev(toprev, inclusive=True, stoprev=stoprev)
1015 elif r'_descendantrev' in attrs:
1036 elif r'_descendantrev' in attrs:
1016 introrev = self._adjustlinkrev(self._descendantrev, stoprev=stoprev)
1037 introrev = self._adjustlinkrev(self._descendantrev, stoprev=stoprev)
1017 # be nice and cache the result of the computation
1038 # be nice and cache the result of the computation
1018 if introrev is not None:
1039 if introrev is not None:
1019 self._changeid = introrev
1040 self._changeid = introrev
1020 return introrev
1041 return introrev
1021 else:
1042 else:
1022 return self.linkrev()
1043 return self.linkrev()
1023
1044
1024 def introfilectx(self):
1045 def introfilectx(self):
1025 """Return filectx having identical contents, but pointing to the
1046 """Return filectx having identical contents, but pointing to the
1026 changeset revision where this filectx was introduced"""
1047 changeset revision where this filectx was introduced"""
1027 introrev = self.introrev()
1048 introrev = self.introrev()
1028 if self.rev() == introrev:
1049 if self.rev() == introrev:
1029 return self
1050 return self
1030 return self.filectx(self.filenode(), changeid=introrev)
1051 return self.filectx(self.filenode(), changeid=introrev)
1031
1052
1032 def _parentfilectx(self, path, fileid, filelog):
1053 def _parentfilectx(self, path, fileid, filelog):
1033 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
1054 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
1034 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
1055 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
1035 if r'_changeid' in vars(self) or r'_changectx' in vars(self):
1056 if r'_changeid' in vars(self) or r'_changectx' in vars(self):
1036 # If self is associated with a changeset (probably explicitly
1057 # If self is associated with a changeset (probably explicitly
1037 # fed), ensure the created filectx is associated with a
1058 # fed), ensure the created filectx is associated with a
1038 # changeset that is an ancestor of self.changectx.
1059 # changeset that is an ancestor of self.changectx.
1039 # This lets us later use _adjustlinkrev to get a correct link.
1060 # This lets us later use _adjustlinkrev to get a correct link.
1040 fctx._descendantrev = self.rev()
1061 fctx._descendantrev = self.rev()
1041 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
1062 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
1042 elif r'_descendantrev' in vars(self):
1063 elif r'_descendantrev' in vars(self):
1043 # Otherwise propagate _descendantrev if we have one associated.
1064 # Otherwise propagate _descendantrev if we have one associated.
1044 fctx._descendantrev = self._descendantrev
1065 fctx._descendantrev = self._descendantrev
1045 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
1066 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
1046 return fctx
1067 return fctx
1047
1068
1048 def parents(self):
1069 def parents(self):
1049 _path = self._path
1070 _path = self._path
1050 fl = self._filelog
1071 fl = self._filelog
1051 parents = self._filelog.parents(self._filenode)
1072 parents = self._filelog.parents(self._filenode)
1052 pl = [(_path, node, fl) for node in parents if node != nullid]
1073 pl = [(_path, node, fl) for node in parents if node != nullid]
1053
1074
1054 r = fl.renamed(self._filenode)
1075 r = fl.renamed(self._filenode)
1055 if r:
1076 if r:
1056 # - In the simple rename case, both parent are nullid, pl is empty.
1077 # - In the simple rename case, both parent are nullid, pl is empty.
1057 # - In case of merge, only one of the parent is null id and should
1078 # - In case of merge, only one of the parent is null id and should
1058 # be replaced with the rename information. This parent is -always-
1079 # be replaced with the rename information. This parent is -always-
1059 # the first one.
1080 # the first one.
1060 #
1081 #
1061 # As null id have always been filtered out in the previous list
1082 # As null id have always been filtered out in the previous list
1062 # comprehension, inserting to 0 will always result in "replacing
1083 # comprehension, inserting to 0 will always result in "replacing
1063 # first nullid parent with rename information.
1084 # first nullid parent with rename information.
1064 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
1085 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
1065
1086
1066 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
1087 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
1067
1088
1068 def p1(self):
1089 def p1(self):
1069 return self.parents()[0]
1090 return self.parents()[0]
1070
1091
1071 def p2(self):
1092 def p2(self):
1072 p = self.parents()
1093 p = self.parents()
1073 if len(p) == 2:
1094 if len(p) == 2:
1074 return p[1]
1095 return p[1]
1075 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
1096 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
1076
1097
1077 def annotate(self, follow=False, skiprevs=None, diffopts=None):
1098 def annotate(self, follow=False, skiprevs=None, diffopts=None):
1078 """Returns a list of annotateline objects for each line in the file
1099 """Returns a list of annotateline objects for each line in the file
1079
1100
1080 - line.fctx is the filectx of the node where that line was last changed
1101 - line.fctx is the filectx of the node where that line was last changed
1081 - line.lineno is the line number at the first appearance in the managed
1102 - line.lineno is the line number at the first appearance in the managed
1082 file
1103 file
1083 - line.text is the data on that line (including newline character)
1104 - line.text is the data on that line (including newline character)
1084 """
1105 """
1085 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
1106 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
1086
1107
1087 def parents(f):
1108 def parents(f):
1088 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
1109 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
1089 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
1110 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
1090 # from the topmost introrev (= srcrev) down to p.linkrev() if it
1111 # from the topmost introrev (= srcrev) down to p.linkrev() if it
1091 # isn't an ancestor of the srcrev.
1112 # isn't an ancestor of the srcrev.
1092 f._changeid
1113 f._changeid
1093 pl = f.parents()
1114 pl = f.parents()
1094
1115
1095 # Don't return renamed parents if we aren't following.
1116 # Don't return renamed parents if we aren't following.
1096 if not follow:
1117 if not follow:
1097 pl = [p for p in pl if p.path() == f.path()]
1118 pl = [p for p in pl if p.path() == f.path()]
1098
1119
1099 # renamed filectx won't have a filelog yet, so set it
1120 # renamed filectx won't have a filelog yet, so set it
1100 # from the cache to save time
1121 # from the cache to save time
1101 for p in pl:
1122 for p in pl:
1102 if not r'_filelog' in p.__dict__:
1123 if not r'_filelog' in p.__dict__:
1103 p._filelog = getlog(p.path())
1124 p._filelog = getlog(p.path())
1104
1125
1105 return pl
1126 return pl
1106
1127
1107 # use linkrev to find the first changeset where self appeared
1128 # use linkrev to find the first changeset where self appeared
1108 base = self.introfilectx()
1129 base = self.introfilectx()
1109 if getattr(base, '_ancestrycontext', None) is None:
1130 if getattr(base, '_ancestrycontext', None) is None:
1110 cl = self._repo.changelog
1131 cl = self._repo.changelog
1111 if base.rev() is None:
1132 if base.rev() is None:
1112 # wctx is not inclusive, but works because _ancestrycontext
1133 # wctx is not inclusive, but works because _ancestrycontext
1113 # is used to test filelog revisions
1134 # is used to test filelog revisions
1114 ac = cl.ancestors(
1135 ac = cl.ancestors(
1115 [p.rev() for p in base.parents()], inclusive=True
1136 [p.rev() for p in base.parents()], inclusive=True
1116 )
1137 )
1117 else:
1138 else:
1118 ac = cl.ancestors([base.rev()], inclusive=True)
1139 ac = cl.ancestors([base.rev()], inclusive=True)
1119 base._ancestrycontext = ac
1140 base._ancestrycontext = ac
1120
1141
1121 return dagop.annotate(
1142 return dagop.annotate(
1122 base, parents, skiprevs=skiprevs, diffopts=diffopts
1143 base, parents, skiprevs=skiprevs, diffopts=diffopts
1123 )
1144 )
1124
1145
1125 def ancestors(self, followfirst=False):
1146 def ancestors(self, followfirst=False):
1126 visit = {}
1147 visit = {}
1127 c = self
1148 c = self
1128 if followfirst:
1149 if followfirst:
1129 cut = 1
1150 cut = 1
1130 else:
1151 else:
1131 cut = None
1152 cut = None
1132
1153
1133 while True:
1154 while True:
1134 for parent in c.parents()[:cut]:
1155 for parent in c.parents()[:cut]:
1135 visit[(parent.linkrev(), parent.filenode())] = parent
1156 visit[(parent.linkrev(), parent.filenode())] = parent
1136 if not visit:
1157 if not visit:
1137 break
1158 break
1138 c = visit.pop(max(visit))
1159 c = visit.pop(max(visit))
1139 yield c
1160 yield c
1140
1161
1141 def decodeddata(self):
1162 def decodeddata(self):
1142 """Returns `data()` after running repository decoding filters.
1163 """Returns `data()` after running repository decoding filters.
1143
1164
1144 This is often equivalent to how the data would be expressed on disk.
1165 This is often equivalent to how the data would be expressed on disk.
1145 """
1166 """
1146 return self._repo.wwritedata(self.path(), self.data())
1167 return self._repo.wwritedata(self.path(), self.data())
1147
1168
1148
1169
1149 class filectx(basefilectx):
1170 class filectx(basefilectx):
1150 """A filecontext object makes access to data related to a particular
1171 """A filecontext object makes access to data related to a particular
1151 filerevision convenient."""
1172 filerevision convenient."""
1152
1173
1153 def __init__(
1174 def __init__(
1154 self,
1175 self,
1155 repo,
1176 repo,
1156 path,
1177 path,
1157 changeid=None,
1178 changeid=None,
1158 fileid=None,
1179 fileid=None,
1159 filelog=None,
1180 filelog=None,
1160 changectx=None,
1181 changectx=None,
1161 ):
1182 ):
1162 """changeid must be a revision number, if specified.
1183 """changeid must be a revision number, if specified.
1163 fileid can be a file revision or node."""
1184 fileid can be a file revision or node."""
1164 self._repo = repo
1185 self._repo = repo
1165 self._path = path
1186 self._path = path
1166
1187
1167 assert (
1188 assert (
1168 changeid is not None or fileid is not None or changectx is not None
1189 changeid is not None or fileid is not None or changectx is not None
1169 ), b"bad args: changeid=%r, fileid=%r, changectx=%r" % (
1190 ), b"bad args: changeid=%r, fileid=%r, changectx=%r" % (
1170 changeid,
1191 changeid,
1171 fileid,
1192 fileid,
1172 changectx,
1193 changectx,
1173 )
1194 )
1174
1195
1175 if filelog is not None:
1196 if filelog is not None:
1176 self._filelog = filelog
1197 self._filelog = filelog
1177
1198
1178 if changeid is not None:
1199 if changeid is not None:
1179 self._changeid = changeid
1200 self._changeid = changeid
1180 if changectx is not None:
1201 if changectx is not None:
1181 self._changectx = changectx
1202 self._changectx = changectx
1182 if fileid is not None:
1203 if fileid is not None:
1183 self._fileid = fileid
1204 self._fileid = fileid
1184
1205
1185 @propertycache
1206 @propertycache
1186 def _changectx(self):
1207 def _changectx(self):
1187 try:
1208 try:
1188 return self._repo[self._changeid]
1209 return self._repo[self._changeid]
1189 except error.FilteredRepoLookupError:
1210 except error.FilteredRepoLookupError:
1190 # Linkrev may point to any revision in the repository. When the
1211 # Linkrev may point to any revision in the repository. When the
1191 # repository is filtered this may lead to `filectx` trying to build
1212 # repository is filtered this may lead to `filectx` trying to build
1192 # `changectx` for filtered revision. In such case we fallback to
1213 # `changectx` for filtered revision. In such case we fallback to
1193 # creating `changectx` on the unfiltered version of the reposition.
1214 # creating `changectx` on the unfiltered version of the reposition.
1194 # This fallback should not be an issue because `changectx` from
1215 # This fallback should not be an issue because `changectx` from
1195 # `filectx` are not used in complex operations that care about
1216 # `filectx` are not used in complex operations that care about
1196 # filtering.
1217 # filtering.
1197 #
1218 #
1198 # This fallback is a cheap and dirty fix that prevent several
1219 # This fallback is a cheap and dirty fix that prevent several
1199 # crashes. It does not ensure the behavior is correct. However the
1220 # crashes. It does not ensure the behavior is correct. However the
1200 # behavior was not correct before filtering either and "incorrect
1221 # behavior was not correct before filtering either and "incorrect
1201 # behavior" is seen as better as "crash"
1222 # behavior" is seen as better as "crash"
1202 #
1223 #
1203 # Linkrevs have several serious troubles with filtering that are
1224 # Linkrevs have several serious troubles with filtering that are
1204 # complicated to solve. Proper handling of the issue here should be
1225 # complicated to solve. Proper handling of the issue here should be
1205 # considered when solving linkrev issue are on the table.
1226 # considered when solving linkrev issue are on the table.
1206 return self._repo.unfiltered()[self._changeid]
1227 return self._repo.unfiltered()[self._changeid]
1207
1228
1208 def filectx(self, fileid, changeid=None):
1229 def filectx(self, fileid, changeid=None):
1209 '''opens an arbitrary revision of the file without
1230 '''opens an arbitrary revision of the file without
1210 opening a new filelog'''
1231 opening a new filelog'''
1211 return filectx(
1232 return filectx(
1212 self._repo,
1233 self._repo,
1213 self._path,
1234 self._path,
1214 fileid=fileid,
1235 fileid=fileid,
1215 filelog=self._filelog,
1236 filelog=self._filelog,
1216 changeid=changeid,
1237 changeid=changeid,
1217 )
1238 )
1218
1239
1219 def rawdata(self):
1240 def rawdata(self):
1220 return self._filelog.rawdata(self._filenode)
1241 return self._filelog.rawdata(self._filenode)
1221
1242
1222 def rawflags(self):
1243 def rawflags(self):
1223 """low-level revlog flags"""
1244 """low-level revlog flags"""
1224 return self._filelog.flags(self._filerev)
1245 return self._filelog.flags(self._filerev)
1225
1246
1226 def data(self):
1247 def data(self):
1227 try:
1248 try:
1228 return self._filelog.read(self._filenode)
1249 return self._filelog.read(self._filenode)
1229 except error.CensoredNodeError:
1250 except error.CensoredNodeError:
1230 if self._repo.ui.config(b"censor", b"policy") == b"ignore":
1251 if self._repo.ui.config(b"censor", b"policy") == b"ignore":
1231 return b""
1252 return b""
1232 raise error.Abort(
1253 raise error.Abort(
1233 _(b"censored node: %s") % short(self._filenode),
1254 _(b"censored node: %s") % short(self._filenode),
1234 hint=_(b"set censor.policy to ignore errors"),
1255 hint=_(b"set censor.policy to ignore errors"),
1235 )
1256 )
1236
1257
1237 def size(self):
1258 def size(self):
1238 return self._filelog.size(self._filerev)
1259 return self._filelog.size(self._filerev)
1239
1260
1240 @propertycache
1261 @propertycache
1241 def _copied(self):
1262 def _copied(self):
1242 """check if file was actually renamed in this changeset revision
1263 """check if file was actually renamed in this changeset revision
1243
1264
1244 If rename logged in file revision, we report copy for changeset only
1265 If rename logged in file revision, we report copy for changeset only
1245 if file revisions linkrev points back to the changeset in question
1266 if file revisions linkrev points back to the changeset in question
1246 or both changeset parents contain different file revisions.
1267 or both changeset parents contain different file revisions.
1247 """
1268 """
1248
1269
1249 renamed = self._filelog.renamed(self._filenode)
1270 renamed = self._filelog.renamed(self._filenode)
1250 if not renamed:
1271 if not renamed:
1251 return None
1272 return None
1252
1273
1253 if self.rev() == self.linkrev():
1274 if self.rev() == self.linkrev():
1254 return renamed
1275 return renamed
1255
1276
1256 name = self.path()
1277 name = self.path()
1257 fnode = self._filenode
1278 fnode = self._filenode
1258 for p in self._changectx.parents():
1279 for p in self._changectx.parents():
1259 try:
1280 try:
1260 if fnode == p.filenode(name):
1281 if fnode == p.filenode(name):
1261 return None
1282 return None
1262 except error.LookupError:
1283 except error.LookupError:
1263 pass
1284 pass
1264 return renamed
1285 return renamed
1265
1286
1266 def children(self):
1287 def children(self):
1267 # hard for renames
1288 # hard for renames
1268 c = self._filelog.children(self._filenode)
1289 c = self._filelog.children(self._filenode)
1269 return [
1290 return [
1270 filectx(self._repo, self._path, fileid=x, filelog=self._filelog)
1291 filectx(self._repo, self._path, fileid=x, filelog=self._filelog)
1271 for x in c
1292 for x in c
1272 ]
1293 ]
1273
1294
1274
1295
1275 class committablectx(basectx):
1296 class committablectx(basectx):
1276 """A committablectx object provides common functionality for a context that
1297 """A committablectx object provides common functionality for a context that
1277 wants the ability to commit, e.g. workingctx or memctx."""
1298 wants the ability to commit, e.g. workingctx or memctx."""
1278
1299
1279 def __init__(
1300 def __init__(
1280 self,
1301 self,
1281 repo,
1302 repo,
1282 text=b"",
1303 text=b"",
1283 user=None,
1304 user=None,
1284 date=None,
1305 date=None,
1285 extra=None,
1306 extra=None,
1286 changes=None,
1307 changes=None,
1287 branch=None,
1308 branch=None,
1288 ):
1309 ):
1289 super(committablectx, self).__init__(repo)
1310 super(committablectx, self).__init__(repo)
1290 self._rev = None
1311 self._rev = None
1291 self._node = None
1312 self._node = None
1292 self._text = text
1313 self._text = text
1293 if date:
1314 if date:
1294 self._date = dateutil.parsedate(date)
1315 self._date = dateutil.parsedate(date)
1295 if user:
1316 if user:
1296 self._user = user
1317 self._user = user
1297 if changes:
1318 if changes:
1298 self._status = changes
1319 self._status = changes
1299
1320
1300 self._extra = {}
1321 self._extra = {}
1301 if extra:
1322 if extra:
1302 self._extra = extra.copy()
1323 self._extra = extra.copy()
1303 if branch is not None:
1324 if branch is not None:
1304 self._extra[b'branch'] = encoding.fromlocal(branch)
1325 self._extra[b'branch'] = encoding.fromlocal(branch)
1305 if not self._extra.get(b'branch'):
1326 if not self._extra.get(b'branch'):
1306 self._extra[b'branch'] = b'default'
1327 self._extra[b'branch'] = b'default'
1307
1328
1308 def __bytes__(self):
1329 def __bytes__(self):
1309 return bytes(self._parents[0]) + b"+"
1330 return bytes(self._parents[0]) + b"+"
1310
1331
1311 __str__ = encoding.strmethod(__bytes__)
1332 __str__ = encoding.strmethod(__bytes__)
1312
1333
1313 def __nonzero__(self):
1334 def __nonzero__(self):
1314 return True
1335 return True
1315
1336
1316 __bool__ = __nonzero__
1337 __bool__ = __nonzero__
1317
1338
1318 @propertycache
1339 @propertycache
1319 def _status(self):
1340 def _status(self):
1320 return self._repo.status()
1341 return self._repo.status()
1321
1342
1322 @propertycache
1343 @propertycache
1323 def _user(self):
1344 def _user(self):
1324 return self._repo.ui.username()
1345 return self._repo.ui.username()
1325
1346
1326 @propertycache
1347 @propertycache
1327 def _date(self):
1348 def _date(self):
1328 ui = self._repo.ui
1349 ui = self._repo.ui
1329 date = ui.configdate(b'devel', b'default-date')
1350 date = ui.configdate(b'devel', b'default-date')
1330 if date is None:
1351 if date is None:
1331 date = dateutil.makedate()
1352 date = dateutil.makedate()
1332 return date
1353 return date
1333
1354
1334 def subrev(self, subpath):
1355 def subrev(self, subpath):
1335 return None
1356 return None
1336
1357
1337 def manifestnode(self):
1358 def manifestnode(self):
1338 return None
1359 return None
1339
1360
1340 def user(self):
1361 def user(self):
1341 return self._user or self._repo.ui.username()
1362 return self._user or self._repo.ui.username()
1342
1363
1343 def date(self):
1364 def date(self):
1344 return self._date
1365 return self._date
1345
1366
1346 def description(self):
1367 def description(self):
1347 return self._text
1368 return self._text
1348
1369
1349 def files(self):
1370 def files(self):
1350 return sorted(
1371 return sorted(
1351 self._status.modified + self._status.added + self._status.removed
1372 self._status.modified + self._status.added + self._status.removed
1352 )
1373 )
1353
1374
1354 def modified(self):
1375 def modified(self):
1355 return self._status.modified
1376 return self._status.modified
1356
1377
1357 def added(self):
1378 def added(self):
1358 return self._status.added
1379 return self._status.added
1359
1380
1360 def removed(self):
1381 def removed(self):
1361 return self._status.removed
1382 return self._status.removed
1362
1383
1363 def deleted(self):
1384 def deleted(self):
1364 return self._status.deleted
1385 return self._status.deleted
1365
1386
1366 filesmodified = modified
1387 filesmodified = modified
1367 filesadded = added
1388 filesadded = added
1368 filesremoved = removed
1389 filesremoved = removed
1369
1390
1370 def branch(self):
1391 def branch(self):
1371 return encoding.tolocal(self._extra[b'branch'])
1392 return encoding.tolocal(self._extra[b'branch'])
1372
1393
1373 def closesbranch(self):
1394 def closesbranch(self):
1374 return b'close' in self._extra
1395 return b'close' in self._extra
1375
1396
1376 def extra(self):
1397 def extra(self):
1377 return self._extra
1398 return self._extra
1378
1399
1379 def isinmemory(self):
1400 def isinmemory(self):
1380 return False
1401 return False
1381
1402
1382 def tags(self):
1403 def tags(self):
1383 return []
1404 return []
1384
1405
1385 def bookmarks(self):
1406 def bookmarks(self):
1386 b = []
1407 b = []
1387 for p in self.parents():
1408 for p in self.parents():
1388 b.extend(p.bookmarks())
1409 b.extend(p.bookmarks())
1389 return b
1410 return b
1390
1411
1391 def phase(self):
1412 def phase(self):
1392 phase = phases.draft # default phase to draft
1413 phase = phases.draft # default phase to draft
1393 for p in self.parents():
1414 for p in self.parents():
1394 phase = max(phase, p.phase())
1415 phase = max(phase, p.phase())
1395 return phase
1416 return phase
1396
1417
1397 def hidden(self):
1418 def hidden(self):
1398 return False
1419 return False
1399
1420
1400 def children(self):
1421 def children(self):
1401 return []
1422 return []
1402
1423
1403 def ancestor(self, c2):
1424 def ancestor(self, c2):
1404 """return the "best" ancestor context of self and c2"""
1425 """return the "best" ancestor context of self and c2"""
1405 return self._parents[0].ancestor(c2) # punt on two parents for now
1426 return self._parents[0].ancestor(c2) # punt on two parents for now
1406
1427
1407 def ancestors(self):
1428 def ancestors(self):
1408 for p in self._parents:
1429 for p in self._parents:
1409 yield p
1430 yield p
1410 for a in self._repo.changelog.ancestors(
1431 for a in self._repo.changelog.ancestors(
1411 [p.rev() for p in self._parents]
1432 [p.rev() for p in self._parents]
1412 ):
1433 ):
1413 yield self._repo[a]
1434 yield self._repo[a]
1414
1435
1415 def markcommitted(self, node):
1436 def markcommitted(self, node):
1416 """Perform post-commit cleanup necessary after committing this ctx
1437 """Perform post-commit cleanup necessary after committing this ctx
1417
1438
1418 Specifically, this updates backing stores this working context
1439 Specifically, this updates backing stores this working context
1419 wraps to reflect the fact that the changes reflected by this
1440 wraps to reflect the fact that the changes reflected by this
1420 workingctx have been committed. For example, it marks
1441 workingctx have been committed. For example, it marks
1421 modified and added files as normal in the dirstate.
1442 modified and added files as normal in the dirstate.
1422
1443
1423 """
1444 """
1424
1445
1425 def dirty(self, missing=False, merge=True, branch=True):
1446 def dirty(self, missing=False, merge=True, branch=True):
1426 return False
1447 return False
1427
1448
1428
1449
1429 class workingctx(committablectx):
1450 class workingctx(committablectx):
1430 """A workingctx object makes access to data related to
1451 """A workingctx object makes access to data related to
1431 the current working directory convenient.
1452 the current working directory convenient.
1432 date - any valid date string or (unixtime, offset), or None.
1453 date - any valid date string or (unixtime, offset), or None.
1433 user - username string, or None.
1454 user - username string, or None.
1434 extra - a dictionary of extra values, or None.
1455 extra - a dictionary of extra values, or None.
1435 changes - a list of file lists as returned by localrepo.status()
1456 changes - a list of file lists as returned by localrepo.status()
1436 or None to use the repository status.
1457 or None to use the repository status.
1437 """
1458 """
1438
1459
1439 def __init__(
1460 def __init__(
1440 self, repo, text=b"", user=None, date=None, extra=None, changes=None
1461 self, repo, text=b"", user=None, date=None, extra=None, changes=None
1441 ):
1462 ):
1442 branch = None
1463 branch = None
1443 if not extra or b'branch' not in extra:
1464 if not extra or b'branch' not in extra:
1444 try:
1465 try:
1445 branch = repo.dirstate.branch()
1466 branch = repo.dirstate.branch()
1446 except UnicodeDecodeError:
1467 except UnicodeDecodeError:
1447 raise error.Abort(_(b'branch name not in UTF-8!'))
1468 raise error.Abort(_(b'branch name not in UTF-8!'))
1448 super(workingctx, self).__init__(
1469 super(workingctx, self).__init__(
1449 repo, text, user, date, extra, changes, branch=branch
1470 repo, text, user, date, extra, changes, branch=branch
1450 )
1471 )
1451
1472
1452 def __iter__(self):
1473 def __iter__(self):
1453 d = self._repo.dirstate
1474 d = self._repo.dirstate
1454 for f in d:
1475 for f in d:
1455 if d[f] != b'r':
1476 if d[f] != b'r':
1456 yield f
1477 yield f
1457
1478
1458 def __contains__(self, key):
1479 def __contains__(self, key):
1459 return self._repo.dirstate[key] not in b"?r"
1480 return self._repo.dirstate[key] not in b"?r"
1460
1481
1461 def hex(self):
1482 def hex(self):
1462 return wdirhex
1483 return wdirhex
1463
1484
1464 @propertycache
1485 @propertycache
1465 def _parents(self):
1486 def _parents(self):
1466 p = self._repo.dirstate.parents()
1487 p = self._repo.dirstate.parents()
1467 if p[1] == nullid:
1488 if p[1] == nullid:
1468 p = p[:-1]
1489 p = p[:-1]
1469 # use unfiltered repo to delay/avoid loading obsmarkers
1490 # use unfiltered repo to delay/avoid loading obsmarkers
1470 unfi = self._repo.unfiltered()
1491 unfi = self._repo.unfiltered()
1471 return [changectx(self._repo, unfi.changelog.rev(n), n) for n in p]
1492 return [changectx(self._repo, unfi.changelog.rev(n), n) for n in p]
1472
1493
    def _fileinfo(self, path):
        """Return file info for *path*, forcing manifest materialization first."""
        # populate __dict__['_manifest'] as workingctx has no _manifestdelta
        self._manifest
        return super(workingctx, self)._fileinfo(path)
1477
1498
    def _buildflagfunc(self):
        """Return a fallback ``flags(f)`` function for use when the
        filesystem does not support exec/symlink flags.

        With a single parent, flags come straight from that parent's
        manifest (following dirstate copies).  For a merge, the unstored
        merge result is reconstructed from both parents and their common
        ancestor (issue1802).
        """
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()

            def func(f):
                f = copiesget(f, f)
                return man.flags(f)

        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f)  # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return b''  # punt for conflicts

        return func
1511
1532
    @propertycache
    def _flagfunc(self):
        """Flag-lookup function; uses _buildflagfunc as the fallback."""
        return self._repo.dirstate.flagfunc(self._buildflagfunc)
1515
1536
1516 def flags(self, path):
1537 def flags(self, path):
1517 if r'_manifest' in self.__dict__:
1538 if r'_manifest' in self.__dict__:
1518 try:
1539 try:
1519 return self._manifest.flags(path)
1540 return self._manifest.flags(path)
1520 except KeyError:
1541 except KeyError:
1521 return b''
1542 return b''
1522
1543
1523 try:
1544 try:
1524 return self._flagfunc(path)
1545 return self._flagfunc(path)
1525 except OSError:
1546 except OSError:
1526 return b''
1547 return b''
1527
1548
    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        An optional *filelog* can be supplied to avoid a redundant lookup.
        """
        return workingfilectx(
            self._repo, path, workingctx=self, filelog=filelog
        )
1533
1554
    def dirty(self, missing=False, merge=True, branch=True):
        b"check whether a working directory is modified"
        # check subrepos first
        for s in sorted(self.substate):
            if self.sub(s).dirty(missing=missing):
                return True
        # check current working dir
        # NOTE: the or-chain short-circuits and may return a truthy
        # non-boolean operand (e.g. a context or a file list); callers
        # use the result in boolean context.
        return (
            (merge and self.p2())
            or (branch and self.branch() != self.p1().branch())
            or self.modified()
            or self.added()
            or self.removed()
            or (missing and self.deleted())
        )
1549
1570
    def add(self, list, prefix=b""):
        """Schedule the files in *list* for addition; return those rejected.

        Warns (without rejecting) about files exceeding ui.large-file-limit,
        and rejects files that are missing or are neither regular files nor
        symlinks.
        """
        with self._repo.wlock():
            ui, ds = self._repo.ui, self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                # ds.pathto() returns an absolute file when this is invoked from
                # the keyword extension. That gets flagged as non-portable on
                # Windows, since it contains the drive letter and colon.
                scmutil.checkportable(ui, os.path.join(prefix, f))
                try:
                    st = lstat(f)
                except OSError:
                    ui.warn(_(b"%s does not exist!\n") % uipath(f))
                    rejected.append(f)
                    continue
                limit = ui.configbytes(b'ui', b'large-file-limit')
                if limit != 0 and st.st_size > limit:
                    ui.warn(
                        _(
                            b"%s: up to %d MB of RAM may be required "
                            b"to manage this file\n"
                            b"(use 'hg revert %s' to cancel the "
                            b"pending addition)\n"
                        )
                        % (f, 3 * st.st_size // 1000000, uipath(f))
                    )
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(
                        _(
                            b"%s not added: only files and symlinks "
                            b"supported currently\n"
                        )
                        % uipath(f)
                    )
                    rejected.append(f)
                elif ds[f] in b'amn':
                    ui.warn(_(b"%s already tracked!\n") % uipath(f))
                elif ds[f] == b'r':
                    # previously removed: resurrect instead of re-adding
                    ds.normallookup(f)
                else:
                    ds.add(f)
            return rejected
1594
1615
1595 def forget(self, files, prefix=b""):
1616 def forget(self, files, prefix=b""):
1596 with self._repo.wlock():
1617 with self._repo.wlock():
1597 ds = self._repo.dirstate
1618 ds = self._repo.dirstate
1598 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1619 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1599 rejected = []
1620 rejected = []
1600 for f in files:
1621 for f in files:
1601 if f not in ds:
1622 if f not in ds:
1602 self._repo.ui.warn(_(b"%s not tracked!\n") % uipath(f))
1623 self._repo.ui.warn(_(b"%s not tracked!\n") % uipath(f))
1603 rejected.append(f)
1624 rejected.append(f)
1604 elif ds[f] != b'a':
1625 elif ds[f] != b'a':
1605 ds.remove(f)
1626 ds.remove(f)
1606 else:
1627 else:
1607 ds.drop(f)
1628 ds.drop(f)
1608 return rejected
1629 return rejected
1609
1630
    def copy(self, source, dest):
        """Record in the dirstate that *dest* is a copy of *source*.

        Warns and does nothing when *dest* is missing or is not a regular
        file or symlink.
        """
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            self._repo.ui.warn(
                _(b"%s does not exist!\n") % self._repo.dirstate.pathto(dest)
            )
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(
                _(b"copy failed: %s is not a file or a symbolic link\n")
                % self._repo.dirstate.pathto(dest)
            )
        else:
            with self._repo.wlock():
                ds = self._repo.dirstate
                if ds[dest] in b'?':
                    ds.add(dest)
                elif ds[dest] in b'r':
                    ds.normallookup(dest)
                ds.copy(source, dest)
1633
1654
    def match(
        self,
        pats=None,
        include=None,
        exclude=None,
        default=b'glob',
        listsubrepos=False,
        badfn=None,
    ):
        """Build a matcher over this context's files.

        On case-insensitive filesystems, extra handling maps user-supplied
        names to the actual on-disk case.
        """
        r = self._repo

        # Only a case insensitive filesystem needs magic to translate user input
        # to actual case in the filesystem.
        icasefs = not util.fscasesensitive(r.root)
        return matchmod.match(
            r.root,
            r.getcwd(),
            pats,
            include,
            exclude,
            default,
            auditor=r.auditor,
            ctx=self,
            listsubrepos=listsubrepos,
            badfn=badfn,
            icasefs=icasefs,
        )
1661
1682
1662 def _filtersuspectsymlink(self, files):
1683 def _filtersuspectsymlink(self, files):
1663 if not files or self._repo.dirstate._checklink:
1684 if not files or self._repo.dirstate._checklink:
1664 return files
1685 return files
1665
1686
1666 # Symlink placeholders may get non-symlink-like contents
1687 # Symlink placeholders may get non-symlink-like contents
1667 # via user error or dereferencing by NFS or Samba servers,
1688 # via user error or dereferencing by NFS or Samba servers,
1668 # so we filter out any placeholders that don't look like a
1689 # so we filter out any placeholders that don't look like a
1669 # symlink
1690 # symlink
1670 sane = []
1691 sane = []
1671 for f in files:
1692 for f in files:
1672 if self.flags(f) == b'l':
1693 if self.flags(f) == b'l':
1673 d = self[f].data()
1694 d = self[f].data()
1674 if (
1695 if (
1675 d == b''
1696 d == b''
1676 or len(d) >= 1024
1697 or len(d) >= 1024
1677 or b'\n' in d
1698 or b'\n' in d
1678 or stringutil.binary(d)
1699 or stringutil.binary(d)
1679 ):
1700 ):
1680 self._repo.ui.debug(
1701 self._repo.ui.debug(
1681 b'ignoring suspect symlink placeholder "%s"\n' % f
1702 b'ignoring suspect symlink placeholder "%s"\n' % f
1682 )
1703 )
1683 continue
1704 continue
1684 sane.append(f)
1705 sane.append(f)
1685 return sane
1706 return sane
1686
1707
    def _checklookup(self, files):
        """Classify possibly-clean *files* against the first parent.

        Returns ``(modified, deleted, fixup)``: files whose content or
        flags actually differ, files that became inaccessible meanwhile,
        and files that turned out to be clean (dirstate can be fixed up).
        """
        # check for any possibly clean files
        if not files:
            return [], [], []

        modified = []
        deleted = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            try:
                # This will return True for a file that got replaced by a
                # directory in the interim, but fixing that is pretty hard.
                if (
                    f not in pctx
                    or self.flags(f) != pctx.flags(f)
                    or pctx[f].cmp(self[f])
                ):
                    modified.append(f)
                else:
                    fixup.append(f)
            except (IOError, OSError):
                # A file become inaccessible in between? Mark it as deleted,
                # matching dirstate behavior (issue5584).
                # The dirstate has more complex behavior around whether a
                # missing file matches a directory, etc, but we don't need to
                # bother with that: if f has made it to this point, we're sure
                # it's in the dirstate.
                deleted.append(f)

        return modified, deleted, fixup
1719
1740
    def _poststatusfixup(self, status, fixup):
        """update dirstate for files that are actually clean

        Best-effort: the wlock is taken non-blocking and a LockError is
        swallowed; the dirstate is only written when its on-disk identity
        is unchanged since *fixup* was computed (issue5584).  Registered
        post-status hooks run under the same lock, and the hook list is
        always cleared.
        """
        poststatus = self._repo.postdsstatus()
        if fixup or poststatus:
            try:
                oldid = self._repo.dirstate.identity()

                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                with self._repo.wlock(False):
                    if self._repo.dirstate.identity() == oldid:
                        if fixup:
                            normal = self._repo.dirstate.normal
                            for f in fixup:
                                normal(f)
                            # write changes out explicitly, because nesting
                            # wlock at runtime may prevent 'wlock.release()'
                            # after this block from doing so for subsequent
                            # changing files
                            tr = self._repo.currenttransaction()
                            self._repo.dirstate.write(tr)

                        if poststatus:
                            for ps in poststatus:
                                ps(self, status)
                    else:
                        # in this case, writing changes out breaks
                        # consistency, because .hg/dirstate was
                        # already changed simultaneously after last
                        # caching (see also issue5584 for detail)
                        self._repo.ui.debug(
                            b'skip updating dirstate: identity mismatch\n'
                        )
            except error.LockError:
                pass
            finally:
                # Even if the wlock couldn't be grabbed, clear out the list.
                self._repo.clearpostdsstatus()
1760
1781
    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        '''Gets the status from the dirstate -- internal use only.'''
        subrepos = []
        if b'.hgsub' in self:
            subrepos = sorted(self.substate)
        # dirstate.status returns (possibly-clean candidates, status)
        cmp, s = self._repo.dirstate.status(
            match, subrepos, ignored=ignored, clean=clean, unknown=unknown
        )

        # check for any possibly clean files
        fixup = []
        if cmp:
            modified2, deleted2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)
            s.deleted.extend(deleted2)

        if fixup and clean:
            s.clean.extend(fixup)

        self._poststatusfixup(s, fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(
                    s.modified, s.added, s.removed, s.deleted, [], [], []
                )
            else:
                self._status = s

        return s
1793
1814
    @propertycache
    def _copies(self):
        """Return ``(p1copies, p2copies)`` mappings of dst -> copy source.

        Only copies whose destination was added/modified and matches the
        narrowspec are kept; each source is attributed to whichever parent
        manifest contains it (first parent wins).
        """
        p1copies = {}
        p2copies = {}
        parents = self._repo.dirstate.parents()
        p1manifest = self._repo[parents[0]].manifest()
        p2manifest = self._repo[parents[1]].manifest()
        changedset = set(self.added()) | set(self.modified())
        narrowmatch = self._repo.narrowmatch()
        for dst, src in self._repo.dirstate.copies().items():
            if dst not in changedset or not narrowmatch(dst):
                continue
            if src in p1manifest:
                p1copies[dst] = src
            elif src in p2manifest:
                p2copies[dst] = src
        return p1copies, p2copies
1811
1832
    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status

        This reuse the file nodeid from parent, but we use special node
        identifiers for added and modified files. This is used by manifests
        merge to see that files are different and by update logic to avoid
        deleting newly added files.
        """
        return self._buildstatusmanifest(self._status)
1822
1843
1823 def _buildstatusmanifest(self, status):
1844 def _buildstatusmanifest(self, status):
1824 """Builds a manifest that includes the given status results."""
1845 """Builds a manifest that includes the given status results."""
1825 parents = self.parents()
1846 parents = self.parents()
1826
1847
1827 man = parents[0].manifest().copy()
1848 man = parents[0].manifest().copy()
1828
1849
1829 ff = self._flagfunc
1850 ff = self._flagfunc
1830 for i, l in (
1851 for i, l in (
1831 (addednodeid, status.added),
1852 (addednodeid, status.added),
1832 (modifiednodeid, status.modified),
1853 (modifiednodeid, status.modified),
1833 ):
1854 ):
1834 for f in l:
1855 for f in l:
1835 man[f] = i
1856 man[f] = i
1836 try:
1857 try:
1837 man.setflag(f, ff(f))
1858 man.setflag(f, ff(f))
1838 except OSError:
1859 except OSError:
1839 pass
1860 pass
1840
1861
1841 for f in status.deleted + status.removed:
1862 for f in status.deleted + status.removed:
1842 if f in man:
1863 if f in man:
1843 del man[f]
1864 del man[f]
1844
1865
1845 return man
1866 return man
1846
1867
    def _buildstatus(
        self, other, s, match, listignored, listclean, listunknown
    ):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        # the incoming status is recomputed from the dirstate here
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo[b'.']:
            s = super(workingctx, self)._buildstatus(
                other, s, match, listignored, listclean, listunknown
            )
        return s
1867
1888
    def _matchstatus(self, other, match):
        """override the match method with a filter for directory patterns

        We use inheritance to customize the match.bad method only in cases of
        workingctx since it belongs only to the working directory when
        comparing against the parent changeset.

        If we aren't comparing against the working directory's parent, then we
        just use the default match object sent to us.
        """
        if other != self._repo[b'.']:

            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in other and not other.hasdir(f):
                    self._repo.ui.warn(
                        b'%s: %s\n' % (self._repo.dirstate.pathto(f), msg)
                    )

            match.bad = bad
        return match
1890
1911
1891 def walk(self, match):
1912 def walk(self, match):
1892 '''Generates matching file names.'''
1913 '''Generates matching file names.'''
1893 return sorted(
1914 return sorted(
1894 self._repo.dirstate.walk(
1915 self._repo.dirstate.walk(
1895 self._repo.narrowmatch(match),
1916 self._repo.narrowmatch(match),
1896 subrepos=sorted(self.substate),
1917 subrepos=sorted(self.substate),
1897 unknown=True,
1918 unknown=True,
1898 ignored=False,
1919 ignored=False,
1899 )
1920 )
1900 )
1921 )
1901
1922
1902 def matches(self, match):
1923 def matches(self, match):
1903 match = self._repo.narrowmatch(match)
1924 match = self._repo.narrowmatch(match)
1904 ds = self._repo.dirstate
1925 ds = self._repo.dirstate
1905 return sorted(f for f in ds.matches(match) if ds[f] != b'r')
1926 return sorted(f for f in ds.matches(match) if ds[f] != b'r')
1906
1927
    def markcommitted(self, node):
        """Synchronize the dirstate after committing *node*.

        Modified/added files become normal, removed ones are dropped, and
        the dirstate parent is moved to *node*.
        """
        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.normal(f)
            for f in self.removed():
                self._repo.dirstate.drop(f)
            self._repo.dirstate.setparents(node)

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())

        sparse.aftercommit(self._repo, node)
1921
1942
1922
1943
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""

    def __init__(self, repo, path, filelog=None, ctx=None):
        """Initialize for *path* in *repo*; filelog/ctx are optional caches."""
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        # a committable file context always exists
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def renamed(self):
        """Return (source path, source filenode) if copied, else None."""
        path = self.copysource()
        if not path:
            return None
        return path, self._changectx._parents[0]._manifest.get(path, nullid)

    def parents(self):
        '''return parent filectxs, following copies if necessary'''

        def filenode(ctx, path):
            # filenode in ctx's manifest, nullid when absent
            return ctx._manifest.get(path, nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            # (path, node, filelog=None) — filelog resolved lazily for copies
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        # drop parents where the file does not exist (nullid)
        return [
            self._parentfilectx(p, fileid=n, filelog=l)
            for p, n, l in pl
            if n != nullid
        ]

    def children(self):
        # committable (uncommitted) file contexts have no children
        return []
1980
2001
1981
2002
1982 class workingfilectx(committablefilectx):
2003 class workingfilectx(committablefilectx):
1983 """A workingfilectx object makes access to data related to a particular
2004 """A workingfilectx object makes access to data related to a particular
1984 file in the working directory convenient."""
2005 file in the working directory convenient."""
1985
2006
    def __init__(self, repo, path, filelog=None, workingctx=None):
        """Initialize for *path* in *repo*'s working directory."""
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1988
2009
    @propertycache
    def _changectx(self):
        """Lazily create the owning workingctx."""
        return workingctx(self._repo)
1992
2013
    def data(self):
        """Return the file's content as read from the working directory."""
        return self._repo.wread(self._path)
1995
2016
    def copysource(self):
        """Return the dirstate-recorded copy source of this file, if any."""
        return self._repo.dirstate.copied(self._path)
1998
2019
    def size(self):
        """Return the on-disk size of the file (without following symlinks)."""
        return self._repo.wvfs.lstat(self._path).st_size
2001
2022
    def lstat(self):
        """Return the lstat result for the file in the working directory."""
        return self._repo.wvfs.lstat(self._path)
2004
2025
    def date(self):
        """Return (timestamp, tzoffset) for the file.

        Uses the on-disk mtime when available, otherwise falls back to the
        changectx's own date.
        """
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)
2013
2034
    def exists(self):
        """True if the path exists in the working directory (follows symlinks)."""
        return self._repo.wvfs.exists(self._path)
2016
2037
2017 def lexists(self):
2038 def lexists(self):
2018 return self._repo.wvfs.lexists(self._path)
2039 return self._repo.wvfs.lexists(self._path)
2019
2040
2020 def audit(self):
2041 def audit(self):
2021 return self._repo.wvfs.audit(self._path)
2042 return self._repo.wvfs.audit(self._path)
2022
2043
2023 def cmp(self, fctx):
2044 def cmp(self, fctx):
2024 """compare with other file context
2045 """compare with other file context
2025
2046
2026 returns True if different than fctx.
2047 returns True if different than fctx.
2027 """
2048 """
2028 # fctx should be a filectx (not a workingfilectx)
2049 # fctx should be a filectx (not a workingfilectx)
2029 # invert comparison to reuse the same code path
2050 # invert comparison to reuse the same code path
2030 return fctx.cmp(self)
2051 return fctx.cmp(self)
2031
2052
2032 def remove(self, ignoremissing=False):
2053 def remove(self, ignoremissing=False):
2033 """wraps unlink for a repo's working directory"""
2054 """wraps unlink for a repo's working directory"""
2034 rmdir = self._repo.ui.configbool(b'experimental', b'removeemptydirs')
2055 rmdir = self._repo.ui.configbool(b'experimental', b'removeemptydirs')
2035 self._repo.wvfs.unlinkpath(
2056 self._repo.wvfs.unlinkpath(
2036 self._path, ignoremissing=ignoremissing, rmdir=rmdir
2057 self._path, ignoremissing=ignoremissing, rmdir=rmdir
2037 )
2058 )
2038
2059
2039 def write(self, data, flags, backgroundclose=False, **kwargs):
2060 def write(self, data, flags, backgroundclose=False, **kwargs):
2040 """wraps repo.wwrite"""
2061 """wraps repo.wwrite"""
2041 return self._repo.wwrite(
2062 return self._repo.wwrite(
2042 self._path, data, flags, backgroundclose=backgroundclose, **kwargs
2063 self._path, data, flags, backgroundclose=backgroundclose, **kwargs
2043 )
2064 )
2044
2065
2045 def markcopied(self, src):
2066 def markcopied(self, src):
2046 """marks this file a copy of `src`"""
2067 """marks this file a copy of `src`"""
2047 self._repo.dirstate.copy(src, self._path)
2068 self._repo.dirstate.copy(src, self._path)
2048
2069
2049 def clearunknown(self):
2070 def clearunknown(self):
2050 """Removes conflicting items in the working directory so that
2071 """Removes conflicting items in the working directory so that
2051 ``write()`` can be called successfully.
2072 ``write()`` can be called successfully.
2052 """
2073 """
2053 wvfs = self._repo.wvfs
2074 wvfs = self._repo.wvfs
2054 f = self._path
2075 f = self._path
2055 wvfs.audit(f)
2076 wvfs.audit(f)
2056 if self._repo.ui.configbool(
2077 if self._repo.ui.configbool(
2057 b'experimental', b'merge.checkpathconflicts'
2078 b'experimental', b'merge.checkpathconflicts'
2058 ):
2079 ):
2059 # remove files under the directory as they should already be
2080 # remove files under the directory as they should already be
2060 # warned and backed up
2081 # warned and backed up
2061 if wvfs.isdir(f) and not wvfs.islink(f):
2082 if wvfs.isdir(f) and not wvfs.islink(f):
2062 wvfs.rmtree(f, forcibly=True)
2083 wvfs.rmtree(f, forcibly=True)
2063 for p in reversed(list(util.finddirs(f))):
2084 for p in reversed(list(util.finddirs(f))):
2064 if wvfs.isfileorlink(p):
2085 if wvfs.isfileorlink(p):
2065 wvfs.unlink(p)
2086 wvfs.unlink(p)
2066 break
2087 break
2067 else:
2088 else:
2068 # don't remove files if path conflicts are not processed
2089 # don't remove files if path conflicts are not processed
2069 if wvfs.isdir(f) and not wvfs.islink(f):
2090 if wvfs.isdir(f) and not wvfs.islink(f):
2070 wvfs.removedirs(f)
2091 wvfs.removedirs(f)
2071
2092
2072 def setflags(self, l, x):
2093 def setflags(self, l, x):
2073 self._repo.wvfs.setflags(self._path, l, x)
2094 self._repo.wvfs.setflags(self._path, l, x)
2074
2095
2075
2096
2076 class overlayworkingctx(committablectx):
2097 class overlayworkingctx(committablectx):
2077 """Wraps another mutable context with a write-back cache that can be
2098 """Wraps another mutable context with a write-back cache that can be
2078 converted into a commit context.
2099 converted into a commit context.
2079
2100
2080 self._cache[path] maps to a dict with keys: {
2101 self._cache[path] maps to a dict with keys: {
2081 'exists': bool?
2102 'exists': bool?
2082 'date': date?
2103 'date': date?
2083 'data': str?
2104 'data': str?
2084 'flags': str?
2105 'flags': str?
2085 'copied': str? (path or None)
2106 'copied': str? (path or None)
2086 }
2107 }
2087 If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
2108 If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
2088 is `False`, the file was deleted.
2109 is `False`, the file was deleted.
2089 """
2110 """
2090
2111
2091 def __init__(self, repo):
2112 def __init__(self, repo):
2092 super(overlayworkingctx, self).__init__(repo)
2113 super(overlayworkingctx, self).__init__(repo)
2093 self.clean()
2114 self.clean()
2094
2115
2095 def setbase(self, wrappedctx):
2116 def setbase(self, wrappedctx):
2096 self._wrappedctx = wrappedctx
2117 self._wrappedctx = wrappedctx
2097 self._parents = [wrappedctx]
2118 self._parents = [wrappedctx]
2098 # Drop old manifest cache as it is now out of date.
2119 # Drop old manifest cache as it is now out of date.
2099 # This is necessary when, e.g., rebasing several nodes with one
2120 # This is necessary when, e.g., rebasing several nodes with one
2100 # ``overlayworkingctx`` (e.g. with --collapse).
2121 # ``overlayworkingctx`` (e.g. with --collapse).
2101 util.clearcachedproperty(self, b'_manifest')
2122 util.clearcachedproperty(self, b'_manifest')
2102
2123
2103 def data(self, path):
2124 def data(self, path):
2104 if self.isdirty(path):
2125 if self.isdirty(path):
2105 if self._cache[path][b'exists']:
2126 if self._cache[path][b'exists']:
2106 if self._cache[path][b'data'] is not None:
2127 if self._cache[path][b'data'] is not None:
2107 return self._cache[path][b'data']
2128 return self._cache[path][b'data']
2108 else:
2129 else:
2109 # Must fallback here, too, because we only set flags.
2130 # Must fallback here, too, because we only set flags.
2110 return self._wrappedctx[path].data()
2131 return self._wrappedctx[path].data()
2111 else:
2132 else:
2112 raise error.ProgrammingError(
2133 raise error.ProgrammingError(
2113 b"No such file or directory: %s" % path
2134 b"No such file or directory: %s" % path
2114 )
2135 )
2115 else:
2136 else:
2116 return self._wrappedctx[path].data()
2137 return self._wrappedctx[path].data()
2117
2138
2118 @propertycache
2139 @propertycache
2119 def _manifest(self):
2140 def _manifest(self):
2120 parents = self.parents()
2141 parents = self.parents()
2121 man = parents[0].manifest().copy()
2142 man = parents[0].manifest().copy()
2122
2143
2123 flag = self._flagfunc
2144 flag = self._flagfunc
2124 for path in self.added():
2145 for path in self.added():
2125 man[path] = addednodeid
2146 man[path] = addednodeid
2126 man.setflag(path, flag(path))
2147 man.setflag(path, flag(path))
2127 for path in self.modified():
2148 for path in self.modified():
2128 man[path] = modifiednodeid
2149 man[path] = modifiednodeid
2129 man.setflag(path, flag(path))
2150 man.setflag(path, flag(path))
2130 for path in self.removed():
2151 for path in self.removed():
2131 del man[path]
2152 del man[path]
2132 return man
2153 return man
2133
2154
2134 @propertycache
2155 @propertycache
2135 def _flagfunc(self):
2156 def _flagfunc(self):
2136 def f(path):
2157 def f(path):
2137 return self._cache[path][b'flags']
2158 return self._cache[path][b'flags']
2138
2159
2139 return f
2160 return f
2140
2161
2141 def files(self):
2162 def files(self):
2142 return sorted(self.added() + self.modified() + self.removed())
2163 return sorted(self.added() + self.modified() + self.removed())
2143
2164
2144 def modified(self):
2165 def modified(self):
2145 return [
2166 return [
2146 f
2167 f
2147 for f in self._cache.keys()
2168 for f in self._cache.keys()
2148 if self._cache[f][b'exists'] and self._existsinparent(f)
2169 if self._cache[f][b'exists'] and self._existsinparent(f)
2149 ]
2170 ]
2150
2171
2151 def added(self):
2172 def added(self):
2152 return [
2173 return [
2153 f
2174 f
2154 for f in self._cache.keys()
2175 for f in self._cache.keys()
2155 if self._cache[f][b'exists'] and not self._existsinparent(f)
2176 if self._cache[f][b'exists'] and not self._existsinparent(f)
2156 ]
2177 ]
2157
2178
2158 def removed(self):
2179 def removed(self):
2159 return [
2180 return [
2160 f
2181 f
2161 for f in self._cache.keys()
2182 for f in self._cache.keys()
2162 if not self._cache[f][b'exists'] and self._existsinparent(f)
2183 if not self._cache[f][b'exists'] and self._existsinparent(f)
2163 ]
2184 ]
2164
2185
2165 def p1copies(self):
2186 def p1copies(self):
2166 copies = self._repo._wrappedctx.p1copies().copy()
2187 copies = self._repo._wrappedctx.p1copies().copy()
2167 narrowmatch = self._repo.narrowmatch()
2188 narrowmatch = self._repo.narrowmatch()
2168 for f in self._cache.keys():
2189 for f in self._cache.keys():
2169 if not narrowmatch(f):
2190 if not narrowmatch(f):
2170 continue
2191 continue
2171 copies.pop(f, None) # delete if it exists
2192 copies.pop(f, None) # delete if it exists
2172 source = self._cache[f][b'copied']
2193 source = self._cache[f][b'copied']
2173 if source:
2194 if source:
2174 copies[f] = source
2195 copies[f] = source
2175 return copies
2196 return copies
2176
2197
2177 def p2copies(self):
2198 def p2copies(self):
2178 copies = self._repo._wrappedctx.p2copies().copy()
2199 copies = self._repo._wrappedctx.p2copies().copy()
2179 narrowmatch = self._repo.narrowmatch()
2200 narrowmatch = self._repo.narrowmatch()
2180 for f in self._cache.keys():
2201 for f in self._cache.keys():
2181 if not narrowmatch(f):
2202 if not narrowmatch(f):
2182 continue
2203 continue
2183 copies.pop(f, None) # delete if it exists
2204 copies.pop(f, None) # delete if it exists
2184 source = self._cache[f][b'copied']
2205 source = self._cache[f][b'copied']
2185 if source:
2206 if source:
2186 copies[f] = source
2207 copies[f] = source
2187 return copies
2208 return copies
2188
2209
2189 def isinmemory(self):
2210 def isinmemory(self):
2190 return True
2211 return True
2191
2212
2192 def filedate(self, path):
2213 def filedate(self, path):
2193 if self.isdirty(path):
2214 if self.isdirty(path):
2194 return self._cache[path][b'date']
2215 return self._cache[path][b'date']
2195 else:
2216 else:
2196 return self._wrappedctx[path].date()
2217 return self._wrappedctx[path].date()
2197
2218
2198 def markcopied(self, path, origin):
2219 def markcopied(self, path, origin):
2199 self._markdirty(
2220 self._markdirty(
2200 path,
2221 path,
2201 exists=True,
2222 exists=True,
2202 date=self.filedate(path),
2223 date=self.filedate(path),
2203 flags=self.flags(path),
2224 flags=self.flags(path),
2204 copied=origin,
2225 copied=origin,
2205 )
2226 )
2206
2227
2207 def copydata(self, path):
2228 def copydata(self, path):
2208 if self.isdirty(path):
2229 if self.isdirty(path):
2209 return self._cache[path][b'copied']
2230 return self._cache[path][b'copied']
2210 else:
2231 else:
2211 return None
2232 return None
2212
2233
2213 def flags(self, path):
2234 def flags(self, path):
2214 if self.isdirty(path):
2235 if self.isdirty(path):
2215 if self._cache[path][b'exists']:
2236 if self._cache[path][b'exists']:
2216 return self._cache[path][b'flags']
2237 return self._cache[path][b'flags']
2217 else:
2238 else:
2218 raise error.ProgrammingError(
2239 raise error.ProgrammingError(
2219 b"No such file or directory: %s" % self._path
2240 b"No such file or directory: %s" % self._path
2220 )
2241 )
2221 else:
2242 else:
2222 return self._wrappedctx[path].flags()
2243 return self._wrappedctx[path].flags()
2223
2244
2224 def __contains__(self, key):
2245 def __contains__(self, key):
2225 if key in self._cache:
2246 if key in self._cache:
2226 return self._cache[key][b'exists']
2247 return self._cache[key][b'exists']
2227 return key in self.p1()
2248 return key in self.p1()
2228
2249
2229 def _existsinparent(self, path):
2250 def _existsinparent(self, path):
2230 try:
2251 try:
2231 # ``commitctx` raises a ``ManifestLookupError`` if a path does not
2252 # ``commitctx` raises a ``ManifestLookupError`` if a path does not
2232 # exist, unlike ``workingctx``, which returns a ``workingfilectx``
2253 # exist, unlike ``workingctx``, which returns a ``workingfilectx``
2233 # with an ``exists()`` function.
2254 # with an ``exists()`` function.
2234 self._wrappedctx[path]
2255 self._wrappedctx[path]
2235 return True
2256 return True
2236 except error.ManifestLookupError:
2257 except error.ManifestLookupError:
2237 return False
2258 return False
2238
2259
2239 def _auditconflicts(self, path):
2260 def _auditconflicts(self, path):
2240 """Replicates conflict checks done by wvfs.write().
2261 """Replicates conflict checks done by wvfs.write().
2241
2262
2242 Since we never write to the filesystem and never call `applyupdates` in
2263 Since we never write to the filesystem and never call `applyupdates` in
2243 IMM, we'll never check that a path is actually writable -- e.g., because
2264 IMM, we'll never check that a path is actually writable -- e.g., because
2244 it adds `a/foo`, but `a` is actually a file in the other commit.
2265 it adds `a/foo`, but `a` is actually a file in the other commit.
2245 """
2266 """
2246
2267
2247 def fail(path, component):
2268 def fail(path, component):
2248 # p1() is the base and we're receiving "writes" for p2()'s
2269 # p1() is the base and we're receiving "writes" for p2()'s
2249 # files.
2270 # files.
2250 if b'l' in self.p1()[component].flags():
2271 if b'l' in self.p1()[component].flags():
2251 raise error.Abort(
2272 raise error.Abort(
2252 b"error: %s conflicts with symlink %s "
2273 b"error: %s conflicts with symlink %s "
2253 b"in %d." % (path, component, self.p1().rev())
2274 b"in %d." % (path, component, self.p1().rev())
2254 )
2275 )
2255 else:
2276 else:
2256 raise error.Abort(
2277 raise error.Abort(
2257 b"error: '%s' conflicts with file '%s' in "
2278 b"error: '%s' conflicts with file '%s' in "
2258 b"%d." % (path, component, self.p1().rev())
2279 b"%d." % (path, component, self.p1().rev())
2259 )
2280 )
2260
2281
2261 # Test that each new directory to be created to write this path from p2
2282 # Test that each new directory to be created to write this path from p2
2262 # is not a file in p1.
2283 # is not a file in p1.
2263 components = path.split(b'/')
2284 components = path.split(b'/')
2264 for i in pycompat.xrange(len(components)):
2285 for i in pycompat.xrange(len(components)):
2265 component = b"/".join(components[0:i])
2286 component = b"/".join(components[0:i])
2266 if component in self:
2287 if component in self:
2267 fail(path, component)
2288 fail(path, component)
2268
2289
2269 # Test the other direction -- that this path from p2 isn't a directory
2290 # Test the other direction -- that this path from p2 isn't a directory
2270 # in p1 (test that p1 doesn't have any paths matching `path/*`).
2291 # in p1 (test that p1 doesn't have any paths matching `path/*`).
2271 match = self.match([path], default=b'path')
2292 match = self.match([path], default=b'path')
2272 matches = self.p1().manifest().matches(match)
2293 matches = self.p1().manifest().matches(match)
2273 mfiles = matches.keys()
2294 mfiles = matches.keys()
2274 if len(mfiles) > 0:
2295 if len(mfiles) > 0:
2275 if len(mfiles) == 1 and mfiles[0] == path:
2296 if len(mfiles) == 1 and mfiles[0] == path:
2276 return
2297 return
2277 # omit the files which are deleted in current IMM wctx
2298 # omit the files which are deleted in current IMM wctx
2278 mfiles = [m for m in mfiles if m in self]
2299 mfiles = [m for m in mfiles if m in self]
2279 if not mfiles:
2300 if not mfiles:
2280 return
2301 return
2281 raise error.Abort(
2302 raise error.Abort(
2282 b"error: file '%s' cannot be written because "
2303 b"error: file '%s' cannot be written because "
2283 b" '%s/' is a directory in %s (containing %d "
2304 b" '%s/' is a directory in %s (containing %d "
2284 b"entries: %s)"
2305 b"entries: %s)"
2285 % (path, path, self.p1(), len(mfiles), b', '.join(mfiles))
2306 % (path, path, self.p1(), len(mfiles), b', '.join(mfiles))
2286 )
2307 )
2287
2308
2288 def write(self, path, data, flags=b'', **kwargs):
2309 def write(self, path, data, flags=b'', **kwargs):
2289 if data is None:
2310 if data is None:
2290 raise error.ProgrammingError(b"data must be non-None")
2311 raise error.ProgrammingError(b"data must be non-None")
2291 self._auditconflicts(path)
2312 self._auditconflicts(path)
2292 self._markdirty(
2313 self._markdirty(
2293 path, exists=True, data=data, date=dateutil.makedate(), flags=flags
2314 path, exists=True, data=data, date=dateutil.makedate(), flags=flags
2294 )
2315 )
2295
2316
2296 def setflags(self, path, l, x):
2317 def setflags(self, path, l, x):
2297 flag = b''
2318 flag = b''
2298 if l:
2319 if l:
2299 flag = b'l'
2320 flag = b'l'
2300 elif x:
2321 elif x:
2301 flag = b'x'
2322 flag = b'x'
2302 self._markdirty(path, exists=True, date=dateutil.makedate(), flags=flag)
2323 self._markdirty(path, exists=True, date=dateutil.makedate(), flags=flag)
2303
2324
2304 def remove(self, path):
2325 def remove(self, path):
2305 self._markdirty(path, exists=False)
2326 self._markdirty(path, exists=False)
2306
2327
2307 def exists(self, path):
2328 def exists(self, path):
2308 """exists behaves like `lexists`, but needs to follow symlinks and
2329 """exists behaves like `lexists`, but needs to follow symlinks and
2309 return False if they are broken.
2330 return False if they are broken.
2310 """
2331 """
2311 if self.isdirty(path):
2332 if self.isdirty(path):
2312 # If this path exists and is a symlink, "follow" it by calling
2333 # If this path exists and is a symlink, "follow" it by calling
2313 # exists on the destination path.
2334 # exists on the destination path.
2314 if (
2335 if (
2315 self._cache[path][b'exists']
2336 self._cache[path][b'exists']
2316 and b'l' in self._cache[path][b'flags']
2337 and b'l' in self._cache[path][b'flags']
2317 ):
2338 ):
2318 return self.exists(self._cache[path][b'data'].strip())
2339 return self.exists(self._cache[path][b'data'].strip())
2319 else:
2340 else:
2320 return self._cache[path][b'exists']
2341 return self._cache[path][b'exists']
2321
2342
2322 return self._existsinparent(path)
2343 return self._existsinparent(path)
2323
2344
2324 def lexists(self, path):
2345 def lexists(self, path):
2325 """lexists returns True if the path exists"""
2346 """lexists returns True if the path exists"""
2326 if self.isdirty(path):
2347 if self.isdirty(path):
2327 return self._cache[path][b'exists']
2348 return self._cache[path][b'exists']
2328
2349
2329 return self._existsinparent(path)
2350 return self._existsinparent(path)
2330
2351
2331 def size(self, path):
2352 def size(self, path):
2332 if self.isdirty(path):
2353 if self.isdirty(path):
2333 if self._cache[path][b'exists']:
2354 if self._cache[path][b'exists']:
2334 return len(self._cache[path][b'data'])
2355 return len(self._cache[path][b'data'])
2335 else:
2356 else:
2336 raise error.ProgrammingError(
2357 raise error.ProgrammingError(
2337 b"No such file or directory: %s" % self._path
2358 b"No such file or directory: %s" % self._path
2338 )
2359 )
2339 return self._wrappedctx[path].size()
2360 return self._wrappedctx[path].size()
2340
2361
2341 def tomemctx(
2362 def tomemctx(
2342 self,
2363 self,
2343 text,
2364 text,
2344 branch=None,
2365 branch=None,
2345 extra=None,
2366 extra=None,
2346 date=None,
2367 date=None,
2347 parents=None,
2368 parents=None,
2348 user=None,
2369 user=None,
2349 editor=None,
2370 editor=None,
2350 ):
2371 ):
2351 """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
2372 """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
2352 committed.
2373 committed.
2353
2374
2354 ``text`` is the commit message.
2375 ``text`` is the commit message.
2355 ``parents`` (optional) are rev numbers.
2376 ``parents`` (optional) are rev numbers.
2356 """
2377 """
2357 # Default parents to the wrapped contexts' if not passed.
2378 # Default parents to the wrapped contexts' if not passed.
2358 if parents is None:
2379 if parents is None:
2359 parents = self._wrappedctx.parents()
2380 parents = self._wrappedctx.parents()
2360 if len(parents) == 1:
2381 if len(parents) == 1:
2361 parents = (parents[0], None)
2382 parents = (parents[0], None)
2362
2383
2363 # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
2384 # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
2364 if parents[1] is None:
2385 if parents[1] is None:
2365 parents = (self._repo[parents[0]], None)
2386 parents = (self._repo[parents[0]], None)
2366 else:
2387 else:
2367 parents = (self._repo[parents[0]], self._repo[parents[1]])
2388 parents = (self._repo[parents[0]], self._repo[parents[1]])
2368
2389
2369 files = self.files()
2390 files = self.files()
2370
2391
2371 def getfile(repo, memctx, path):
2392 def getfile(repo, memctx, path):
2372 if self._cache[path][b'exists']:
2393 if self._cache[path][b'exists']:
2373 return memfilectx(
2394 return memfilectx(
2374 repo,
2395 repo,
2375 memctx,
2396 memctx,
2376 path,
2397 path,
2377 self._cache[path][b'data'],
2398 self._cache[path][b'data'],
2378 b'l' in self._cache[path][b'flags'],
2399 b'l' in self._cache[path][b'flags'],
2379 b'x' in self._cache[path][b'flags'],
2400 b'x' in self._cache[path][b'flags'],
2380 self._cache[path][b'copied'],
2401 self._cache[path][b'copied'],
2381 )
2402 )
2382 else:
2403 else:
2383 # Returning None, but including the path in `files`, is
2404 # Returning None, but including the path in `files`, is
2384 # necessary for memctx to register a deletion.
2405 # necessary for memctx to register a deletion.
2385 return None
2406 return None
2386
2407
2387 return memctx(
2408 return memctx(
2388 self._repo,
2409 self._repo,
2389 parents,
2410 parents,
2390 text,
2411 text,
2391 files,
2412 files,
2392 getfile,
2413 getfile,
2393 date=date,
2414 date=date,
2394 extra=extra,
2415 extra=extra,
2395 user=user,
2416 user=user,
2396 branch=branch,
2417 branch=branch,
2397 editor=editor,
2418 editor=editor,
2398 )
2419 )
2399
2420
2400 def isdirty(self, path):
2421 def isdirty(self, path):
2401 return path in self._cache
2422 return path in self._cache
2402
2423
2403 def isempty(self):
2424 def isempty(self):
2404 # We need to discard any keys that are actually clean before the empty
2425 # We need to discard any keys that are actually clean before the empty
2405 # commit check.
2426 # commit check.
2406 self._compact()
2427 self._compact()
2407 return len(self._cache) == 0
2428 return len(self._cache) == 0
2408
2429
2409 def clean(self):
2430 def clean(self):
2410 self._cache = {}
2431 self._cache = {}
2411
2432
2412 def _compact(self):
2433 def _compact(self):
2413 """Removes keys from the cache that are actually clean, by comparing
2434 """Removes keys from the cache that are actually clean, by comparing
2414 them with the underlying context.
2435 them with the underlying context.
2415
2436
2416 This can occur during the merge process, e.g. by passing --tool :local
2437 This can occur during the merge process, e.g. by passing --tool :local
2417 to resolve a conflict.
2438 to resolve a conflict.
2418 """
2439 """
2419 keys = []
2440 keys = []
2420 # This won't be perfect, but can help performance significantly when
2441 # This won't be perfect, but can help performance significantly when
2421 # using things like remotefilelog.
2442 # using things like remotefilelog.
2422 scmutil.prefetchfiles(
2443 scmutil.prefetchfiles(
2423 self.repo(),
2444 self.repo(),
2424 [self.p1().rev()],
2445 [self.p1().rev()],
2425 scmutil.matchfiles(self.repo(), self._cache.keys()),
2446 scmutil.matchfiles(self.repo(), self._cache.keys()),
2426 )
2447 )
2427
2448
2428 for path in self._cache.keys():
2449 for path in self._cache.keys():
2429 cache = self._cache[path]
2450 cache = self._cache[path]
2430 try:
2451 try:
2431 underlying = self._wrappedctx[path]
2452 underlying = self._wrappedctx[path]
2432 if (
2453 if (
2433 underlying.data() == cache[b'data']
2454 underlying.data() == cache[b'data']
2434 and underlying.flags() == cache[b'flags']
2455 and underlying.flags() == cache[b'flags']
2435 ):
2456 ):
2436 keys.append(path)
2457 keys.append(path)
2437 except error.ManifestLookupError:
2458 except error.ManifestLookupError:
2438 # Path not in the underlying manifest (created).
2459 # Path not in the underlying manifest (created).
2439 continue
2460 continue
2440
2461
2441 for path in keys:
2462 for path in keys:
2442 del self._cache[path]
2463 del self._cache[path]
2443 return keys
2464 return keys
2444
2465
2445 def _markdirty(
2466 def _markdirty(
2446 self, path, exists, data=None, date=None, flags=b'', copied=None
2467 self, path, exists, data=None, date=None, flags=b'', copied=None
2447 ):
2468 ):
2448 # data not provided, let's see if we already have some; if not, let's
2469 # data not provided, let's see if we already have some; if not, let's
2449 # grab it from our underlying context, so that we always have data if
2470 # grab it from our underlying context, so that we always have data if
2450 # the file is marked as existing.
2471 # the file is marked as existing.
2451 if exists and data is None:
2472 if exists and data is None:
2452 oldentry = self._cache.get(path) or {}
2473 oldentry = self._cache.get(path) or {}
2453 data = oldentry.get(b'data')
2474 data = oldentry.get(b'data')
2454 if data is None:
2475 if data is None:
2455 data = self._wrappedctx[path].data()
2476 data = self._wrappedctx[path].data()
2456
2477
2457 self._cache[path] = {
2478 self._cache[path] = {
2458 b'exists': exists,
2479 b'exists': exists,
2459 b'data': data,
2480 b'data': data,
2460 b'date': date,
2481 b'date': date,
2461 b'flags': flags,
2482 b'flags': flags,
2462 b'copied': copied,
2483 b'copied': copied,
2463 }
2484 }
2464
2485
2465 def filectx(self, path, filelog=None):
2486 def filectx(self, path, filelog=None):
2466 return overlayworkingfilectx(
2487 return overlayworkingfilectx(
2467 self._repo, path, parent=self, filelog=filelog
2488 self._repo, path, parent=self, filelog=filelog
2468 )
2489 )
2469
2490
2470
2491
2471 class overlayworkingfilectx(committablefilectx):
2492 class overlayworkingfilectx(committablefilectx):
2472 """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
2493 """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
2473 cache, which can be flushed through later by calling ``flush()``."""
2494 cache, which can be flushed through later by calling ``flush()``."""
2474
2495
2475 def __init__(self, repo, path, filelog=None, parent=None):
2496 def __init__(self, repo, path, filelog=None, parent=None):
2476 super(overlayworkingfilectx, self).__init__(repo, path, filelog, parent)
2497 super(overlayworkingfilectx, self).__init__(repo, path, filelog, parent)
2477 self._repo = repo
2498 self._repo = repo
2478 self._parent = parent
2499 self._parent = parent
2479 self._path = path
2500 self._path = path
2480
2501
2481 def cmp(self, fctx):
2502 def cmp(self, fctx):
2482 return self.data() != fctx.data()
2503 return self.data() != fctx.data()
2483
2504
2484 def changectx(self):
2505 def changectx(self):
2485 return self._parent
2506 return self._parent
2486
2507
2487 def data(self):
2508 def data(self):
2488 return self._parent.data(self._path)
2509 return self._parent.data(self._path)
2489
2510
2490 def date(self):
2511 def date(self):
2491 return self._parent.filedate(self._path)
2512 return self._parent.filedate(self._path)
2492
2513
2493 def exists(self):
2514 def exists(self):
2494 return self.lexists()
2515 return self.lexists()
2495
2516
2496 def lexists(self):
2517 def lexists(self):
2497 return self._parent.exists(self._path)
2518 return self._parent.exists(self._path)
2498
2519
2499 def copysource(self):
2520 def copysource(self):
2500 return self._parent.copydata(self._path)
2521 return self._parent.copydata(self._path)
2501
2522
2502 def size(self):
2523 def size(self):
2503 return self._parent.size(self._path)
2524 return self._parent.size(self._path)
2504
2525
2505 def markcopied(self, origin):
2526 def markcopied(self, origin):
2506 self._parent.markcopied(self._path, origin)
2527 self._parent.markcopied(self._path, origin)
2507
2528
2508 def audit(self):
2529 def audit(self):
2509 pass
2530 pass
2510
2531
2511 def flags(self):
2532 def flags(self):
2512 return self._parent.flags(self._path)
2533 return self._parent.flags(self._path)
2513
2534
2514 def setflags(self, islink, isexec):
2535 def setflags(self, islink, isexec):
2515 return self._parent.setflags(self._path, islink, isexec)
2536 return self._parent.setflags(self._path, islink, isexec)
2516
2537
2517 def write(self, data, flags, backgroundclose=False, **kwargs):
2538 def write(self, data, flags, backgroundclose=False, **kwargs):
2518 return self._parent.write(self._path, data, flags, **kwargs)
2539 return self._parent.write(self._path, data, flags, **kwargs)
2519
2540
2520 def remove(self, ignoremissing=False):
2541 def remove(self, ignoremissing=False):
2521 return self._parent.remove(self._path)
2542 return self._parent.remove(self._path)
2522
2543
2523 def clearunknown(self):
2544 def clearunknown(self):
2524 pass
2545 pass
2525
2546
2526
2547
2527 class workingcommitctx(workingctx):
2548 class workingcommitctx(workingctx):
2528 """A workingcommitctx object makes access to data related to
2549 """A workingcommitctx object makes access to data related to
2529 the revision being committed convenient.
2550 the revision being committed convenient.
2530
2551
2531 This hides changes in the working directory, if they aren't
2552 This hides changes in the working directory, if they aren't
2532 committed in this context.
2553 committed in this context.
2533 """
2554 """
2534
2555
2535 def __init__(
2556 def __init__(
2536 self, repo, changes, text=b"", user=None, date=None, extra=None
2557 self, repo, changes, text=b"", user=None, date=None, extra=None
2537 ):
2558 ):
2538 super(workingcommitctx, self).__init__(
2559 super(workingcommitctx, self).__init__(
2539 repo, text, user, date, extra, changes
2560 repo, text, user, date, extra, changes
2540 )
2561 )
2541
2562
2542 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
2563 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
2543 """Return matched files only in ``self._status``
2564 """Return matched files only in ``self._status``
2544
2565
2545 Uncommitted files appear "clean" via this context, even if
2566 Uncommitted files appear "clean" via this context, even if
2546 they aren't actually so in the working directory.
2567 they aren't actually so in the working directory.
2547 """
2568 """
2548 if clean:
2569 if clean:
2549 clean = [f for f in self._manifest if f not in self._changedset]
2570 clean = [f for f in self._manifest if f not in self._changedset]
2550 else:
2571 else:
2551 clean = []
2572 clean = []
2552 return scmutil.status(
2573 return scmutil.status(
2553 [f for f in self._status.modified if match(f)],
2574 [f for f in self._status.modified if match(f)],
2554 [f for f in self._status.added if match(f)],
2575 [f for f in self._status.added if match(f)],
2555 [f for f in self._status.removed if match(f)],
2576 [f for f in self._status.removed if match(f)],
2556 [],
2577 [],
2557 [],
2578 [],
2558 [],
2579 [],
2559 clean,
2580 clean,
2560 )
2581 )
2561
2582
2562 @propertycache
2583 @propertycache
2563 def _changedset(self):
2584 def _changedset(self):
2564 """Return the set of files changed in this context
2585 """Return the set of files changed in this context
2565 """
2586 """
2566 changed = set(self._status.modified)
2587 changed = set(self._status.modified)
2567 changed.update(self._status.added)
2588 changed.update(self._status.added)
2568 changed.update(self._status.removed)
2589 changed.update(self._status.removed)
2569 return changed
2590 return changed
2570
2591
2571
2592
2572 def makecachingfilectxfn(func):
2593 def makecachingfilectxfn(func):
2573 """Create a filectxfn that caches based on the path.
2594 """Create a filectxfn that caches based on the path.
2574
2595
2575 We can't use util.cachefunc because it uses all arguments as the cache
2596 We can't use util.cachefunc because it uses all arguments as the cache
2576 key and this creates a cycle since the arguments include the repo and
2597 key and this creates a cycle since the arguments include the repo and
2577 memctx.
2598 memctx.
2578 """
2599 """
2579 cache = {}
2600 cache = {}
2580
2601
2581 def getfilectx(repo, memctx, path):
2602 def getfilectx(repo, memctx, path):
2582 if path not in cache:
2603 if path not in cache:
2583 cache[path] = func(repo, memctx, path)
2604 cache[path] = func(repo, memctx, path)
2584 return cache[path]
2605 return cache[path]
2585
2606
2586 return getfilectx
2607 return getfilectx
2587
2608
2588
2609
2589 def memfilefromctx(ctx):
2610 def memfilefromctx(ctx):
2590 """Given a context return a memfilectx for ctx[path]
2611 """Given a context return a memfilectx for ctx[path]
2591
2612
2592 This is a convenience method for building a memctx based on another
2613 This is a convenience method for building a memctx based on another
2593 context.
2614 context.
2594 """
2615 """
2595
2616
2596 def getfilectx(repo, memctx, path):
2617 def getfilectx(repo, memctx, path):
2597 fctx = ctx[path]
2618 fctx = ctx[path]
2598 copysource = fctx.copysource()
2619 copysource = fctx.copysource()
2599 return memfilectx(
2620 return memfilectx(
2600 repo,
2621 repo,
2601 memctx,
2622 memctx,
2602 path,
2623 path,
2603 fctx.data(),
2624 fctx.data(),
2604 islink=fctx.islink(),
2625 islink=fctx.islink(),
2605 isexec=fctx.isexec(),
2626 isexec=fctx.isexec(),
2606 copysource=copysource,
2627 copysource=copysource,
2607 )
2628 )
2608
2629
2609 return getfilectx
2630 return getfilectx
2610
2631
2611
2632
2612 def memfilefrompatch(patchstore):
2633 def memfilefrompatch(patchstore):
2613 """Given a patch (e.g. patchstore object) return a memfilectx
2634 """Given a patch (e.g. patchstore object) return a memfilectx
2614
2635
2615 This is a convenience method for building a memctx based on a patchstore.
2636 This is a convenience method for building a memctx based on a patchstore.
2616 """
2637 """
2617
2638
2618 def getfilectx(repo, memctx, path):
2639 def getfilectx(repo, memctx, path):
2619 data, mode, copysource = patchstore.getfile(path)
2640 data, mode, copysource = patchstore.getfile(path)
2620 if data is None:
2641 if data is None:
2621 return None
2642 return None
2622 islink, isexec = mode
2643 islink, isexec = mode
2623 return memfilectx(
2644 return memfilectx(
2624 repo,
2645 repo,
2625 memctx,
2646 memctx,
2626 path,
2647 path,
2627 data,
2648 data,
2628 islink=islink,
2649 islink=islink,
2629 isexec=isexec,
2650 isexec=isexec,
2630 copysource=copysource,
2651 copysource=copysource,
2631 )
2652 )
2632
2653
2633 return getfilectx
2654 return getfilectx
2634
2655
2635
2656
2636 class memctx(committablectx):
2657 class memctx(committablectx):
2637 """Use memctx to perform in-memory commits via localrepo.commitctx().
2658 """Use memctx to perform in-memory commits via localrepo.commitctx().
2638
2659
2639 Revision information is supplied at initialization time while
2660 Revision information is supplied at initialization time while
2640 related files data and is made available through a callback
2661 related files data and is made available through a callback
2641 mechanism. 'repo' is the current localrepo, 'parents' is a
2662 mechanism. 'repo' is the current localrepo, 'parents' is a
2642 sequence of two parent revisions identifiers (pass None for every
2663 sequence of two parent revisions identifiers (pass None for every
2643 missing parent), 'text' is the commit message and 'files' lists
2664 missing parent), 'text' is the commit message and 'files' lists
2644 names of files touched by the revision (normalized and relative to
2665 names of files touched by the revision (normalized and relative to
2645 repository root).
2666 repository root).
2646
2667
2647 filectxfn(repo, memctx, path) is a callable receiving the
2668 filectxfn(repo, memctx, path) is a callable receiving the
2648 repository, the current memctx object and the normalized path of
2669 repository, the current memctx object and the normalized path of
2649 requested file, relative to repository root. It is fired by the
2670 requested file, relative to repository root. It is fired by the
2650 commit function for every file in 'files', but calls order is
2671 commit function for every file in 'files', but calls order is
2651 undefined. If the file is available in the revision being
2672 undefined. If the file is available in the revision being
2652 committed (updated or added), filectxfn returns a memfilectx
2673 committed (updated or added), filectxfn returns a memfilectx
2653 object. If the file was removed, filectxfn return None for recent
2674 object. If the file was removed, filectxfn return None for recent
2654 Mercurial. Moved files are represented by marking the source file
2675 Mercurial. Moved files are represented by marking the source file
2655 removed and the new file added with copy information (see
2676 removed and the new file added with copy information (see
2656 memfilectx).
2677 memfilectx).
2657
2678
2658 user receives the committer name and defaults to current
2679 user receives the committer name and defaults to current
2659 repository username, date is the commit date in any format
2680 repository username, date is the commit date in any format
2660 supported by dateutil.parsedate() and defaults to current date, extra
2681 supported by dateutil.parsedate() and defaults to current date, extra
2661 is a dictionary of metadata or is left empty.
2682 is a dictionary of metadata or is left empty.
2662 """
2683 """
2663
2684
2664 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2685 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2665 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2686 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2666 # this field to determine what to do in filectxfn.
2687 # this field to determine what to do in filectxfn.
2667 _returnnoneformissingfiles = True
2688 _returnnoneformissingfiles = True
2668
2689
2669 def __init__(
2690 def __init__(
2670 self,
2691 self,
2671 repo,
2692 repo,
2672 parents,
2693 parents,
2673 text,
2694 text,
2674 files,
2695 files,
2675 filectxfn,
2696 filectxfn,
2676 user=None,
2697 user=None,
2677 date=None,
2698 date=None,
2678 extra=None,
2699 extra=None,
2679 branch=None,
2700 branch=None,
2680 editor=False,
2701 editor=False,
2681 ):
2702 ):
2682 super(memctx, self).__init__(
2703 super(memctx, self).__init__(
2683 repo, text, user, date, extra, branch=branch
2704 repo, text, user, date, extra, branch=branch
2684 )
2705 )
2685 self._rev = None
2706 self._rev = None
2686 self._node = None
2707 self._node = None
2687 parents = [(p or nullid) for p in parents]
2708 parents = [(p or nullid) for p in parents]
2688 p1, p2 = parents
2709 p1, p2 = parents
2689 self._parents = [self._repo[p] for p in (p1, p2)]
2710 self._parents = [self._repo[p] for p in (p1, p2)]
2690 files = sorted(set(files))
2711 files = sorted(set(files))
2691 self._files = files
2712 self._files = files
2692 self.substate = {}
2713 self.substate = {}
2693
2714
2694 if isinstance(filectxfn, patch.filestore):
2715 if isinstance(filectxfn, patch.filestore):
2695 filectxfn = memfilefrompatch(filectxfn)
2716 filectxfn = memfilefrompatch(filectxfn)
2696 elif not callable(filectxfn):
2717 elif not callable(filectxfn):
2697 # if store is not callable, wrap it in a function
2718 # if store is not callable, wrap it in a function
2698 filectxfn = memfilefromctx(filectxfn)
2719 filectxfn = memfilefromctx(filectxfn)
2699
2720
2700 # memoizing increases performance for e.g. vcs convert scenarios.
2721 # memoizing increases performance for e.g. vcs convert scenarios.
2701 self._filectxfn = makecachingfilectxfn(filectxfn)
2722 self._filectxfn = makecachingfilectxfn(filectxfn)
2702
2723
2703 if editor:
2724 if editor:
2704 self._text = editor(self._repo, self, [])
2725 self._text = editor(self._repo, self, [])
2705 self._repo.savecommitmessage(self._text)
2726 self._repo.savecommitmessage(self._text)
2706
2727
2707 def filectx(self, path, filelog=None):
2728 def filectx(self, path, filelog=None):
2708 """get a file context from the working directory
2729 """get a file context from the working directory
2709
2730
2710 Returns None if file doesn't exist and should be removed."""
2731 Returns None if file doesn't exist and should be removed."""
2711 return self._filectxfn(self._repo, self, path)
2732 return self._filectxfn(self._repo, self, path)
2712
2733
2713 def commit(self):
2734 def commit(self):
2714 """commit context to the repo"""
2735 """commit context to the repo"""
2715 return self._repo.commitctx(self)
2736 return self._repo.commitctx(self)
2716
2737
2717 @propertycache
2738 @propertycache
2718 def _manifest(self):
2739 def _manifest(self):
2719 """generate a manifest based on the return values of filectxfn"""
2740 """generate a manifest based on the return values of filectxfn"""
2720
2741
2721 # keep this simple for now; just worry about p1
2742 # keep this simple for now; just worry about p1
2722 pctx = self._parents[0]
2743 pctx = self._parents[0]
2723 man = pctx.manifest().copy()
2744 man = pctx.manifest().copy()
2724
2745
2725 for f in self._status.modified:
2746 for f in self._status.modified:
2726 man[f] = modifiednodeid
2747 man[f] = modifiednodeid
2727
2748
2728 for f in self._status.added:
2749 for f in self._status.added:
2729 man[f] = addednodeid
2750 man[f] = addednodeid
2730
2751
2731 for f in self._status.removed:
2752 for f in self._status.removed:
2732 if f in man:
2753 if f in man:
2733 del man[f]
2754 del man[f]
2734
2755
2735 return man
2756 return man
2736
2757
2737 @propertycache
2758 @propertycache
2738 def _status(self):
2759 def _status(self):
2739 """Calculate exact status from ``files`` specified at construction
2760 """Calculate exact status from ``files`` specified at construction
2740 """
2761 """
2741 man1 = self.p1().manifest()
2762 man1 = self.p1().manifest()
2742 p2 = self._parents[1]
2763 p2 = self._parents[1]
2743 # "1 < len(self._parents)" can't be used for checking
2764 # "1 < len(self._parents)" can't be used for checking
2744 # existence of the 2nd parent, because "memctx._parents" is
2765 # existence of the 2nd parent, because "memctx._parents" is
2745 # explicitly initialized by the list, of which length is 2.
2766 # explicitly initialized by the list, of which length is 2.
2746 if p2.node() != nullid:
2767 if p2.node() != nullid:
2747 man2 = p2.manifest()
2768 man2 = p2.manifest()
2748 managing = lambda f: f in man1 or f in man2
2769 managing = lambda f: f in man1 or f in man2
2749 else:
2770 else:
2750 managing = lambda f: f in man1
2771 managing = lambda f: f in man1
2751
2772
2752 modified, added, removed = [], [], []
2773 modified, added, removed = [], [], []
2753 for f in self._files:
2774 for f in self._files:
2754 if not managing(f):
2775 if not managing(f):
2755 added.append(f)
2776 added.append(f)
2756 elif self[f]:
2777 elif self[f]:
2757 modified.append(f)
2778 modified.append(f)
2758 else:
2779 else:
2759 removed.append(f)
2780 removed.append(f)
2760
2781
2761 return scmutil.status(modified, added, removed, [], [], [], [])
2782 return scmutil.status(modified, added, removed, [], [], [], [])
2762
2783
2763
2784
2764 class memfilectx(committablefilectx):
2785 class memfilectx(committablefilectx):
2765 """memfilectx represents an in-memory file to commit.
2786 """memfilectx represents an in-memory file to commit.
2766
2787
2767 See memctx and committablefilectx for more details.
2788 See memctx and committablefilectx for more details.
2768 """
2789 """
2769
2790
2770 def __init__(
2791 def __init__(
2771 self,
2792 self,
2772 repo,
2793 repo,
2773 changectx,
2794 changectx,
2774 path,
2795 path,
2775 data,
2796 data,
2776 islink=False,
2797 islink=False,
2777 isexec=False,
2798 isexec=False,
2778 copysource=None,
2799 copysource=None,
2779 ):
2800 ):
2780 """
2801 """
2781 path is the normalized file path relative to repository root.
2802 path is the normalized file path relative to repository root.
2782 data is the file content as a string.
2803 data is the file content as a string.
2783 islink is True if the file is a symbolic link.
2804 islink is True if the file is a symbolic link.
2784 isexec is True if the file is executable.
2805 isexec is True if the file is executable.
2785 copied is the source file path if current file was copied in the
2806 copied is the source file path if current file was copied in the
2786 revision being committed, or None."""
2807 revision being committed, or None."""
2787 super(memfilectx, self).__init__(repo, path, None, changectx)
2808 super(memfilectx, self).__init__(repo, path, None, changectx)
2788 self._data = data
2809 self._data = data
2789 if islink:
2810 if islink:
2790 self._flags = b'l'
2811 self._flags = b'l'
2791 elif isexec:
2812 elif isexec:
2792 self._flags = b'x'
2813 self._flags = b'x'
2793 else:
2814 else:
2794 self._flags = b''
2815 self._flags = b''
2795 self._copysource = copysource
2816 self._copysource = copysource
2796
2817
2797 def copysource(self):
2818 def copysource(self):
2798 return self._copysource
2819 return self._copysource
2799
2820
2800 def cmp(self, fctx):
2821 def cmp(self, fctx):
2801 return self.data() != fctx.data()
2822 return self.data() != fctx.data()
2802
2823
2803 def data(self):
2824 def data(self):
2804 return self._data
2825 return self._data
2805
2826
2806 def remove(self, ignoremissing=False):
2827 def remove(self, ignoremissing=False):
2807 """wraps unlink for a repo's working directory"""
2828 """wraps unlink for a repo's working directory"""
2808 # need to figure out what to do here
2829 # need to figure out what to do here
2809 del self._changectx[self._path]
2830 del self._changectx[self._path]
2810
2831
2811 def write(self, data, flags, **kwargs):
2832 def write(self, data, flags, **kwargs):
2812 """wraps repo.wwrite"""
2833 """wraps repo.wwrite"""
2813 self._data = data
2834 self._data = data
2814
2835
2815
2836
2816 class metadataonlyctx(committablectx):
2837 class metadataonlyctx(committablectx):
2817 """Like memctx but it's reusing the manifest of different commit.
2838 """Like memctx but it's reusing the manifest of different commit.
2818 Intended to be used by lightweight operations that are creating
2839 Intended to be used by lightweight operations that are creating
2819 metadata-only changes.
2840 metadata-only changes.
2820
2841
2821 Revision information is supplied at initialization time. 'repo' is the
2842 Revision information is supplied at initialization time. 'repo' is the
2822 current localrepo, 'ctx' is original revision which manifest we're reuisng
2843 current localrepo, 'ctx' is original revision which manifest we're reuisng
2823 'parents' is a sequence of two parent revisions identifiers (pass None for
2844 'parents' is a sequence of two parent revisions identifiers (pass None for
2824 every missing parent), 'text' is the commit.
2845 every missing parent), 'text' is the commit.
2825
2846
2826 user receives the committer name and defaults to current repository
2847 user receives the committer name and defaults to current repository
2827 username, date is the commit date in any format supported by
2848 username, date is the commit date in any format supported by
2828 dateutil.parsedate() and defaults to current date, extra is a dictionary of
2849 dateutil.parsedate() and defaults to current date, extra is a dictionary of
2829 metadata or is left empty.
2850 metadata or is left empty.
2830 """
2851 """
2831
2852
2832 def __init__(
2853 def __init__(
2833 self,
2854 self,
2834 repo,
2855 repo,
2835 originalctx,
2856 originalctx,
2836 parents=None,
2857 parents=None,
2837 text=None,
2858 text=None,
2838 user=None,
2859 user=None,
2839 date=None,
2860 date=None,
2840 extra=None,
2861 extra=None,
2841 editor=False,
2862 editor=False,
2842 ):
2863 ):
2843 if text is None:
2864 if text is None:
2844 text = originalctx.description()
2865 text = originalctx.description()
2845 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2866 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2846 self._rev = None
2867 self._rev = None
2847 self._node = None
2868 self._node = None
2848 self._originalctx = originalctx
2869 self._originalctx = originalctx
2849 self._manifestnode = originalctx.manifestnode()
2870 self._manifestnode = originalctx.manifestnode()
2850 if parents is None:
2871 if parents is None:
2851 parents = originalctx.parents()
2872 parents = originalctx.parents()
2852 else:
2873 else:
2853 parents = [repo[p] for p in parents if p is not None]
2874 parents = [repo[p] for p in parents if p is not None]
2854 parents = parents[:]
2875 parents = parents[:]
2855 while len(parents) < 2:
2876 while len(parents) < 2:
2856 parents.append(repo[nullid])
2877 parents.append(repo[nullid])
2857 p1, p2 = self._parents = parents
2878 p1, p2 = self._parents = parents
2858
2879
2859 # sanity check to ensure that the reused manifest parents are
2880 # sanity check to ensure that the reused manifest parents are
2860 # manifests of our commit parents
2881 # manifests of our commit parents
2861 mp1, mp2 = self.manifestctx().parents
2882 mp1, mp2 = self.manifestctx().parents
2862 if p1 != nullid and p1.manifestnode() != mp1:
2883 if p1 != nullid and p1.manifestnode() != mp1:
2863 raise RuntimeError(
2884 raise RuntimeError(
2864 r"can't reuse the manifest: its p1 "
2885 r"can't reuse the manifest: its p1 "
2865 r"doesn't match the new ctx p1"
2886 r"doesn't match the new ctx p1"
2866 )
2887 )
2867 if p2 != nullid and p2.manifestnode() != mp2:
2888 if p2 != nullid and p2.manifestnode() != mp2:
2868 raise RuntimeError(
2889 raise RuntimeError(
2869 r"can't reuse the manifest: "
2890 r"can't reuse the manifest: "
2870 r"its p2 doesn't match the new ctx p2"
2891 r"its p2 doesn't match the new ctx p2"
2871 )
2892 )
2872
2893
2873 self._files = originalctx.files()
2894 self._files = originalctx.files()
2874 self.substate = {}
2895 self.substate = {}
2875
2896
2876 if editor:
2897 if editor:
2877 self._text = editor(self._repo, self, [])
2898 self._text = editor(self._repo, self, [])
2878 self._repo.savecommitmessage(self._text)
2899 self._repo.savecommitmessage(self._text)
2879
2900
2880 def manifestnode(self):
2901 def manifestnode(self):
2881 return self._manifestnode
2902 return self._manifestnode
2882
2903
2883 @property
2904 @property
2884 def _manifestctx(self):
2905 def _manifestctx(self):
2885 return self._repo.manifestlog[self._manifestnode]
2906 return self._repo.manifestlog[self._manifestnode]
2886
2907
2887 def filectx(self, path, filelog=None):
2908 def filectx(self, path, filelog=None):
2888 return self._originalctx.filectx(path, filelog=filelog)
2909 return self._originalctx.filectx(path, filelog=filelog)
2889
2910
2890 def commit(self):
2911 def commit(self):
2891 """commit context to the repo"""
2912 """commit context to the repo"""
2892 return self._repo.commitctx(self)
2913 return self._repo.commitctx(self)
2893
2914
2894 @property
2915 @property
2895 def _manifest(self):
2916 def _manifest(self):
2896 return self._originalctx.manifest()
2917 return self._originalctx.manifest()
2897
2918
2898 @propertycache
2919 @propertycache
2899 def _status(self):
2920 def _status(self):
2900 """Calculate exact status from ``files`` specified in the ``origctx``
2921 """Calculate exact status from ``files`` specified in the ``origctx``
2901 and parents manifests.
2922 and parents manifests.
2902 """
2923 """
2903 man1 = self.p1().manifest()
2924 man1 = self.p1().manifest()
2904 p2 = self._parents[1]
2925 p2 = self._parents[1]
2905 # "1 < len(self._parents)" can't be used for checking
2926 # "1 < len(self._parents)" can't be used for checking
2906 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2927 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2907 # explicitly initialized by the list, of which length is 2.
2928 # explicitly initialized by the list, of which length is 2.
2908 if p2.node() != nullid:
2929 if p2.node() != nullid:
2909 man2 = p2.manifest()
2930 man2 = p2.manifest()
2910 managing = lambda f: f in man1 or f in man2
2931 managing = lambda f: f in man1 or f in man2
2911 else:
2932 else:
2912 managing = lambda f: f in man1
2933 managing = lambda f: f in man1
2913
2934
2914 modified, added, removed = [], [], []
2935 modified, added, removed = [], [], []
2915 for f in self._files:
2936 for f in self._files:
2916 if not managing(f):
2937 if not managing(f):
2917 added.append(f)
2938 added.append(f)
2918 elif f in self:
2939 elif f in self:
2919 modified.append(f)
2940 modified.append(f)
2920 else:
2941 else:
2921 removed.append(f)
2942 removed.append(f)
2922
2943
2923 return scmutil.status(modified, added, removed, [], [], [], [])
2944 return scmutil.status(modified, added, removed, [], [], [], [])
2924
2945
2925
2946
2926 class arbitraryfilectx(object):
2947 class arbitraryfilectx(object):
2927 """Allows you to use filectx-like functions on a file in an arbitrary
2948 """Allows you to use filectx-like functions on a file in an arbitrary
2928 location on disk, possibly not in the working directory.
2949 location on disk, possibly not in the working directory.
2929 """
2950 """
2930
2951
2931 def __init__(self, path, repo=None):
2952 def __init__(self, path, repo=None):
2932 # Repo is optional because contrib/simplemerge uses this class.
2953 # Repo is optional because contrib/simplemerge uses this class.
2933 self._repo = repo
2954 self._repo = repo
2934 self._path = path
2955 self._path = path
2935
2956
2936 def cmp(self, fctx):
2957 def cmp(self, fctx):
2937 # filecmp follows symlinks whereas `cmp` should not, so skip the fast
2958 # filecmp follows symlinks whereas `cmp` should not, so skip the fast
2938 # path if either side is a symlink.
2959 # path if either side is a symlink.
2939 symlinks = b'l' in self.flags() or b'l' in fctx.flags()
2960 symlinks = b'l' in self.flags() or b'l' in fctx.flags()
2940 if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
2961 if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
2941 # Add a fast-path for merge if both sides are disk-backed.
2962 # Add a fast-path for merge if both sides are disk-backed.
2942 # Note that filecmp uses the opposite return values (True if same)
2963 # Note that filecmp uses the opposite return values (True if same)
2943 # from our cmp functions (True if different).
2964 # from our cmp functions (True if different).
2944 return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
2965 return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
2945 return self.data() != fctx.data()
2966 return self.data() != fctx.data()
2946
2967
2947 def path(self):
2968 def path(self):
2948 return self._path
2969 return self._path
2949
2970
2950 def flags(self):
2971 def flags(self):
2951 return b''
2972 return b''
2952
2973
2953 def data(self):
2974 def data(self):
2954 return util.readfile(self._path)
2975 return util.readfile(self._path)
2955
2976
2956 def decodeddata(self):
2977 def decodeddata(self):
2957 with open(self._path, b"rb") as f:
2978 with open(self._path, b"rb") as f:
2958 return f.read()
2979 return f.read()
2959
2980
2960 def remove(self):
2981 def remove(self):
2961 util.unlink(self._path)
2982 util.unlink(self._path)
2962
2983
2963 def write(self, data, flags, **kwargs):
2984 def write(self, data, flags, **kwargs):
2964 assert not flags
2985 assert not flags
2965 with open(self._path, b"wb") as f:
2986 with open(self._path, b"wb") as f:
2966 f.write(data)
2987 f.write(data)
@@ -1,879 +1,881 b''
1 # copies.py - copy detection for Mercurial
1 # copies.py - copy detection for Mercurial
2 #
2 #
3 # Copyright 2008 Matt Mackall <mpm@selenic.com>
3 # Copyright 2008 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import heapq
11 import heapq
12 import os
12 import os
13
13
14 from .i18n import _
14 from .i18n import _
15
15
16 from . import (
16 from . import (
17 match as matchmod,
17 match as matchmod,
18 node,
18 node,
19 pathutil,
19 pathutil,
20 pycompat,
20 pycompat,
21 util,
21 util,
22 )
22 )
23 from .utils import stringutil
23 from .utils import stringutil
24
24
25
25
26 def _findlimit(repo, ctxa, ctxb):
26 def _findlimit(repo, ctxa, ctxb):
27 """
27 """
28 Find the last revision that needs to be checked to ensure that a full
28 Find the last revision that needs to be checked to ensure that a full
29 transitive closure for file copies can be properly calculated.
29 transitive closure for file copies can be properly calculated.
30 Generally, this means finding the earliest revision number that's an
30 Generally, this means finding the earliest revision number that's an
31 ancestor of a or b but not both, except when a or b is a direct descendent
31 ancestor of a or b but not both, except when a or b is a direct descendent
32 of the other, in which case we can return the minimum revnum of a and b.
32 of the other, in which case we can return the minimum revnum of a and b.
33 """
33 """
34
34
35 # basic idea:
35 # basic idea:
36 # - mark a and b with different sides
36 # - mark a and b with different sides
37 # - if a parent's children are all on the same side, the parent is
37 # - if a parent's children are all on the same side, the parent is
38 # on that side, otherwise it is on no side
38 # on that side, otherwise it is on no side
39 # - walk the graph in topological order with the help of a heap;
39 # - walk the graph in topological order with the help of a heap;
40 # - add unseen parents to side map
40 # - add unseen parents to side map
41 # - clear side of any parent that has children on different sides
41 # - clear side of any parent that has children on different sides
42 # - track number of interesting revs that might still be on a side
42 # - track number of interesting revs that might still be on a side
43 # - track the lowest interesting rev seen
43 # - track the lowest interesting rev seen
44 # - quit when interesting revs is zero
44 # - quit when interesting revs is zero
45
45
46 cl = repo.changelog
46 cl = repo.changelog
47 wdirparents = None
47 wdirparents = None
48 a = ctxa.rev()
48 a = ctxa.rev()
49 b = ctxb.rev()
49 b = ctxb.rev()
50 if a is None:
50 if a is None:
51 wdirparents = (ctxa.p1(), ctxa.p2())
51 wdirparents = (ctxa.p1(), ctxa.p2())
52 a = node.wdirrev
52 a = node.wdirrev
53 if b is None:
53 if b is None:
54 assert not wdirparents
54 assert not wdirparents
55 wdirparents = (ctxb.p1(), ctxb.p2())
55 wdirparents = (ctxb.p1(), ctxb.p2())
56 b = node.wdirrev
56 b = node.wdirrev
57
57
58 side = {a: -1, b: 1}
58 side = {a: -1, b: 1}
59 visit = [-a, -b]
59 visit = [-a, -b]
60 heapq.heapify(visit)
60 heapq.heapify(visit)
61 interesting = len(visit)
61 interesting = len(visit)
62 limit = node.wdirrev
62 limit = node.wdirrev
63
63
64 while interesting:
64 while interesting:
65 r = -(heapq.heappop(visit))
65 r = -(heapq.heappop(visit))
66 if r == node.wdirrev:
66 if r == node.wdirrev:
67 parents = [pctx.rev() for pctx in wdirparents]
67 parents = [pctx.rev() for pctx in wdirparents]
68 else:
68 else:
69 parents = cl.parentrevs(r)
69 parents = cl.parentrevs(r)
70 if parents[1] == node.nullrev:
70 if parents[1] == node.nullrev:
71 parents = parents[:1]
71 parents = parents[:1]
72 for p in parents:
72 for p in parents:
73 if p not in side:
73 if p not in side:
74 # first time we see p; add it to visit
74 # first time we see p; add it to visit
75 side[p] = side[r]
75 side[p] = side[r]
76 if side[p]:
76 if side[p]:
77 interesting += 1
77 interesting += 1
78 heapq.heappush(visit, -p)
78 heapq.heappush(visit, -p)
79 elif side[p] and side[p] != side[r]:
79 elif side[p] and side[p] != side[r]:
80 # p was interesting but now we know better
80 # p was interesting but now we know better
81 side[p] = 0
81 side[p] = 0
82 interesting -= 1
82 interesting -= 1
83 if side[r]:
83 if side[r]:
84 limit = r # lowest rev visited
84 limit = r # lowest rev visited
85 interesting -= 1
85 interesting -= 1
86
86
87 # Consider the following flow (see test-commit-amend.t under issue4405):
87 # Consider the following flow (see test-commit-amend.t under issue4405):
88 # 1/ File 'a0' committed
88 # 1/ File 'a0' committed
89 # 2/ File renamed from 'a0' to 'a1' in a new commit (call it 'a1')
89 # 2/ File renamed from 'a0' to 'a1' in a new commit (call it 'a1')
90 # 3/ Move back to first commit
90 # 3/ Move back to first commit
91 # 4/ Create a new commit via revert to contents of 'a1' (call it 'a1-amend')
91 # 4/ Create a new commit via revert to contents of 'a1' (call it 'a1-amend')
92 # 5/ Rename file from 'a1' to 'a2' and commit --amend 'a1-msg'
92 # 5/ Rename file from 'a1' to 'a2' and commit --amend 'a1-msg'
93 #
93 #
94 # During the amend in step five, we will be in this state:
94 # During the amend in step five, we will be in this state:
95 #
95 #
96 # @ 3 temporary amend commit for a1-amend
96 # @ 3 temporary amend commit for a1-amend
97 # |
97 # |
98 # o 2 a1-amend
98 # o 2 a1-amend
99 # |
99 # |
100 # | o 1 a1
100 # | o 1 a1
101 # |/
101 # |/
102 # o 0 a0
102 # o 0 a0
103 #
103 #
104 # When _findlimit is called, a and b are revs 3 and 0, so limit will be 2,
104 # When _findlimit is called, a and b are revs 3 and 0, so limit will be 2,
105 # yet the filelog has the copy information in rev 1 and we will not look
105 # yet the filelog has the copy information in rev 1 and we will not look
106 # back far enough unless we also look at the a and b as candidates.
106 # back far enough unless we also look at the a and b as candidates.
107 # This only occurs when a is a descendent of b or visa-versa.
107 # This only occurs when a is a descendent of b or visa-versa.
108 return min(limit, a, b)
108 return min(limit, a, b)
109
109
110
110
111 def _filter(src, dst, t):
111 def _filter(src, dst, t):
112 """filters out invalid copies after chaining"""
112 """filters out invalid copies after chaining"""
113
113
114 # When _chain()'ing copies in 'a' (from 'src' via some other commit 'mid')
114 # When _chain()'ing copies in 'a' (from 'src' via some other commit 'mid')
115 # with copies in 'b' (from 'mid' to 'dst'), we can get the different cases
115 # with copies in 'b' (from 'mid' to 'dst'), we can get the different cases
116 # in the following table (not including trivial cases). For example, case 2
116 # in the following table (not including trivial cases). For example, case 2
117 # is where a file existed in 'src' and remained under that name in 'mid' and
117 # is where a file existed in 'src' and remained under that name in 'mid' and
118 # then was renamed between 'mid' and 'dst'.
118 # then was renamed between 'mid' and 'dst'.
119 #
119 #
120 # case src mid dst result
120 # case src mid dst result
121 # 1 x y - -
121 # 1 x y - -
122 # 2 x y y x->y
122 # 2 x y y x->y
123 # 3 x y x -
123 # 3 x y x -
124 # 4 x y z x->z
124 # 4 x y z x->z
125 # 5 - x y -
125 # 5 - x y -
126 # 6 x x y x->y
126 # 6 x x y x->y
127 #
127 #
128 # _chain() takes care of chaining the copies in 'a' and 'b', but it
128 # _chain() takes care of chaining the copies in 'a' and 'b', but it
129 # cannot tell the difference between cases 1 and 2, between 3 and 4, or
129 # cannot tell the difference between cases 1 and 2, between 3 and 4, or
130 # between 5 and 6, so it includes all cases in its result.
130 # between 5 and 6, so it includes all cases in its result.
131 # Cases 1, 3, and 5 are then removed by _filter().
131 # Cases 1, 3, and 5 are then removed by _filter().
132
132
133 for k, v in list(t.items()):
133 for k, v in list(t.items()):
134 # remove copies from files that didn't exist
134 # remove copies from files that didn't exist
135 if v not in src:
135 if v not in src:
136 del t[k]
136 del t[k]
137 # remove criss-crossed copies
137 # remove criss-crossed copies
138 elif k in src and v in dst:
138 elif k in src and v in dst:
139 del t[k]
139 del t[k]
140 # remove copies to files that were then removed
140 # remove copies to files that were then removed
141 elif k not in dst:
141 elif k not in dst:
142 del t[k]
142 del t[k]
143
143
144
144
145 def _chain(a, b):
145 def _chain(a, b):
146 """chain two sets of copies 'a' and 'b'"""
146 """chain two sets of copies 'a' and 'b'"""
147 t = a.copy()
147 t = a.copy()
148 for k, v in pycompat.iteritems(b):
148 for k, v in pycompat.iteritems(b):
149 if v in t:
149 if v in t:
150 t[k] = t[v]
150 t[k] = t[v]
151 else:
151 else:
152 t[k] = v
152 t[k] = v
153 return t
153 return t
154
154
155
155
156 def _tracefile(fctx, am, basemf, limit):
156 def _tracefile(fctx, am, basemf, limit):
157 """return file context that is the ancestor of fctx present in ancestor
157 """return file context that is the ancestor of fctx present in ancestor
158 manifest am, stopping after the first ancestor lower than limit"""
158 manifest am, stopping after the first ancestor lower than limit"""
159
159
160 for f in fctx.ancestors():
160 for f in fctx.ancestors():
161 path = f.path()
161 path = f.path()
162 if am.get(path, None) == f.filenode():
162 if am.get(path, None) == f.filenode():
163 return path
163 return path
164 if basemf and basemf.get(path, None) == f.filenode():
164 if basemf and basemf.get(path, None) == f.filenode():
165 return path
165 return path
166 if not f.isintroducedafter(limit):
166 if not f.isintroducedafter(limit):
167 return None
167 return None
168
168
169
169
170 def _dirstatecopies(repo, match=None):
170 def _dirstatecopies(repo, match=None):
171 ds = repo.dirstate
171 ds = repo.dirstate
172 c = ds.copies().copy()
172 c = ds.copies().copy()
173 for k in list(c):
173 for k in list(c):
174 if ds[k] not in b'anm' or (match and not match(k)):
174 if ds[k] not in b'anm' or (match and not match(k)):
175 del c[k]
175 del c[k]
176 return c
176 return c
177
177
178
178
179 def _computeforwardmissing(a, b, match=None):
179 def _computeforwardmissing(a, b, match=None):
180 """Computes which files are in b but not a.
180 """Computes which files are in b but not a.
181 This is its own function so extensions can easily wrap this call to see what
181 This is its own function so extensions can easily wrap this call to see what
182 files _forwardcopies is about to process.
182 files _forwardcopies is about to process.
183 """
183 """
184 ma = a.manifest()
184 ma = a.manifest()
185 mb = b.manifest()
185 mb = b.manifest()
186 return mb.filesnotin(ma, match=match)
186 return mb.filesnotin(ma, match=match)
187
187
188
188
189 def usechangesetcentricalgo(repo):
189 def usechangesetcentricalgo(repo):
190 """Checks if we should use changeset-centric copy algorithms"""
190 """Checks if we should use changeset-centric copy algorithms"""
191 if repo.filecopiesmode == b'changeset-sidedata':
192 return True
191 readfrom = repo.ui.config(b'experimental', b'copies.read-from')
193 readfrom = repo.ui.config(b'experimental', b'copies.read-from')
192 changesetsource = (b'changeset-only', b'compatibility')
194 changesetsource = (b'changeset-only', b'compatibility')
193 return readfrom in changesetsource
195 return readfrom in changesetsource
194
196
195
197
196 def _committedforwardcopies(a, b, base, match):
198 def _committedforwardcopies(a, b, base, match):
197 """Like _forwardcopies(), but b.rev() cannot be None (working copy)"""
199 """Like _forwardcopies(), but b.rev() cannot be None (working copy)"""
198 # files might have to be traced back to the fctx parent of the last
200 # files might have to be traced back to the fctx parent of the last
199 # one-side-only changeset, but not further back than that
201 # one-side-only changeset, but not further back than that
200 repo = a._repo
202 repo = a._repo
201
203
202 if usechangesetcentricalgo(repo):
204 if usechangesetcentricalgo(repo):
203 return _changesetforwardcopies(a, b, match)
205 return _changesetforwardcopies(a, b, match)
204
206
205 debug = repo.ui.debugflag and repo.ui.configbool(b'devel', b'debug.copies')
207 debug = repo.ui.debugflag and repo.ui.configbool(b'devel', b'debug.copies')
206 dbg = repo.ui.debug
208 dbg = repo.ui.debug
207 if debug:
209 if debug:
208 dbg(b'debug.copies: looking into rename from %s to %s\n' % (a, b))
210 dbg(b'debug.copies: looking into rename from %s to %s\n' % (a, b))
209 limit = _findlimit(repo, a, b)
211 limit = _findlimit(repo, a, b)
210 if debug:
212 if debug:
211 dbg(b'debug.copies: search limit: %d\n' % limit)
213 dbg(b'debug.copies: search limit: %d\n' % limit)
212 am = a.manifest()
214 am = a.manifest()
213 basemf = None if base is None else base.manifest()
215 basemf = None if base is None else base.manifest()
214
216
215 # find where new files came from
217 # find where new files came from
216 # we currently don't try to find where old files went, too expensive
218 # we currently don't try to find where old files went, too expensive
217 # this means we can miss a case like 'hg rm b; hg cp a b'
219 # this means we can miss a case like 'hg rm b; hg cp a b'
218 cm = {}
220 cm = {}
219
221
220 # Computing the forward missing is quite expensive on large manifests, since
222 # Computing the forward missing is quite expensive on large manifests, since
221 # it compares the entire manifests. We can optimize it in the common use
223 # it compares the entire manifests. We can optimize it in the common use
222 # case of computing what copies are in a commit versus its parent (like
224 # case of computing what copies are in a commit versus its parent (like
223 # during a rebase or histedit). Note, we exclude merge commits from this
225 # during a rebase or histedit). Note, we exclude merge commits from this
224 # optimization, since the ctx.files() for a merge commit is not correct for
226 # optimization, since the ctx.files() for a merge commit is not correct for
225 # this comparison.
227 # this comparison.
226 forwardmissingmatch = match
228 forwardmissingmatch = match
227 if b.p1() == a and b.p2().node() == node.nullid:
229 if b.p1() == a and b.p2().node() == node.nullid:
228 filesmatcher = matchmod.exact(b.files())
230 filesmatcher = matchmod.exact(b.files())
229 forwardmissingmatch = matchmod.intersectmatchers(match, filesmatcher)
231 forwardmissingmatch = matchmod.intersectmatchers(match, filesmatcher)
230 missing = _computeforwardmissing(a, b, match=forwardmissingmatch)
232 missing = _computeforwardmissing(a, b, match=forwardmissingmatch)
231
233
232 ancestrycontext = a._repo.changelog.ancestors([b.rev()], inclusive=True)
234 ancestrycontext = a._repo.changelog.ancestors([b.rev()], inclusive=True)
233
235
234 if debug:
236 if debug:
235 dbg(b'debug.copies: missing files to search: %d\n' % len(missing))
237 dbg(b'debug.copies: missing files to search: %d\n' % len(missing))
236
238
237 for f in sorted(missing):
239 for f in sorted(missing):
238 if debug:
240 if debug:
239 dbg(b'debug.copies: tracing file: %s\n' % f)
241 dbg(b'debug.copies: tracing file: %s\n' % f)
240 fctx = b[f]
242 fctx = b[f]
241 fctx._ancestrycontext = ancestrycontext
243 fctx._ancestrycontext = ancestrycontext
242
244
243 if debug:
245 if debug:
244 start = util.timer()
246 start = util.timer()
245 opath = _tracefile(fctx, am, basemf, limit)
247 opath = _tracefile(fctx, am, basemf, limit)
246 if opath:
248 if opath:
247 if debug:
249 if debug:
248 dbg(b'debug.copies: rename of: %s\n' % opath)
250 dbg(b'debug.copies: rename of: %s\n' % opath)
249 cm[f] = opath
251 cm[f] = opath
250 if debug:
252 if debug:
251 dbg(
253 dbg(
252 b'debug.copies: time: %f seconds\n'
254 b'debug.copies: time: %f seconds\n'
253 % (util.timer() - start)
255 % (util.timer() - start)
254 )
256 )
255 return cm
257 return cm
256
258
257
259
258 def _changesetforwardcopies(a, b, match):
260 def _changesetforwardcopies(a, b, match):
259 if a.rev() in (node.nullrev, b.rev()):
261 if a.rev() in (node.nullrev, b.rev()):
260 return {}
262 return {}
261
263
262 repo = a.repo()
264 repo = a.repo()
263 children = {}
265 children = {}
264 cl = repo.changelog
266 cl = repo.changelog
265 missingrevs = cl.findmissingrevs(common=[a.rev()], heads=[b.rev()])
267 missingrevs = cl.findmissingrevs(common=[a.rev()], heads=[b.rev()])
266 for r in missingrevs:
268 for r in missingrevs:
267 for p in cl.parentrevs(r):
269 for p in cl.parentrevs(r):
268 if p == node.nullrev:
270 if p == node.nullrev:
269 continue
271 continue
270 if p not in children:
272 if p not in children:
271 children[p] = [r]
273 children[p] = [r]
272 else:
274 else:
273 children[p].append(r)
275 children[p].append(r)
274
276
275 roots = set(children) - set(missingrevs)
277 roots = set(children) - set(missingrevs)
276 # 'work' contains 3-tuples of a (revision number, parent number, copies).
278 # 'work' contains 3-tuples of a (revision number, parent number, copies).
277 # The parent number is only used for knowing which parent the copies dict
279 # The parent number is only used for knowing which parent the copies dict
278 # came from.
280 # came from.
279 # NOTE: To reduce costly copying the 'copies' dicts, we reuse the same
281 # NOTE: To reduce costly copying the 'copies' dicts, we reuse the same
280 # instance for *one* of the child nodes (the last one). Once an instance
282 # instance for *one* of the child nodes (the last one). Once an instance
281 # has been put on the queue, it is thus no longer safe to modify it.
283 # has been put on the queue, it is thus no longer safe to modify it.
282 # Conversely, it *is* safe to modify an instance popped off the queue.
284 # Conversely, it *is* safe to modify an instance popped off the queue.
283 work = [(r, 1, {}) for r in roots]
285 work = [(r, 1, {}) for r in roots]
284 heapq.heapify(work)
286 heapq.heapify(work)
285 alwaysmatch = match.always()
287 alwaysmatch = match.always()
286 while work:
288 while work:
287 r, i1, copies = heapq.heappop(work)
289 r, i1, copies = heapq.heappop(work)
288 if work and work[0][0] == r:
290 if work and work[0][0] == r:
289 # We are tracing copies from both parents
291 # We are tracing copies from both parents
290 r, i2, copies2 = heapq.heappop(work)
292 r, i2, copies2 = heapq.heappop(work)
291 for dst, src in copies2.items():
293 for dst, src in copies2.items():
292 # Unlike when copies are stored in the filelog, we consider
294 # Unlike when copies are stored in the filelog, we consider
293 # it a copy even if the destination already existed on the
295 # it a copy even if the destination already existed on the
294 # other branch. It's simply too expensive to check if the
296 # other branch. It's simply too expensive to check if the
295 # file existed in the manifest.
297 # file existed in the manifest.
296 if dst not in copies:
298 if dst not in copies:
297 # If it was copied on the p1 side, leave it as copied from
299 # If it was copied on the p1 side, leave it as copied from
298 # that side, even if it was also copied on the p2 side.
300 # that side, even if it was also copied on the p2 side.
299 copies[dst] = copies2[dst]
301 copies[dst] = copies2[dst]
300 if r == b.rev():
302 if r == b.rev():
301 return copies
303 return copies
302 for i, c in enumerate(children[r]):
304 for i, c in enumerate(children[r]):
303 childctx = repo[c]
305 childctx = repo[c]
304 if r == childctx.p1().rev():
306 if r == childctx.p1().rev():
305 parent = 1
307 parent = 1
306 childcopies = childctx.p1copies()
308 childcopies = childctx.p1copies()
307 else:
309 else:
308 assert r == childctx.p2().rev()
310 assert r == childctx.p2().rev()
309 parent = 2
311 parent = 2
310 childcopies = childctx.p2copies()
312 childcopies = childctx.p2copies()
311 if not alwaysmatch:
313 if not alwaysmatch:
312 childcopies = {
314 childcopies = {
313 dst: src for dst, src in childcopies.items() if match(dst)
315 dst: src for dst, src in childcopies.items() if match(dst)
314 }
316 }
315 # Copy the dict only if later iterations will also need it
317 # Copy the dict only if later iterations will also need it
316 if i != len(children[r]) - 1:
318 if i != len(children[r]) - 1:
317 newcopies = copies.copy()
319 newcopies = copies.copy()
318 else:
320 else:
319 newcopies = copies
321 newcopies = copies
320 if childcopies:
322 if childcopies:
321 newcopies = _chain(newcopies, childcopies)
323 newcopies = _chain(newcopies, childcopies)
322 for f in childctx.filesremoved():
324 for f in childctx.filesremoved():
323 if f in newcopies:
325 if f in newcopies:
324 del newcopies[f]
326 del newcopies[f]
325 heapq.heappush(work, (c, parent, newcopies))
327 heapq.heappush(work, (c, parent, newcopies))
326 assert False
328 assert False
327
329
328
330
329 def _forwardcopies(a, b, base=None, match=None):
331 def _forwardcopies(a, b, base=None, match=None):
330 """find {dst@b: src@a} copy mapping where a is an ancestor of b"""
332 """find {dst@b: src@a} copy mapping where a is an ancestor of b"""
331
333
332 if base is None:
334 if base is None:
333 base = a
335 base = a
334 match = a.repo().narrowmatch(match)
336 match = a.repo().narrowmatch(match)
335 # check for working copy
337 # check for working copy
336 if b.rev() is None:
338 if b.rev() is None:
337 cm = _committedforwardcopies(a, b.p1(), base, match)
339 cm = _committedforwardcopies(a, b.p1(), base, match)
338 # combine copies from dirstate if necessary
340 # combine copies from dirstate if necessary
339 copies = _chain(cm, _dirstatecopies(b._repo, match))
341 copies = _chain(cm, _dirstatecopies(b._repo, match))
340 else:
342 else:
341 copies = _committedforwardcopies(a, b, base, match)
343 copies = _committedforwardcopies(a, b, base, match)
342 return copies
344 return copies
343
345
344
346
345 def _backwardrenames(a, b, match):
347 def _backwardrenames(a, b, match):
346 if a._repo.ui.config(b'experimental', b'copytrace') == b'off':
348 if a._repo.ui.config(b'experimental', b'copytrace') == b'off':
347 return {}
349 return {}
348
350
349 # Even though we're not taking copies into account, 1:n rename situations
351 # Even though we're not taking copies into account, 1:n rename situations
350 # can still exist (e.g. hg cp a b; hg mv a c). In those cases we
352 # can still exist (e.g. hg cp a b; hg mv a c). In those cases we
351 # arbitrarily pick one of the renames.
353 # arbitrarily pick one of the renames.
352 # We don't want to pass in "match" here, since that would filter
354 # We don't want to pass in "match" here, since that would filter
353 # the destination by it. Since we're reversing the copies, we want
355 # the destination by it. Since we're reversing the copies, we want
354 # to filter the source instead.
356 # to filter the source instead.
355 f = _forwardcopies(b, a)
357 f = _forwardcopies(b, a)
356 r = {}
358 r = {}
357 for k, v in sorted(pycompat.iteritems(f)):
359 for k, v in sorted(pycompat.iteritems(f)):
358 if match and not match(v):
360 if match and not match(v):
359 continue
361 continue
360 # remove copies
362 # remove copies
361 if v in a:
363 if v in a:
362 continue
364 continue
363 r[v] = k
365 r[v] = k
364 return r
366 return r
365
367
366
368
367 def pathcopies(x, y, match=None):
369 def pathcopies(x, y, match=None):
368 """find {dst@y: src@x} copy mapping for directed compare"""
370 """find {dst@y: src@x} copy mapping for directed compare"""
369 repo = x._repo
371 repo = x._repo
370 debug = repo.ui.debugflag and repo.ui.configbool(b'devel', b'debug.copies')
372 debug = repo.ui.debugflag and repo.ui.configbool(b'devel', b'debug.copies')
371 if debug:
373 if debug:
372 repo.ui.debug(
374 repo.ui.debug(
373 b'debug.copies: searching copies from %s to %s\n' % (x, y)
375 b'debug.copies: searching copies from %s to %s\n' % (x, y)
374 )
376 )
375 if x == y or not x or not y:
377 if x == y or not x or not y:
376 return {}
378 return {}
377 a = y.ancestor(x)
379 a = y.ancestor(x)
378 if a == x:
380 if a == x:
379 if debug:
381 if debug:
380 repo.ui.debug(b'debug.copies: search mode: forward\n')
382 repo.ui.debug(b'debug.copies: search mode: forward\n')
381 if y.rev() is None and x == y.p1():
383 if y.rev() is None and x == y.p1():
382 # short-circuit to avoid issues with merge states
384 # short-circuit to avoid issues with merge states
383 return _dirstatecopies(repo, match)
385 return _dirstatecopies(repo, match)
384 copies = _forwardcopies(x, y, match=match)
386 copies = _forwardcopies(x, y, match=match)
385 elif a == y:
387 elif a == y:
386 if debug:
388 if debug:
387 repo.ui.debug(b'debug.copies: search mode: backward\n')
389 repo.ui.debug(b'debug.copies: search mode: backward\n')
388 copies = _backwardrenames(x, y, match=match)
390 copies = _backwardrenames(x, y, match=match)
389 else:
391 else:
390 if debug:
392 if debug:
391 repo.ui.debug(b'debug.copies: search mode: combined\n')
393 repo.ui.debug(b'debug.copies: search mode: combined\n')
392 base = None
394 base = None
393 if a.rev() != node.nullrev:
395 if a.rev() != node.nullrev:
394 base = x
396 base = x
395 copies = _chain(
397 copies = _chain(
396 _backwardrenames(x, a, match=match),
398 _backwardrenames(x, a, match=match),
397 _forwardcopies(a, y, base, match=match),
399 _forwardcopies(a, y, base, match=match),
398 )
400 )
399 _filter(x, y, copies)
401 _filter(x, y, copies)
400 return copies
402 return copies
401
403
402
404
403 def mergecopies(repo, c1, c2, base):
405 def mergecopies(repo, c1, c2, base):
404 """
406 """
405 Finds moves and copies between context c1 and c2 that are relevant for
407 Finds moves and copies between context c1 and c2 that are relevant for
406 merging. 'base' will be used as the merge base.
408 merging. 'base' will be used as the merge base.
407
409
408 Copytracing is used in commands like rebase, merge, unshelve, etc to merge
410 Copytracing is used in commands like rebase, merge, unshelve, etc to merge
409 files that were moved/ copied in one merge parent and modified in another.
411 files that were moved/ copied in one merge parent and modified in another.
410 For example:
412 For example:
411
413
412 o ---> 4 another commit
414 o ---> 4 another commit
413 |
415 |
414 | o ---> 3 commit that modifies a.txt
416 | o ---> 3 commit that modifies a.txt
415 | /
417 | /
416 o / ---> 2 commit that moves a.txt to b.txt
418 o / ---> 2 commit that moves a.txt to b.txt
417 |/
419 |/
418 o ---> 1 merge base
420 o ---> 1 merge base
419
421
420 If we try to rebase revision 3 on revision 4, since there is no a.txt in
422 If we try to rebase revision 3 on revision 4, since there is no a.txt in
421 revision 4, and if user have copytrace disabled, we prints the following
423 revision 4, and if user have copytrace disabled, we prints the following
422 message:
424 message:
423
425
424 ```other changed <file> which local deleted```
426 ```other changed <file> which local deleted```
425
427
426 Returns five dicts: "copy", "movewithdir", "diverge", "renamedelete" and
428 Returns five dicts: "copy", "movewithdir", "diverge", "renamedelete" and
427 "dirmove".
429 "dirmove".
428
430
429 "copy" is a mapping from destination name -> source name,
431 "copy" is a mapping from destination name -> source name,
430 where source is in c1 and destination is in c2 or vice-versa.
432 where source is in c1 and destination is in c2 or vice-versa.
431
433
432 "movewithdir" is a mapping from source name -> destination name,
434 "movewithdir" is a mapping from source name -> destination name,
433 where the file at source present in one context but not the other
435 where the file at source present in one context but not the other
434 needs to be moved to destination by the merge process, because the
436 needs to be moved to destination by the merge process, because the
435 other context moved the directory it is in.
437 other context moved the directory it is in.
436
438
437 "diverge" is a mapping of source name -> list of destination names
439 "diverge" is a mapping of source name -> list of destination names
438 for divergent renames.
440 for divergent renames.
439
441
440 "renamedelete" is a mapping of source name -> list of destination
442 "renamedelete" is a mapping of source name -> list of destination
441 names for files deleted in c1 that were renamed in c2 or vice-versa.
443 names for files deleted in c1 that were renamed in c2 or vice-versa.
442
444
443 "dirmove" is a mapping of detected source dir -> destination dir renames.
445 "dirmove" is a mapping of detected source dir -> destination dir renames.
444 This is needed for handling changes to new files previously grafted into
446 This is needed for handling changes to new files previously grafted into
445 renamed directories.
447 renamed directories.
446
448
447 This function calls different copytracing algorithms based on config.
449 This function calls different copytracing algorithms based on config.
448 """
450 """
449 # avoid silly behavior for update from empty dir
451 # avoid silly behavior for update from empty dir
450 if not c1 or not c2 or c1 == c2:
452 if not c1 or not c2 or c1 == c2:
451 return {}, {}, {}, {}, {}
453 return {}, {}, {}, {}, {}
452
454
453 narrowmatch = c1.repo().narrowmatch()
455 narrowmatch = c1.repo().narrowmatch()
454
456
455 # avoid silly behavior for parent -> working dir
457 # avoid silly behavior for parent -> working dir
456 if c2.node() is None and c1.node() == repo.dirstate.p1():
458 if c2.node() is None and c1.node() == repo.dirstate.p1():
457 return _dirstatecopies(repo, narrowmatch), {}, {}, {}, {}
459 return _dirstatecopies(repo, narrowmatch), {}, {}, {}, {}
458
460
459 copytracing = repo.ui.config(b'experimental', b'copytrace')
461 copytracing = repo.ui.config(b'experimental', b'copytrace')
460 if stringutil.parsebool(copytracing) is False:
462 if stringutil.parsebool(copytracing) is False:
461 # stringutil.parsebool() returns None when it is unable to parse the
463 # stringutil.parsebool() returns None when it is unable to parse the
462 # value, so we should rely on making sure copytracing is on such cases
464 # value, so we should rely on making sure copytracing is on such cases
463 return {}, {}, {}, {}, {}
465 return {}, {}, {}, {}, {}
464
466
465 if usechangesetcentricalgo(repo):
467 if usechangesetcentricalgo(repo):
466 # The heuristics don't make sense when we need changeset-centric algos
468 # The heuristics don't make sense when we need changeset-centric algos
467 return _fullcopytracing(repo, c1, c2, base)
469 return _fullcopytracing(repo, c1, c2, base)
468
470
469 # Copy trace disabling is explicitly below the node == p1 logic above
471 # Copy trace disabling is explicitly below the node == p1 logic above
470 # because the logic above is required for a simple copy to be kept across a
472 # because the logic above is required for a simple copy to be kept across a
471 # rebase.
473 # rebase.
472 if copytracing == b'heuristics':
474 if copytracing == b'heuristics':
473 # Do full copytracing if only non-public revisions are involved as
475 # Do full copytracing if only non-public revisions are involved as
474 # that will be fast enough and will also cover the copies which could
476 # that will be fast enough and will also cover the copies which could
475 # be missed by heuristics
477 # be missed by heuristics
476 if _isfullcopytraceable(repo, c1, base):
478 if _isfullcopytraceable(repo, c1, base):
477 return _fullcopytracing(repo, c1, c2, base)
479 return _fullcopytracing(repo, c1, c2, base)
478 return _heuristicscopytracing(repo, c1, c2, base)
480 return _heuristicscopytracing(repo, c1, c2, base)
479 else:
481 else:
480 return _fullcopytracing(repo, c1, c2, base)
482 return _fullcopytracing(repo, c1, c2, base)
481
483
482
484
483 def _isfullcopytraceable(repo, c1, base):
485 def _isfullcopytraceable(repo, c1, base):
484 """ Checks that if base, source and destination are all no-public branches,
486 """ Checks that if base, source and destination are all no-public branches,
485 if yes let's use the full copytrace algorithm for increased capabilities
487 if yes let's use the full copytrace algorithm for increased capabilities
486 since it will be fast enough.
488 since it will be fast enough.
487
489
488 `experimental.copytrace.sourcecommitlimit` can be used to set a limit for
490 `experimental.copytrace.sourcecommitlimit` can be used to set a limit for
489 number of changesets from c1 to base such that if number of changesets are
491 number of changesets from c1 to base such that if number of changesets are
490 more than the limit, full copytracing algorithm won't be used.
492 more than the limit, full copytracing algorithm won't be used.
491 """
493 """
492 if c1.rev() is None:
494 if c1.rev() is None:
493 c1 = c1.p1()
495 c1 = c1.p1()
494 if c1.mutable() and base.mutable():
496 if c1.mutable() and base.mutable():
495 sourcecommitlimit = repo.ui.configint(
497 sourcecommitlimit = repo.ui.configint(
496 b'experimental', b'copytrace.sourcecommitlimit'
498 b'experimental', b'copytrace.sourcecommitlimit'
497 )
499 )
498 commits = len(repo.revs(b'%d::%d', base.rev(), c1.rev()))
500 commits = len(repo.revs(b'%d::%d', base.rev(), c1.rev()))
499 return commits < sourcecommitlimit
501 return commits < sourcecommitlimit
500 return False
502 return False
501
503
502
504
503 def _checksinglesidecopies(
505 def _checksinglesidecopies(
504 src, dsts1, m1, m2, mb, c2, base, copy, renamedelete
506 src, dsts1, m1, m2, mb, c2, base, copy, renamedelete
505 ):
507 ):
506 if src not in m2:
508 if src not in m2:
507 # deleted on side 2
509 # deleted on side 2
508 if src not in m1:
510 if src not in m1:
509 # renamed on side 1, deleted on side 2
511 # renamed on side 1, deleted on side 2
510 renamedelete[src] = dsts1
512 renamedelete[src] = dsts1
511 elif m2[src] != mb[src]:
513 elif m2[src] != mb[src]:
512 if not _related(c2[src], base[src]):
514 if not _related(c2[src], base[src]):
513 return
515 return
514 # modified on side 2
516 # modified on side 2
515 for dst in dsts1:
517 for dst in dsts1:
516 if dst not in m2:
518 if dst not in m2:
517 # dst not added on side 2 (handle as regular
519 # dst not added on side 2 (handle as regular
518 # "both created" case in manifestmerge otherwise)
520 # "both created" case in manifestmerge otherwise)
519 copy[dst] = src
521 copy[dst] = src
520
522
521
523
522 def _fullcopytracing(repo, c1, c2, base):
524 def _fullcopytracing(repo, c1, c2, base):
523 """ The full copytracing algorithm which finds all the new files that were
525 """ The full copytracing algorithm which finds all the new files that were
524 added from merge base up to the top commit and for each file it checks if
526 added from merge base up to the top commit and for each file it checks if
525 this file was copied from another file.
527 this file was copied from another file.
526
528
527 This is pretty slow when a lot of changesets are involved but will track all
529 This is pretty slow when a lot of changesets are involved but will track all
528 the copies.
530 the copies.
529 """
531 """
530 m1 = c1.manifest()
532 m1 = c1.manifest()
531 m2 = c2.manifest()
533 m2 = c2.manifest()
532 mb = base.manifest()
534 mb = base.manifest()
533
535
534 copies1 = pathcopies(base, c1)
536 copies1 = pathcopies(base, c1)
535 copies2 = pathcopies(base, c2)
537 copies2 = pathcopies(base, c2)
536
538
537 inversecopies1 = {}
539 inversecopies1 = {}
538 inversecopies2 = {}
540 inversecopies2 = {}
539 for dst, src in copies1.items():
541 for dst, src in copies1.items():
540 inversecopies1.setdefault(src, []).append(dst)
542 inversecopies1.setdefault(src, []).append(dst)
541 for dst, src in copies2.items():
543 for dst, src in copies2.items():
542 inversecopies2.setdefault(src, []).append(dst)
544 inversecopies2.setdefault(src, []).append(dst)
543
545
544 copy = {}
546 copy = {}
545 diverge = {}
547 diverge = {}
546 renamedelete = {}
548 renamedelete = {}
547 allsources = set(inversecopies1) | set(inversecopies2)
549 allsources = set(inversecopies1) | set(inversecopies2)
548 for src in allsources:
550 for src in allsources:
549 dsts1 = inversecopies1.get(src)
551 dsts1 = inversecopies1.get(src)
550 dsts2 = inversecopies2.get(src)
552 dsts2 = inversecopies2.get(src)
551 if dsts1 and dsts2:
553 if dsts1 and dsts2:
552 # copied/renamed on both sides
554 # copied/renamed on both sides
553 if src not in m1 and src not in m2:
555 if src not in m1 and src not in m2:
554 # renamed on both sides
556 # renamed on both sides
555 dsts1 = set(dsts1)
557 dsts1 = set(dsts1)
556 dsts2 = set(dsts2)
558 dsts2 = set(dsts2)
557 # If there's some overlap in the rename destinations, we
559 # If there's some overlap in the rename destinations, we
558 # consider it not divergent. For example, if side 1 copies 'a'
560 # consider it not divergent. For example, if side 1 copies 'a'
559 # to 'b' and 'c' and deletes 'a', and side 2 copies 'a' to 'c'
561 # to 'b' and 'c' and deletes 'a', and side 2 copies 'a' to 'c'
560 # and 'd' and deletes 'a'.
562 # and 'd' and deletes 'a'.
561 if dsts1 & dsts2:
563 if dsts1 & dsts2:
562 for dst in dsts1 & dsts2:
564 for dst in dsts1 & dsts2:
563 copy[dst] = src
565 copy[dst] = src
564 else:
566 else:
565 diverge[src] = sorted(dsts1 | dsts2)
567 diverge[src] = sorted(dsts1 | dsts2)
566 elif src in m1 and src in m2:
568 elif src in m1 and src in m2:
567 # copied on both sides
569 # copied on both sides
568 dsts1 = set(dsts1)
570 dsts1 = set(dsts1)
569 dsts2 = set(dsts2)
571 dsts2 = set(dsts2)
570 for dst in dsts1 & dsts2:
572 for dst in dsts1 & dsts2:
571 copy[dst] = src
573 copy[dst] = src
572 # TODO: Handle cases where it was renamed on one side and copied
574 # TODO: Handle cases where it was renamed on one side and copied
573 # on the other side
575 # on the other side
574 elif dsts1:
576 elif dsts1:
575 # copied/renamed only on side 1
577 # copied/renamed only on side 1
576 _checksinglesidecopies(
578 _checksinglesidecopies(
577 src, dsts1, m1, m2, mb, c2, base, copy, renamedelete
579 src, dsts1, m1, m2, mb, c2, base, copy, renamedelete
578 )
580 )
579 elif dsts2:
581 elif dsts2:
580 # copied/renamed only on side 2
582 # copied/renamed only on side 2
581 _checksinglesidecopies(
583 _checksinglesidecopies(
582 src, dsts2, m2, m1, mb, c1, base, copy, renamedelete
584 src, dsts2, m2, m1, mb, c1, base, copy, renamedelete
583 )
585 )
584
586
585 renamedeleteset = set()
587 renamedeleteset = set()
586 divergeset = set()
588 divergeset = set()
587 for dsts in diverge.values():
589 for dsts in diverge.values():
588 divergeset.update(dsts)
590 divergeset.update(dsts)
589 for dsts in renamedelete.values():
591 for dsts in renamedelete.values():
590 renamedeleteset.update(dsts)
592 renamedeleteset.update(dsts)
591
593
592 # find interesting file sets from manifests
594 # find interesting file sets from manifests
593 addedinm1 = m1.filesnotin(mb, repo.narrowmatch())
595 addedinm1 = m1.filesnotin(mb, repo.narrowmatch())
594 addedinm2 = m2.filesnotin(mb, repo.narrowmatch())
596 addedinm2 = m2.filesnotin(mb, repo.narrowmatch())
595 u1 = sorted(addedinm1 - addedinm2)
597 u1 = sorted(addedinm1 - addedinm2)
596 u2 = sorted(addedinm2 - addedinm1)
598 u2 = sorted(addedinm2 - addedinm1)
597
599
598 header = b" unmatched files in %s"
600 header = b" unmatched files in %s"
599 if u1:
601 if u1:
600 repo.ui.debug(b"%s:\n %s\n" % (header % b'local', b"\n ".join(u1)))
602 repo.ui.debug(b"%s:\n %s\n" % (header % b'local', b"\n ".join(u1)))
601 if u2:
603 if u2:
602 repo.ui.debug(b"%s:\n %s\n" % (header % b'other', b"\n ".join(u2)))
604 repo.ui.debug(b"%s:\n %s\n" % (header % b'other', b"\n ".join(u2)))
603
605
604 fullcopy = copies1.copy()
606 fullcopy = copies1.copy()
605 fullcopy.update(copies2)
607 fullcopy.update(copies2)
606 if not fullcopy:
608 if not fullcopy:
607 return copy, {}, diverge, renamedelete, {}
609 return copy, {}, diverge, renamedelete, {}
608
610
609 if repo.ui.debugflag:
611 if repo.ui.debugflag:
610 repo.ui.debug(
612 repo.ui.debug(
611 b" all copies found (* = to merge, ! = divergent, "
613 b" all copies found (* = to merge, ! = divergent, "
612 b"% = renamed and deleted):\n"
614 b"% = renamed and deleted):\n"
613 )
615 )
614 for f in sorted(fullcopy):
616 for f in sorted(fullcopy):
615 note = b""
617 note = b""
616 if f in copy:
618 if f in copy:
617 note += b"*"
619 note += b"*"
618 if f in divergeset:
620 if f in divergeset:
619 note += b"!"
621 note += b"!"
620 if f in renamedeleteset:
622 if f in renamedeleteset:
621 note += b"%"
623 note += b"%"
622 repo.ui.debug(
624 repo.ui.debug(
623 b" src: '%s' -> dst: '%s' %s\n" % (fullcopy[f], f, note)
625 b" src: '%s' -> dst: '%s' %s\n" % (fullcopy[f], f, note)
624 )
626 )
625 del divergeset
627 del divergeset
626
628
627 repo.ui.debug(b" checking for directory renames\n")
629 repo.ui.debug(b" checking for directory renames\n")
628
630
629 # generate a directory move map
631 # generate a directory move map
630 d1, d2 = c1.dirs(), c2.dirs()
632 d1, d2 = c1.dirs(), c2.dirs()
631 invalid = set()
633 invalid = set()
632 dirmove = {}
634 dirmove = {}
633
635
634 # examine each file copy for a potential directory move, which is
636 # examine each file copy for a potential directory move, which is
635 # when all the files in a directory are moved to a new directory
637 # when all the files in a directory are moved to a new directory
636 for dst, src in pycompat.iteritems(fullcopy):
638 for dst, src in pycompat.iteritems(fullcopy):
637 dsrc, ddst = pathutil.dirname(src), pathutil.dirname(dst)
639 dsrc, ddst = pathutil.dirname(src), pathutil.dirname(dst)
638 if dsrc in invalid:
640 if dsrc in invalid:
639 # already seen to be uninteresting
641 # already seen to be uninteresting
640 continue
642 continue
641 elif dsrc in d1 and ddst in d1:
643 elif dsrc in d1 and ddst in d1:
642 # directory wasn't entirely moved locally
644 # directory wasn't entirely moved locally
643 invalid.add(dsrc)
645 invalid.add(dsrc)
644 elif dsrc in d2 and ddst in d2:
646 elif dsrc in d2 and ddst in d2:
645 # directory wasn't entirely moved remotely
647 # directory wasn't entirely moved remotely
646 invalid.add(dsrc)
648 invalid.add(dsrc)
647 elif dsrc in dirmove and dirmove[dsrc] != ddst:
649 elif dsrc in dirmove and dirmove[dsrc] != ddst:
648 # files from the same directory moved to two different places
650 # files from the same directory moved to two different places
649 invalid.add(dsrc)
651 invalid.add(dsrc)
650 else:
652 else:
651 # looks good so far
653 # looks good so far
652 dirmove[dsrc] = ddst
654 dirmove[dsrc] = ddst
653
655
654 for i in invalid:
656 for i in invalid:
655 if i in dirmove:
657 if i in dirmove:
656 del dirmove[i]
658 del dirmove[i]
657 del d1, d2, invalid
659 del d1, d2, invalid
658
660
659 if not dirmove:
661 if not dirmove:
660 return copy, {}, diverge, renamedelete, {}
662 return copy, {}, diverge, renamedelete, {}
661
663
662 dirmove = {k + b"/": v + b"/" for k, v in pycompat.iteritems(dirmove)}
664 dirmove = {k + b"/": v + b"/" for k, v in pycompat.iteritems(dirmove)}
663
665
664 for d in dirmove:
666 for d in dirmove:
665 repo.ui.debug(
667 repo.ui.debug(
666 b" discovered dir src: '%s' -> dst: '%s'\n" % (d, dirmove[d])
668 b" discovered dir src: '%s' -> dst: '%s'\n" % (d, dirmove[d])
667 )
669 )
668
670
669 movewithdir = {}
671 movewithdir = {}
670 # check unaccounted nonoverlapping files against directory moves
672 # check unaccounted nonoverlapping files against directory moves
671 for f in u1 + u2:
673 for f in u1 + u2:
672 if f not in fullcopy:
674 if f not in fullcopy:
673 for d in dirmove:
675 for d in dirmove:
674 if f.startswith(d):
676 if f.startswith(d):
675 # new file added in a directory that was moved, move it
677 # new file added in a directory that was moved, move it
676 df = dirmove[d] + f[len(d) :]
678 df = dirmove[d] + f[len(d) :]
677 if df not in copy:
679 if df not in copy:
678 movewithdir[f] = df
680 movewithdir[f] = df
679 repo.ui.debug(
681 repo.ui.debug(
680 b" pending file src: '%s' -> dst: '%s'\n"
682 b" pending file src: '%s' -> dst: '%s'\n"
681 % (f, df)
683 % (f, df)
682 )
684 )
683 break
685 break
684
686
685 return copy, movewithdir, diverge, renamedelete, dirmove
687 return copy, movewithdir, diverge, renamedelete, dirmove
686
688
687
689
688 def _heuristicscopytracing(repo, c1, c2, base):
690 def _heuristicscopytracing(repo, c1, c2, base):
689 """ Fast copytracing using filename heuristics
691 """ Fast copytracing using filename heuristics
690
692
691 Assumes that moves or renames are of following two types:
693 Assumes that moves or renames are of following two types:
692
694
693 1) Inside a directory only (same directory name but different filenames)
695 1) Inside a directory only (same directory name but different filenames)
694 2) Move from one directory to another
696 2) Move from one directory to another
695 (same filenames but different directory names)
697 (same filenames but different directory names)
696
698
697 Works only when there are no merge commits in the "source branch".
699 Works only when there are no merge commits in the "source branch".
698 Source branch is commits from base up to c2 not including base.
700 Source branch is commits from base up to c2 not including base.
699
701
700 If merge is involved it fallbacks to _fullcopytracing().
702 If merge is involved it fallbacks to _fullcopytracing().
701
703
702 Can be used by setting the following config:
704 Can be used by setting the following config:
703
705
704 [experimental]
706 [experimental]
705 copytrace = heuristics
707 copytrace = heuristics
706
708
707 In some cases the copy/move candidates found by heuristics can be very large
709 In some cases the copy/move candidates found by heuristics can be very large
708 in number and that will make the algorithm slow. The number of possible
710 in number and that will make the algorithm slow. The number of possible
709 candidates to check can be limited by using the config
711 candidates to check can be limited by using the config
710 `experimental.copytrace.movecandidateslimit` which defaults to 100.
712 `experimental.copytrace.movecandidateslimit` which defaults to 100.
711 """
713 """
712
714
713 if c1.rev() is None:
715 if c1.rev() is None:
714 c1 = c1.p1()
716 c1 = c1.p1()
715 if c2.rev() is None:
717 if c2.rev() is None:
716 c2 = c2.p1()
718 c2 = c2.p1()
717
719
718 copies = {}
720 copies = {}
719
721
720 changedfiles = set()
722 changedfiles = set()
721 m1 = c1.manifest()
723 m1 = c1.manifest()
722 if not repo.revs(b'%d::%d', base.rev(), c2.rev()):
724 if not repo.revs(b'%d::%d', base.rev(), c2.rev()):
723 # If base is not in c2 branch, we switch to fullcopytracing
725 # If base is not in c2 branch, we switch to fullcopytracing
724 repo.ui.debug(
726 repo.ui.debug(
725 b"switching to full copytracing as base is not "
727 b"switching to full copytracing as base is not "
726 b"an ancestor of c2\n"
728 b"an ancestor of c2\n"
727 )
729 )
728 return _fullcopytracing(repo, c1, c2, base)
730 return _fullcopytracing(repo, c1, c2, base)
729
731
730 ctx = c2
732 ctx = c2
731 while ctx != base:
733 while ctx != base:
732 if len(ctx.parents()) == 2:
734 if len(ctx.parents()) == 2:
733 # To keep things simple let's not handle merges
735 # To keep things simple let's not handle merges
734 repo.ui.debug(b"switching to full copytracing because of merges\n")
736 repo.ui.debug(b"switching to full copytracing because of merges\n")
735 return _fullcopytracing(repo, c1, c2, base)
737 return _fullcopytracing(repo, c1, c2, base)
736 changedfiles.update(ctx.files())
738 changedfiles.update(ctx.files())
737 ctx = ctx.p1()
739 ctx = ctx.p1()
738
740
739 cp = _forwardcopies(base, c2)
741 cp = _forwardcopies(base, c2)
740 for dst, src in pycompat.iteritems(cp):
742 for dst, src in pycompat.iteritems(cp):
741 if src in m1:
743 if src in m1:
742 copies[dst] = src
744 copies[dst] = src
743
745
744 # file is missing if it isn't present in the destination, but is present in
746 # file is missing if it isn't present in the destination, but is present in
745 # the base and present in the source.
747 # the base and present in the source.
746 # Presence in the base is important to exclude added files, presence in the
748 # Presence in the base is important to exclude added files, presence in the
747 # source is important to exclude removed files.
749 # source is important to exclude removed files.
748 filt = lambda f: f not in m1 and f in base and f in c2
750 filt = lambda f: f not in m1 and f in base and f in c2
749 missingfiles = [f for f in changedfiles if filt(f)]
751 missingfiles = [f for f in changedfiles if filt(f)]
750
752
751 if missingfiles:
753 if missingfiles:
752 basenametofilename = collections.defaultdict(list)
754 basenametofilename = collections.defaultdict(list)
753 dirnametofilename = collections.defaultdict(list)
755 dirnametofilename = collections.defaultdict(list)
754
756
755 for f in m1.filesnotin(base.manifest()):
757 for f in m1.filesnotin(base.manifest()):
756 basename = os.path.basename(f)
758 basename = os.path.basename(f)
757 dirname = os.path.dirname(f)
759 dirname = os.path.dirname(f)
758 basenametofilename[basename].append(f)
760 basenametofilename[basename].append(f)
759 dirnametofilename[dirname].append(f)
761 dirnametofilename[dirname].append(f)
760
762
761 for f in missingfiles:
763 for f in missingfiles:
762 basename = os.path.basename(f)
764 basename = os.path.basename(f)
763 dirname = os.path.dirname(f)
765 dirname = os.path.dirname(f)
764 samebasename = basenametofilename[basename]
766 samebasename = basenametofilename[basename]
765 samedirname = dirnametofilename[dirname]
767 samedirname = dirnametofilename[dirname]
766 movecandidates = samebasename + samedirname
768 movecandidates = samebasename + samedirname
767 # f is guaranteed to be present in c2, that's why
769 # f is guaranteed to be present in c2, that's why
768 # c2.filectx(f) won't fail
770 # c2.filectx(f) won't fail
769 f2 = c2.filectx(f)
771 f2 = c2.filectx(f)
770 # we can have a lot of candidates which can slow down the heuristics
772 # we can have a lot of candidates which can slow down the heuristics
771 # config value to limit the number of candidates moves to check
773 # config value to limit the number of candidates moves to check
772 maxcandidates = repo.ui.configint(
774 maxcandidates = repo.ui.configint(
773 b'experimental', b'copytrace.movecandidateslimit'
775 b'experimental', b'copytrace.movecandidateslimit'
774 )
776 )
775
777
776 if len(movecandidates) > maxcandidates:
778 if len(movecandidates) > maxcandidates:
777 repo.ui.status(
779 repo.ui.status(
778 _(
780 _(
779 b"skipping copytracing for '%s', more "
781 b"skipping copytracing for '%s', more "
780 b"candidates than the limit: %d\n"
782 b"candidates than the limit: %d\n"
781 )
783 )
782 % (f, len(movecandidates))
784 % (f, len(movecandidates))
783 )
785 )
784 continue
786 continue
785
787
786 for candidate in movecandidates:
788 for candidate in movecandidates:
787 f1 = c1.filectx(candidate)
789 f1 = c1.filectx(candidate)
788 if _related(f1, f2):
790 if _related(f1, f2):
789 # if there are a few related copies then we'll merge
791 # if there are a few related copies then we'll merge
790 # changes into all of them. This matches the behaviour
792 # changes into all of them. This matches the behaviour
791 # of upstream copytracing
793 # of upstream copytracing
792 copies[candidate] = f
794 copies[candidate] = f
793
795
794 return copies, {}, {}, {}, {}
796 return copies, {}, {}, {}, {}
795
797
796
798
797 def _related(f1, f2):
799 def _related(f1, f2):
798 """return True if f1 and f2 filectx have a common ancestor
800 """return True if f1 and f2 filectx have a common ancestor
799
801
800 Walk back to common ancestor to see if the two files originate
802 Walk back to common ancestor to see if the two files originate
801 from the same file. Since workingfilectx's rev() is None it messes
803 from the same file. Since workingfilectx's rev() is None it messes
802 up the integer comparison logic, hence the pre-step check for
804 up the integer comparison logic, hence the pre-step check for
803 None (f1 and f2 can only be workingfilectx's initially).
805 None (f1 and f2 can only be workingfilectx's initially).
804 """
806 """
805
807
806 if f1 == f2:
808 if f1 == f2:
807 return True # a match
809 return True # a match
808
810
809 g1, g2 = f1.ancestors(), f2.ancestors()
811 g1, g2 = f1.ancestors(), f2.ancestors()
810 try:
812 try:
811 f1r, f2r = f1.linkrev(), f2.linkrev()
813 f1r, f2r = f1.linkrev(), f2.linkrev()
812
814
813 if f1r is None:
815 if f1r is None:
814 f1 = next(g1)
816 f1 = next(g1)
815 if f2r is None:
817 if f2r is None:
816 f2 = next(g2)
818 f2 = next(g2)
817
819
818 while True:
820 while True:
819 f1r, f2r = f1.linkrev(), f2.linkrev()
821 f1r, f2r = f1.linkrev(), f2.linkrev()
820 if f1r > f2r:
822 if f1r > f2r:
821 f1 = next(g1)
823 f1 = next(g1)
822 elif f2r > f1r:
824 elif f2r > f1r:
823 f2 = next(g2)
825 f2 = next(g2)
824 else: # f1 and f2 point to files in the same linkrev
826 else: # f1 and f2 point to files in the same linkrev
825 return f1 == f2 # true if they point to the same file
827 return f1 == f2 # true if they point to the same file
826 except StopIteration:
828 except StopIteration:
827 return False
829 return False
828
830
829
831
830 def duplicatecopies(repo, wctx, rev, fromrev, skiprev=None):
832 def duplicatecopies(repo, wctx, rev, fromrev, skiprev=None):
831 """reproduce copies from fromrev to rev in the dirstate
833 """reproduce copies from fromrev to rev in the dirstate
832
834
833 If skiprev is specified, it's a revision that should be used to
835 If skiprev is specified, it's a revision that should be used to
834 filter copy records. Any copies that occur between fromrev and
836 filter copy records. Any copies that occur between fromrev and
835 skiprev will not be duplicated, even if they appear in the set of
837 skiprev will not be duplicated, even if they appear in the set of
836 copies between fromrev and rev.
838 copies between fromrev and rev.
837 """
839 """
838 exclude = {}
840 exclude = {}
839 ctraceconfig = repo.ui.config(b'experimental', b'copytrace')
841 ctraceconfig = repo.ui.config(b'experimental', b'copytrace')
840 bctrace = stringutil.parsebool(ctraceconfig)
842 bctrace = stringutil.parsebool(ctraceconfig)
841 if skiprev is not None and (
843 if skiprev is not None and (
842 ctraceconfig == b'heuristics' or bctrace or bctrace is None
844 ctraceconfig == b'heuristics' or bctrace or bctrace is None
843 ):
845 ):
844 # copytrace='off' skips this line, but not the entire function because
846 # copytrace='off' skips this line, but not the entire function because
845 # the line below is O(size of the repo) during a rebase, while the rest
847 # the line below is O(size of the repo) during a rebase, while the rest
846 # of the function is much faster (and is required for carrying copy
848 # of the function is much faster (and is required for carrying copy
847 # metadata across the rebase anyway).
849 # metadata across the rebase anyway).
848 exclude = pathcopies(repo[fromrev], repo[skiprev])
850 exclude = pathcopies(repo[fromrev], repo[skiprev])
849 for dst, src in pycompat.iteritems(pathcopies(repo[fromrev], repo[rev])):
851 for dst, src in pycompat.iteritems(pathcopies(repo[fromrev], repo[rev])):
850 if dst in exclude:
852 if dst in exclude:
851 continue
853 continue
852 if dst in wctx:
854 if dst in wctx:
853 wctx[dst].markcopied(src)
855 wctx[dst].markcopied(src)
854
856
855
857
856 def computechangesetcopies(ctx):
858 def computechangesetcopies(ctx):
857 """return the copies data for a changeset
859 """return the copies data for a changeset
858
860
859 The copies data are returned as a pair of dictionnary (p1copies, p2copies).
861 The copies data are returned as a pair of dictionnary (p1copies, p2copies).
860
862
861 Each dictionnary are in the form: `{newname: oldname}`
863 Each dictionnary are in the form: `{newname: oldname}`
862 """
864 """
863 p1copies = {}
865 p1copies = {}
864 p2copies = {}
866 p2copies = {}
865 p1 = ctx.p1()
867 p1 = ctx.p1()
866 p2 = ctx.p2()
868 p2 = ctx.p2()
867 narrowmatch = ctx._repo.narrowmatch()
869 narrowmatch = ctx._repo.narrowmatch()
868 for dst in ctx.files():
870 for dst in ctx.files():
869 if not narrowmatch(dst) or dst not in ctx:
871 if not narrowmatch(dst) or dst not in ctx:
870 continue
872 continue
871 copied = ctx[dst].renamed()
873 copied = ctx[dst].renamed()
872 if not copied:
874 if not copied:
873 continue
875 continue
874 src, srcnode = copied
876 src, srcnode = copied
875 if src in p1 and p1[src].filenode() == srcnode:
877 if src in p1 and p1[src].filenode() == srcnode:
876 p1copies[dst] = src
878 p1copies[dst] = src
877 elif src in p2 and p2[src].filenode() == srcnode:
879 elif src in p2 and p2[src].filenode() == srcnode:
878 p2copies[dst] = src
880 p2copies[dst] = src
879 return p1copies, p2copies
881 return p1copies, p2copies
@@ -1,396 +1,396 b''
1 #testcases filelog compatibility changeset sidedata
1 #testcases filelog compatibility changeset sidedata
2
2
3 $ cat >> $HGRCPATH << EOF
3 $ cat >> $HGRCPATH << EOF
4 > [extensions]
4 > [extensions]
5 > rebase=
5 > rebase=
6 > [alias]
6 > [alias]
7 > l = log -G -T '{rev} {desc}\n{files}\n'
7 > l = log -G -T '{rev} {desc}\n{files}\n'
8 > EOF
8 > EOF
9
9
10 #if compatibility
10 #if compatibility
11 $ cat >> $HGRCPATH << EOF
11 $ cat >> $HGRCPATH << EOF
12 > [experimental]
12 > [experimental]
13 > copies.read-from = compatibility
13 > copies.read-from = compatibility
14 > EOF
14 > EOF
15 #endif
15 #endif
16
16
17 #if changeset
17 #if changeset
18 $ cat >> $HGRCPATH << EOF
18 $ cat >> $HGRCPATH << EOF
19 > [experimental]
19 > [experimental]
20 > copies.read-from = changeset-only
20 > copies.read-from = changeset-only
21 > copies.write-to = changeset-only
21 > copies.write-to = changeset-only
22 > EOF
22 > EOF
23 #endif
23 #endif
24
24
25 #if sidedata
25 #if sidedata
26 $ cat >> $HGRCPATH << EOF
26 $ cat >> $HGRCPATH << EOF
27 > [format]
27 > [format]
28 > exp-use-copies-side-data-changeset = yes
28 > exp-use-copies-side-data-changeset = yes
29 > EOF
29 > EOF
30 #endif
30 #endif
31
31
32 $ REPONUM=0
32 $ REPONUM=0
33 $ newrepo() {
33 $ newrepo() {
34 > cd $TESTTMP
34 > cd $TESTTMP
35 > REPONUM=`expr $REPONUM + 1`
35 > REPONUM=`expr $REPONUM + 1`
36 > hg init repo-$REPONUM
36 > hg init repo-$REPONUM
37 > cd repo-$REPONUM
37 > cd repo-$REPONUM
38 > }
38 > }
39
39
40 Copy a file, then delete destination, then copy again. This does not create a new filelog entry.
40 Copy a file, then delete destination, then copy again. This does not create a new filelog entry.
41 $ newrepo
41 $ newrepo
42 $ echo x > x
42 $ echo x > x
43 $ hg ci -Aqm 'add x'
43 $ hg ci -Aqm 'add x'
44 $ echo x2 > x
44 $ echo x2 > x
45 $ hg ci -m 'modify x'
45 $ hg ci -m 'modify x'
46 $ hg co -q 0
46 $ hg co -q 0
47 $ hg cp x y
47 $ hg cp x y
48 $ hg ci -qm 'copy x to y'
48 $ hg ci -qm 'copy x to y'
49 $ hg rm y
49 $ hg rm y
50 $ hg ci -m 'remove y'
50 $ hg ci -m 'remove y'
51 $ hg cp -f x y
51 $ hg cp -f x y
52 $ hg ci -m 'copy x onto y (again)'
52 $ hg ci -m 'copy x onto y (again)'
53 $ hg l
53 $ hg l
54 @ 4 copy x onto y (again)
54 @ 4 copy x onto y (again)
55 | y
55 | y
56 o 3 remove y
56 o 3 remove y
57 | y
57 | y
58 o 2 copy x to y
58 o 2 copy x to y
59 | y
59 | y
60 | o 1 modify x
60 | o 1 modify x
61 |/ x
61 |/ x
62 o 0 add x
62 o 0 add x
63 x
63 x
64 $ hg debugp1copies -r 4
64 $ hg debugp1copies -r 4
65 x -> y
65 x -> y
66 $ hg debugpathcopies 0 4
66 $ hg debugpathcopies 0 4
67 x -> y
67 x -> y
68 $ hg graft -r 1
68 $ hg graft -r 1
69 grafting 1:* "modify x" (glob)
69 grafting 1:* "modify x" (glob)
70 merging y and x to y
70 merging y and x to y
71 $ hg co -qC 1
71 $ hg co -qC 1
72 $ hg graft -r 4
72 $ hg graft -r 4
73 grafting 4:* "copy x onto y (again)" (glob)
73 grafting 4:* "copy x onto y (again)" (glob)
74 merging x and y to y
74 merging x and y to y
75
75
76 Copy x to y, then remove y, then add back y. With copy metadata in the
76 Copy x to y, then remove y, then add back y. With copy metadata in the
77 changeset, this could easily end up reporting y as copied from x (if we don't
77 changeset, this could easily end up reporting y as copied from x (if we don't
78 unmark it as a copy when it's removed). Despite x and y not being related, we
78 unmark it as a copy when it's removed). Despite x and y not being related, we
79 want grafts to propagate across the rename.
79 want grafts to propagate across the rename.
80 $ newrepo
80 $ newrepo
81 $ echo x > x
81 $ echo x > x
82 $ hg ci -Aqm 'add x'
82 $ hg ci -Aqm 'add x'
83 $ echo x2 > x
83 $ echo x2 > x
84 $ hg ci -m 'modify x'
84 $ hg ci -m 'modify x'
85 $ hg co -q 0
85 $ hg co -q 0
86 $ hg mv x y
86 $ hg mv x y
87 $ hg ci -qm 'rename x to y'
87 $ hg ci -qm 'rename x to y'
88 $ hg rm y
88 $ hg rm y
89 $ hg ci -qm 'remove y'
89 $ hg ci -qm 'remove y'
90 $ echo x > y
90 $ echo x > y
91 $ hg ci -Aqm 'add back y'
91 $ hg ci -Aqm 'add back y'
92 $ hg l
92 $ hg l
93 @ 4 add back y
93 @ 4 add back y
94 | y
94 | y
95 o 3 remove y
95 o 3 remove y
96 | y
96 | y
97 o 2 rename x to y
97 o 2 rename x to y
98 | x y
98 | x y
99 | o 1 modify x
99 | o 1 modify x
100 |/ x
100 |/ x
101 o 0 add x
101 o 0 add x
102 x
102 x
103 $ hg debugpathcopies 0 4
103 $ hg debugpathcopies 0 4
104 BROKEN: This should succeed and merge the changes from x into y
104 BROKEN: This should succeed and merge the changes from x into y
105 $ hg graft -r 1
105 $ hg graft -r 1
106 grafting 1:* "modify x" (glob)
106 grafting 1:* "modify x" (glob)
107 file 'x' was deleted in local [local] but was modified in other [graft].
107 file 'x' was deleted in local [local] but was modified in other [graft].
108 You can use (c)hanged version, leave (d)eleted, or leave (u)nresolved.
108 You can use (c)hanged version, leave (d)eleted, or leave (u)nresolved.
109 What do you want to do? u
109 What do you want to do? u
110 abort: unresolved conflicts, can't continue
110 abort: unresolved conflicts, can't continue
111 (use 'hg resolve' and 'hg graft --continue')
111 (use 'hg resolve' and 'hg graft --continue')
112 [255]
112 [255]
113
113
114 Add x, remove it, then add it back, then rename x to y. Similar to the case
114 Add x, remove it, then add it back, then rename x to y. Similar to the case
115 above, but here the break in history is before the rename.
115 above, but here the break in history is before the rename.
116 $ newrepo
116 $ newrepo
117 $ echo x > x
117 $ echo x > x
118 $ hg ci -Aqm 'add x'
118 $ hg ci -Aqm 'add x'
119 $ echo x2 > x
119 $ echo x2 > x
120 $ hg ci -m 'modify x'
120 $ hg ci -m 'modify x'
121 $ hg co -q 0
121 $ hg co -q 0
122 $ hg rm x
122 $ hg rm x
123 $ hg ci -qm 'remove x'
123 $ hg ci -qm 'remove x'
124 $ echo x > x
124 $ echo x > x
125 $ hg ci -Aqm 'add x again'
125 $ hg ci -Aqm 'add x again'
126 $ hg mv x y
126 $ hg mv x y
127 $ hg ci -m 'rename x to y'
127 $ hg ci -m 'rename x to y'
128 $ hg l
128 $ hg l
129 @ 4 rename x to y
129 @ 4 rename x to y
130 | x y
130 | x y
131 o 3 add x again
131 o 3 add x again
132 | x
132 | x
133 o 2 remove x
133 o 2 remove x
134 | x
134 | x
135 | o 1 modify x
135 | o 1 modify x
136 |/ x
136 |/ x
137 o 0 add x
137 o 0 add x
138 x
138 x
139 $ hg debugpathcopies 0 4
139 $ hg debugpathcopies 0 4
140 x -> y
140 x -> y
141 $ hg graft -r 1
141 $ hg graft -r 1
142 grafting 1:* "modify x" (glob)
142 grafting 1:* "modify x" (glob)
143 merging y and x to y
143 merging y and x to y
144 $ hg co -qC 1
144 $ hg co -qC 1
145 $ hg graft -r 4
145 $ hg graft -r 4
146 grafting 4:* "rename x to y" (glob)
146 grafting 4:* "rename x to y" (glob)
147 merging x and y to y
147 merging x and y to y
148
148
149 Add x, modify it, remove it, then add it back, then rename x to y. Similar to
149 Add x, modify it, remove it, then add it back, then rename x to y. Similar to
150 the case above, but here the re-added file's nodeid is different from before
150 the case above, but here the re-added file's nodeid is different from before
151 the break.
151 the break.
152
152
153 $ newrepo
153 $ newrepo
154 $ echo x > x
154 $ echo x > x
155 $ hg ci -Aqm 'add x'
155 $ hg ci -Aqm 'add x'
156 $ echo x2 > x
156 $ echo x2 > x
157 $ hg ci -m 'modify x'
157 $ hg ci -m 'modify x'
158 $ echo x3 > x
158 $ echo x3 > x
159 $ hg ci -qm 'modify x again'
159 $ hg ci -qm 'modify x again'
160 $ hg co -q 1
160 $ hg co -q 1
161 $ hg rm x
161 $ hg rm x
162 $ hg ci -qm 'remove x'
162 $ hg ci -qm 'remove x'
163 # Same content to avoid conflicts
163 # Same content to avoid conflicts
164 $ hg revert -r 1 x
164 $ hg revert -r 1 x
165 $ hg ci -Aqm 'add x again'
165 $ hg ci -Aqm 'add x again'
166 $ hg mv x y
166 $ hg mv x y
167 $ hg ci -m 'rename x to y'
167 $ hg ci -m 'rename x to y'
168 $ hg l
168 $ hg l
169 @ 5 rename x to y
169 @ 5 rename x to y
170 | x y
170 | x y
171 o 4 add x again
171 o 4 add x again
172 | x
172 | x
173 o 3 remove x
173 o 3 remove x
174 | x
174 | x
175 | o 2 modify x again
175 | o 2 modify x again
176 |/ x
176 |/ x
177 o 1 modify x
177 o 1 modify x
178 | x
178 | x
179 o 0 add x
179 o 0 add x
180 x
180 x
181 $ hg debugpathcopies 0 5
181 $ hg debugpathcopies 0 5
182 x -> y (no-filelog no-sidedata !)
182 x -> y (no-filelog !)
183 #if no-filelog no-sidedata
183 #if no-filelog
184 $ hg graft -r 2
184 $ hg graft -r 2
185 grafting 2:* "modify x again" (glob)
185 grafting 2:* "modify x again" (glob)
186 merging y and x to y
186 merging y and x to y
187 #else
187 #else
188 BROKEN: This should succeed and merge the changes from x into y
188 BROKEN: This should succeed and merge the changes from x into y
189 $ hg graft -r 2
189 $ hg graft -r 2
190 grafting 2:* "modify x again" (glob)
190 grafting 2:* "modify x again" (glob)
191 file 'x' was deleted in local [local] but was modified in other [graft].
191 file 'x' was deleted in local [local] but was modified in other [graft].
192 You can use (c)hanged version, leave (d)eleted, or leave (u)nresolved.
192 You can use (c)hanged version, leave (d)eleted, or leave (u)nresolved.
193 What do you want to do? u
193 What do you want to do? u
194 abort: unresolved conflicts, can't continue
194 abort: unresolved conflicts, can't continue
195 (use 'hg resolve' and 'hg graft --continue')
195 (use 'hg resolve' and 'hg graft --continue')
196 [255]
196 [255]
197 #endif
197 #endif
198 $ hg co -qC 2
198 $ hg co -qC 2
199 BROKEN: This should succeed and merge the changes from x into y
199 BROKEN: This should succeed and merge the changes from x into y
200 $ hg graft -r 5
200 $ hg graft -r 5
201 grafting 5:* "rename x to y"* (glob)
201 grafting 5:* "rename x to y"* (glob)
202 file 'x' was deleted in other [graft] but was modified in local [local].
202 file 'x' was deleted in other [graft] but was modified in local [local].
203 You can use (c)hanged version, (d)elete, or leave (u)nresolved.
203 You can use (c)hanged version, (d)elete, or leave (u)nresolved.
204 What do you want to do? u
204 What do you want to do? u
205 abort: unresolved conflicts, can't continue
205 abort: unresolved conflicts, can't continue
206 (use 'hg resolve' and 'hg graft --continue')
206 (use 'hg resolve' and 'hg graft --continue')
207 [255]
207 [255]
208
208
209 Add x, remove it, then add it back, rename x to y from the first commit.
209 Add x, remove it, then add it back, rename x to y from the first commit.
210 Similar to the case above, but here the break in history is parallel to the
210 Similar to the case above, but here the break in history is parallel to the
211 rename.
211 rename.
212 $ newrepo
212 $ newrepo
213 $ echo x > x
213 $ echo x > x
214 $ hg ci -Aqm 'add x'
214 $ hg ci -Aqm 'add x'
215 $ hg rm x
215 $ hg rm x
216 $ hg ci -qm 'remove x'
216 $ hg ci -qm 'remove x'
217 $ echo x > x
217 $ echo x > x
218 $ hg ci -Aqm 'add x again'
218 $ hg ci -Aqm 'add x again'
219 $ echo x2 > x
219 $ echo x2 > x
220 $ hg ci -m 'modify x'
220 $ hg ci -m 'modify x'
221 $ hg co -q 0
221 $ hg co -q 0
222 $ hg mv x y
222 $ hg mv x y
223 $ hg ci -qm 'rename x to y'
223 $ hg ci -qm 'rename x to y'
224 $ hg l
224 $ hg l
225 @ 4 rename x to y
225 @ 4 rename x to y
226 | x y
226 | x y
227 | o 3 modify x
227 | o 3 modify x
228 | | x
228 | | x
229 | o 2 add x again
229 | o 2 add x again
230 | | x
230 | | x
231 | o 1 remove x
231 | o 1 remove x
232 |/ x
232 |/ x
233 o 0 add x
233 o 0 add x
234 x
234 x
235 $ hg debugpathcopies 2 4
235 $ hg debugpathcopies 2 4
236 x -> y
236 x -> y
237 $ hg graft -r 3
237 $ hg graft -r 3
238 grafting 3:* "modify x" (glob)
238 grafting 3:* "modify x" (glob)
239 merging y and x to y
239 merging y and x to y
240 $ hg co -qC 3
240 $ hg co -qC 3
241 $ hg graft -r 4
241 $ hg graft -r 4
242 grafting 4:* "rename x to y" (glob)
242 grafting 4:* "rename x to y" (glob)
243 merging x and y to y
243 merging x and y to y
244
244
245 Add x, remove it, then add it back, rename x to y from the first commit.
245 Add x, remove it, then add it back, rename x to y from the first commit.
246 Similar to the case above, but here the re-added file's nodeid is different
246 Similar to the case above, but here the re-added file's nodeid is different
247 from the base.
247 from the base.
248 $ newrepo
248 $ newrepo
249 $ echo x > x
249 $ echo x > x
250 $ hg ci -Aqm 'add x'
250 $ hg ci -Aqm 'add x'
251 $ hg rm x
251 $ hg rm x
252 $ hg ci -qm 'remove x'
252 $ hg ci -qm 'remove x'
253 $ echo x2 > x
253 $ echo x2 > x
254 $ hg ci -Aqm 'add x again with different content'
254 $ hg ci -Aqm 'add x again with different content'
255 $ hg co -q 0
255 $ hg co -q 0
256 $ hg mv x y
256 $ hg mv x y
257 $ hg ci -qm 'rename x to y'
257 $ hg ci -qm 'rename x to y'
258 $ hg l
258 $ hg l
259 @ 3 rename x to y
259 @ 3 rename x to y
260 | x y
260 | x y
261 | o 2 add x again with different content
261 | o 2 add x again with different content
262 | | x
262 | | x
263 | o 1 remove x
263 | o 1 remove x
264 |/ x
264 |/ x
265 o 0 add x
265 o 0 add x
266 x
266 x
267 $ hg debugpathcopies 2 3
267 $ hg debugpathcopies 2 3
268 x -> y
268 x -> y
269 BROKEN: This should merge the changes from x into y
269 BROKEN: This should merge the changes from x into y
270 $ hg graft -r 2
270 $ hg graft -r 2
271 grafting 2:* "add x again with different content" (glob)
271 grafting 2:* "add x again with different content" (glob)
272 $ hg co -qC 2
272 $ hg co -qC 2
273 BROKEN: This should succeed and merge the changes from x into y
273 BROKEN: This should succeed and merge the changes from x into y
274 $ hg graft -r 3
274 $ hg graft -r 3
275 grafting 3:* "rename x to y" (glob)
275 grafting 3:* "rename x to y" (glob)
276 file 'x' was deleted in other [graft] but was modified in local [local].
276 file 'x' was deleted in other [graft] but was modified in local [local].
277 You can use (c)hanged version, (d)elete, or leave (u)nresolved.
277 You can use (c)hanged version, (d)elete, or leave (u)nresolved.
278 What do you want to do? u
278 What do you want to do? u
279 abort: unresolved conflicts, can't continue
279 abort: unresolved conflicts, can't continue
280 (use 'hg resolve' and 'hg graft --continue')
280 (use 'hg resolve' and 'hg graft --continue')
281 [255]
281 [255]
282
282
283 Add x on two branches, then rename x to y on one side. Similar to the case
283 Add x on two branches, then rename x to y on one side. Similar to the case
284 above, but here the break in history is via the base commit.
284 above, but here the break in history is via the base commit.
285 $ newrepo
285 $ newrepo
286 $ echo a > a
286 $ echo a > a
287 $ hg ci -Aqm 'base'
287 $ hg ci -Aqm 'base'
288 $ echo x > x
288 $ echo x > x
289 $ hg ci -Aqm 'add x'
289 $ hg ci -Aqm 'add x'
290 $ echo x2 > x
290 $ echo x2 > x
291 $ hg ci -m 'modify x'
291 $ hg ci -m 'modify x'
292 $ hg co -q 0
292 $ hg co -q 0
293 $ echo x > x
293 $ echo x > x
294 $ hg ci -Aqm 'add x again'
294 $ hg ci -Aqm 'add x again'
295 $ hg mv x y
295 $ hg mv x y
296 $ hg ci -qm 'rename x to y'
296 $ hg ci -qm 'rename x to y'
297 $ hg l
297 $ hg l
298 @ 4 rename x to y
298 @ 4 rename x to y
299 | x y
299 | x y
300 o 3 add x again
300 o 3 add x again
301 | x
301 | x
302 | o 2 modify x
302 | o 2 modify x
303 | | x
303 | | x
304 | o 1 add x
304 | o 1 add x
305 |/ x
305 |/ x
306 o 0 base
306 o 0 base
307 a
307 a
308 $ hg debugpathcopies 1 4
308 $ hg debugpathcopies 1 4
309 x -> y
309 x -> y
310 $ hg graft -r 2
310 $ hg graft -r 2
311 grafting 2:* "modify x" (glob)
311 grafting 2:* "modify x" (glob)
312 merging y and x to y
312 merging y and x to y
313 $ hg co -qC 2
313 $ hg co -qC 2
314 $ hg graft -r 4
314 $ hg graft -r 4
315 grafting 4:* "rename x to y"* (glob)
315 grafting 4:* "rename x to y"* (glob)
316 merging x and y to y
316 merging x and y to y
317
317
318 Add x on two branches, with same content but different history, then rename x
318 Add x on two branches, with same content but different history, then rename x
319 to y on one side. Similar to the case above, here the file's nodeid is
319 to y on one side. Similar to the case above, here the file's nodeid is
320 different between the branches.
320 different between the branches.
321 $ newrepo
321 $ newrepo
322 $ echo a > a
322 $ echo a > a
323 $ hg ci -Aqm 'base'
323 $ hg ci -Aqm 'base'
324 $ echo x > x
324 $ echo x > x
325 $ hg ci -Aqm 'add x'
325 $ hg ci -Aqm 'add x'
326 $ echo x2 > x
326 $ echo x2 > x
327 $ hg ci -m 'modify x'
327 $ hg ci -m 'modify x'
328 $ hg co -q 0
328 $ hg co -q 0
329 $ touch x
329 $ touch x
330 $ hg ci -Aqm 'add empty x'
330 $ hg ci -Aqm 'add empty x'
331 # Same content to avoid conflicts
331 # Same content to avoid conflicts
332 $ hg revert -r 1 x
332 $ hg revert -r 1 x
333 $ hg ci -m 'modify x to match commit 1'
333 $ hg ci -m 'modify x to match commit 1'
334 $ hg mv x y
334 $ hg mv x y
335 $ hg ci -qm 'rename x to y'
335 $ hg ci -qm 'rename x to y'
336 $ hg l
336 $ hg l
337 @ 5 rename x to y
337 @ 5 rename x to y
338 | x y
338 | x y
339 o 4 modify x to match commit 1
339 o 4 modify x to match commit 1
340 | x
340 | x
341 o 3 add empty x
341 o 3 add empty x
342 | x
342 | x
343 | o 2 modify x
343 | o 2 modify x
344 | | x
344 | | x
345 | o 1 add x
345 | o 1 add x
346 |/ x
346 |/ x
347 o 0 base
347 o 0 base
348 a
348 a
349 $ hg debugpathcopies 1 5
349 $ hg debugpathcopies 1 5
350 x -> y (no-filelog no-sidedata !)
350 x -> y (no-filelog !)
351 #if no-filelog no-sidedata
351 #if no-filelog
352 $ hg graft -r 2
352 $ hg graft -r 2
353 grafting 2:* "modify x" (glob)
353 grafting 2:* "modify x" (glob)
354 merging y and x to y
354 merging y and x to y
355 #else
355 #else
356 BROKEN: This should succeed and merge the changes from x into y
356 BROKEN: This should succeed and merge the changes from x into y
357 $ hg graft -r 2
357 $ hg graft -r 2
358 grafting 2:* "modify x" (glob)
358 grafting 2:* "modify x" (glob)
359 file 'x' was deleted in local [local] but was modified in other [graft].
359 file 'x' was deleted in local [local] but was modified in other [graft].
360 You can use (c)hanged version, leave (d)eleted, or leave (u)nresolved.
360 You can use (c)hanged version, leave (d)eleted, or leave (u)nresolved.
361 What do you want to do? u
361 What do you want to do? u
362 abort: unresolved conflicts, can't continue
362 abort: unresolved conflicts, can't continue
363 (use 'hg resolve' and 'hg graft --continue')
363 (use 'hg resolve' and 'hg graft --continue')
364 [255]
364 [255]
365 #endif
365 #endif
366 $ hg co -qC 2
366 $ hg co -qC 2
367 BROKEN: This should succeed and merge the changes from x into y
367 BROKEN: This should succeed and merge the changes from x into y
368 $ hg graft -r 5
368 $ hg graft -r 5
369 grafting 5:* "rename x to y"* (glob)
369 grafting 5:* "rename x to y"* (glob)
370 file 'x' was deleted in other [graft] but was modified in local [local].
370 file 'x' was deleted in other [graft] but was modified in local [local].
371 You can use (c)hanged version, (d)elete, or leave (u)nresolved.
371 You can use (c)hanged version, (d)elete, or leave (u)nresolved.
372 What do you want to do? u
372 What do you want to do? u
373 abort: unresolved conflicts, can't continue
373 abort: unresolved conflicts, can't continue
374 (use 'hg resolve' and 'hg graft --continue')
374 (use 'hg resolve' and 'hg graft --continue')
375 [255]
375 [255]
376
376
377 Copies via null revision (there shouldn't be any)
377 Copies via null revision (there shouldn't be any)
378 $ newrepo
378 $ newrepo
379 $ echo x > x
379 $ echo x > x
380 $ hg ci -Aqm 'add x'
380 $ hg ci -Aqm 'add x'
381 $ hg cp x y
381 $ hg cp x y
382 $ hg ci -m 'copy x to y'
382 $ hg ci -m 'copy x to y'
383 $ hg co -q null
383 $ hg co -q null
384 $ echo x > x
384 $ echo x > x
385 $ hg ci -Aqm 'add x (again)'
385 $ hg ci -Aqm 'add x (again)'
386 $ hg l
386 $ hg l
387 @ 2 add x (again)
387 @ 2 add x (again)
388 x
388 x
389 o 1 copy x to y
389 o 1 copy x to y
390 | y
390 | y
391 o 0 add x
391 o 0 add x
392 x
392 x
393 $ hg debugpathcopies 1 2
393 $ hg debugpathcopies 1 2
394 $ hg debugpathcopies 2 1
394 $ hg debugpathcopies 2 1
395 $ hg graft -r 1
395 $ hg graft -r 1
396 grafting 1:* "copy x to y" (glob)
396 grafting 1:* "copy x to y" (glob)
@@ -1,607 +1,606 b''
1 #testcases filelog compatibility changeset sidedata
1 #testcases filelog compatibility changeset sidedata
2
2
3 $ cat >> $HGRCPATH << EOF
3 $ cat >> $HGRCPATH << EOF
4 > [extensions]
4 > [extensions]
5 > rebase=
5 > rebase=
6 > [alias]
6 > [alias]
7 > l = log -G -T '{rev} {desc}\n{files}\n'
7 > l = log -G -T '{rev} {desc}\n{files}\n'
8 > EOF
8 > EOF
9
9
10 #if compatibility
10 #if compatibility
11 $ cat >> $HGRCPATH << EOF
11 $ cat >> $HGRCPATH << EOF
12 > [experimental]
12 > [experimental]
13 > copies.read-from = compatibility
13 > copies.read-from = compatibility
14 > EOF
14 > EOF
15 #endif
15 #endif
16
16
17 #if changeset
17 #if changeset
18 $ cat >> $HGRCPATH << EOF
18 $ cat >> $HGRCPATH << EOF
19 > [experimental]
19 > [experimental]
20 > copies.read-from = changeset-only
20 > copies.read-from = changeset-only
21 > copies.write-to = changeset-only
21 > copies.write-to = changeset-only
22 > EOF
22 > EOF
23 #endif
23 #endif
24
24
25 #if sidedata
25 #if sidedata
26 $ cat >> $HGRCPATH << EOF
26 $ cat >> $HGRCPATH << EOF
27 > [format]
27 > [format]
28 > exp-use-copies-side-data-changeset = yes
28 > exp-use-copies-side-data-changeset = yes
29 > EOF
29 > EOF
30 #endif
30 #endif
31
31
32 $ REPONUM=0
32 $ REPONUM=0
33 $ newrepo() {
33 $ newrepo() {
34 > cd $TESTTMP
34 > cd $TESTTMP
35 > REPONUM=`expr $REPONUM + 1`
35 > REPONUM=`expr $REPONUM + 1`
36 > hg init repo-$REPONUM
36 > hg init repo-$REPONUM
37 > cd repo-$REPONUM
37 > cd repo-$REPONUM
38 > }
38 > }
39
39
40 Simple rename case
40 Simple rename case
41 $ newrepo
41 $ newrepo
42 $ echo x > x
42 $ echo x > x
43 $ hg ci -Aqm 'add x'
43 $ hg ci -Aqm 'add x'
44 $ hg mv x y
44 $ hg mv x y
45 $ hg debugp1copies
45 $ hg debugp1copies
46 x -> y
46 x -> y
47 $ hg debugp2copies
47 $ hg debugp2copies
48 $ hg ci -m 'rename x to y'
48 $ hg ci -m 'rename x to y'
49 $ hg l
49 $ hg l
50 @ 1 rename x to y
50 @ 1 rename x to y
51 | x y
51 | x y
52 o 0 add x
52 o 0 add x
53 x
53 x
54 $ hg debugp1copies -r 1
54 $ hg debugp1copies -r 1
55 x -> y
55 x -> y
56 $ hg debugpathcopies 0 1
56 $ hg debugpathcopies 0 1
57 x -> y
57 x -> y
58 $ hg debugpathcopies 1 0
58 $ hg debugpathcopies 1 0
59 y -> x
59 y -> x
60 Test filtering copies by path. We do filtering by destination.
60 Test filtering copies by path. We do filtering by destination.
61 $ hg debugpathcopies 0 1 x
61 $ hg debugpathcopies 0 1 x
62 $ hg debugpathcopies 1 0 x
62 $ hg debugpathcopies 1 0 x
63 y -> x
63 y -> x
64 $ hg debugpathcopies 0 1 y
64 $ hg debugpathcopies 0 1 y
65 x -> y
65 x -> y
66 $ hg debugpathcopies 1 0 y
66 $ hg debugpathcopies 1 0 y
67
67
68 Copies not including commit changes
68 Copies not including commit changes
69 $ newrepo
69 $ newrepo
70 $ echo x > x
70 $ echo x > x
71 $ hg ci -Aqm 'add x'
71 $ hg ci -Aqm 'add x'
72 $ hg mv x y
72 $ hg mv x y
73 $ hg debugpathcopies . .
73 $ hg debugpathcopies . .
74 $ hg debugpathcopies . 'wdir()'
74 $ hg debugpathcopies . 'wdir()'
75 x -> y
75 x -> y
76 $ hg debugpathcopies 'wdir()' .
76 $ hg debugpathcopies 'wdir()' .
77 y -> x
77 y -> x
78
78
79 Copy a file onto another file
79 Copy a file onto another file
80 $ newrepo
80 $ newrepo
81 $ echo x > x
81 $ echo x > x
82 $ echo y > y
82 $ echo y > y
83 $ hg ci -Aqm 'add x and y'
83 $ hg ci -Aqm 'add x and y'
84 $ hg cp -f x y
84 $ hg cp -f x y
85 $ hg debugp1copies
85 $ hg debugp1copies
86 x -> y
86 x -> y
87 $ hg debugp2copies
87 $ hg debugp2copies
88 $ hg ci -m 'copy x onto y'
88 $ hg ci -m 'copy x onto y'
89 $ hg l
89 $ hg l
90 @ 1 copy x onto y
90 @ 1 copy x onto y
91 | y
91 | y
92 o 0 add x and y
92 o 0 add x and y
93 x y
93 x y
94 $ hg debugp1copies -r 1
94 $ hg debugp1copies -r 1
95 x -> y
95 x -> y
96 Incorrectly doesn't show the rename
96 Incorrectly doesn't show the rename
97 $ hg debugpathcopies 0 1
97 $ hg debugpathcopies 0 1
98
98
99 Copy a file onto another file with same content. If metadata is stored in changeset, this does not
99 Copy a file onto another file with same content. If metadata is stored in changeset, this does not
100 produce a new filelog entry. The changeset's "files" entry should still list the file.
100 produce a new filelog entry. The changeset's "files" entry should still list the file.
101 $ newrepo
101 $ newrepo
102 $ echo x > x
102 $ echo x > x
103 $ echo x > x2
103 $ echo x > x2
104 $ hg ci -Aqm 'add x and x2 with same content'
104 $ hg ci -Aqm 'add x and x2 with same content'
105 $ hg cp -f x x2
105 $ hg cp -f x x2
106 $ hg ci -m 'copy x onto x2'
106 $ hg ci -m 'copy x onto x2'
107 $ hg l
107 $ hg l
108 @ 1 copy x onto x2
108 @ 1 copy x onto x2
109 | x2
109 | x2
110 o 0 add x and x2 with same content
110 o 0 add x and x2 with same content
111 x x2
111 x x2
112 $ hg debugp1copies -r 1
112 $ hg debugp1copies -r 1
113 x -> x2
113 x -> x2
114 Incorrectly doesn't show the rename
114 Incorrectly doesn't show the rename
115 $ hg debugpathcopies 0 1
115 $ hg debugpathcopies 0 1
116
116
117 Rename file in a loop: x->y->z->x
117 Rename file in a loop: x->y->z->x
118 $ newrepo
118 $ newrepo
119 $ echo x > x
119 $ echo x > x
120 $ hg ci -Aqm 'add x'
120 $ hg ci -Aqm 'add x'
121 $ hg mv x y
121 $ hg mv x y
122 $ hg debugp1copies
122 $ hg debugp1copies
123 x -> y
123 x -> y
124 $ hg debugp2copies
124 $ hg debugp2copies
125 $ hg ci -m 'rename x to y'
125 $ hg ci -m 'rename x to y'
126 $ hg mv y z
126 $ hg mv y z
127 $ hg ci -m 'rename y to z'
127 $ hg ci -m 'rename y to z'
128 $ hg mv z x
128 $ hg mv z x
129 $ hg ci -m 'rename z to x'
129 $ hg ci -m 'rename z to x'
130 $ hg l
130 $ hg l
131 @ 3 rename z to x
131 @ 3 rename z to x
132 | x z
132 | x z
133 o 2 rename y to z
133 o 2 rename y to z
134 | y z
134 | y z
135 o 1 rename x to y
135 o 1 rename x to y
136 | x y
136 | x y
137 o 0 add x
137 o 0 add x
138 x
138 x
139 $ hg debugpathcopies 0 3
139 $ hg debugpathcopies 0 3
140
140
141 Copy x to z, then remove z, then copy x2 (same content as x) to z. With copy metadata in the
141 Copy x to z, then remove z, then copy x2 (same content as x) to z. With copy metadata in the
142 changeset, the two copies here will have the same filelog entry, so ctx['z'].introrev() might point
142 changeset, the two copies here will have the same filelog entry, so ctx['z'].introrev() might point
143 to the first commit that added the file. We should still report the copy as being from x2.
143 to the first commit that added the file. We should still report the copy as being from x2.
144 $ newrepo
144 $ newrepo
145 $ echo x > x
145 $ echo x > x
146 $ echo x > x2
146 $ echo x > x2
147 $ hg ci -Aqm 'add x and x2 with same content'
147 $ hg ci -Aqm 'add x and x2 with same content'
148 $ hg cp x z
148 $ hg cp x z
149 $ hg ci -qm 'copy x to z'
149 $ hg ci -qm 'copy x to z'
150 $ hg rm z
150 $ hg rm z
151 $ hg ci -m 'remove z'
151 $ hg ci -m 'remove z'
152 $ hg cp x2 z
152 $ hg cp x2 z
153 $ hg ci -m 'copy x2 to z'
153 $ hg ci -m 'copy x2 to z'
154 $ hg l
154 $ hg l
155 @ 3 copy x2 to z
155 @ 3 copy x2 to z
156 | z
156 | z
157 o 2 remove z
157 o 2 remove z
158 | z
158 | z
159 o 1 copy x to z
159 o 1 copy x to z
160 | z
160 | z
161 o 0 add x and x2 with same content
161 o 0 add x and x2 with same content
162 x x2
162 x x2
163 $ hg debugp1copies -r 3
163 $ hg debugp1copies -r 3
164 x2 -> z
164 x2 -> z
165 $ hg debugpathcopies 0 3
165 $ hg debugpathcopies 0 3
166 x2 -> z
166 x2 -> z
167
167
168 Create x and y, then rename them both to the same name, but on different sides of a fork
168 Create x and y, then rename them both to the same name, but on different sides of a fork
169 $ newrepo
169 $ newrepo
170 $ echo x > x
170 $ echo x > x
171 $ echo y > y
171 $ echo y > y
172 $ hg ci -Aqm 'add x and y'
172 $ hg ci -Aqm 'add x and y'
173 $ hg mv x z
173 $ hg mv x z
174 $ hg ci -qm 'rename x to z'
174 $ hg ci -qm 'rename x to z'
175 $ hg co -q 0
175 $ hg co -q 0
176 $ hg mv y z
176 $ hg mv y z
177 $ hg ci -qm 'rename y to z'
177 $ hg ci -qm 'rename y to z'
178 $ hg l
178 $ hg l
179 @ 2 rename y to z
179 @ 2 rename y to z
180 | y z
180 | y z
181 | o 1 rename x to z
181 | o 1 rename x to z
182 |/ x z
182 |/ x z
183 o 0 add x and y
183 o 0 add x and y
184 x y
184 x y
185 $ hg debugpathcopies 1 2
185 $ hg debugpathcopies 1 2
186 z -> x
186 z -> x
187 y -> z
187 y -> z
188
188
189 Fork renames x to y on one side and removes x on the other
189 Fork renames x to y on one side and removes x on the other
190 $ newrepo
190 $ newrepo
191 $ echo x > x
191 $ echo x > x
192 $ hg ci -Aqm 'add x'
192 $ hg ci -Aqm 'add x'
193 $ hg mv x y
193 $ hg mv x y
194 $ hg ci -m 'rename x to y'
194 $ hg ci -m 'rename x to y'
195 $ hg co -q 0
195 $ hg co -q 0
196 $ hg rm x
196 $ hg rm x
197 $ hg ci -m 'remove x'
197 $ hg ci -m 'remove x'
198 created new head
198 created new head
199 $ hg l
199 $ hg l
200 @ 2 remove x
200 @ 2 remove x
201 | x
201 | x
202 | o 1 rename x to y
202 | o 1 rename x to y
203 |/ x y
203 |/ x y
204 o 0 add x
204 o 0 add x
205 x
205 x
206 $ hg debugpathcopies 1 2
206 $ hg debugpathcopies 1 2
207
207
208 Merge rename from other branch
208 Merge rename from other branch
209 $ newrepo
209 $ newrepo
210 $ echo x > x
210 $ echo x > x
211 $ hg ci -Aqm 'add x'
211 $ hg ci -Aqm 'add x'
212 $ hg mv x y
212 $ hg mv x y
213 $ hg ci -m 'rename x to y'
213 $ hg ci -m 'rename x to y'
214 $ hg co -q 0
214 $ hg co -q 0
215 $ echo z > z
215 $ echo z > z
216 $ hg ci -Aqm 'add z'
216 $ hg ci -Aqm 'add z'
217 $ hg merge -q 1
217 $ hg merge -q 1
218 $ hg debugp1copies
218 $ hg debugp1copies
219 $ hg debugp2copies
219 $ hg debugp2copies
220 $ hg ci -m 'merge rename from p2'
220 $ hg ci -m 'merge rename from p2'
221 $ hg l
221 $ hg l
222 @ 3 merge rename from p2
222 @ 3 merge rename from p2
223 |\
223 |\
224 | o 2 add z
224 | o 2 add z
225 | | z
225 | | z
226 o | 1 rename x to y
226 o | 1 rename x to y
227 |/ x y
227 |/ x y
228 o 0 add x
228 o 0 add x
229 x
229 x
230 Perhaps we should indicate the rename here, but `hg status` is documented to be weird during
230 Perhaps we should indicate the rename here, but `hg status` is documented to be weird during
231 merges, so...
231 merges, so...
232 $ hg debugp1copies -r 3
232 $ hg debugp1copies -r 3
233 $ hg debugp2copies -r 3
233 $ hg debugp2copies -r 3
234 $ hg debugpathcopies 0 3
234 $ hg debugpathcopies 0 3
235 x -> y
235 x -> y
236 $ hg debugpathcopies 1 2
236 $ hg debugpathcopies 1 2
237 y -> x
237 y -> x
238 $ hg debugpathcopies 1 3
238 $ hg debugpathcopies 1 3
239 $ hg debugpathcopies 2 3
239 $ hg debugpathcopies 2 3
240 x -> y
240 x -> y
241
241
242 Copy file from either side in a merge
242 Copy file from either side in a merge
243 $ newrepo
243 $ newrepo
244 $ echo x > x
244 $ echo x > x
245 $ hg ci -Aqm 'add x'
245 $ hg ci -Aqm 'add x'
246 $ hg co -q null
246 $ hg co -q null
247 $ echo y > y
247 $ echo y > y
248 $ hg ci -Aqm 'add y'
248 $ hg ci -Aqm 'add y'
249 $ hg merge -q 0
249 $ hg merge -q 0
250 $ hg cp y z
250 $ hg cp y z
251 $ hg debugp1copies
251 $ hg debugp1copies
252 y -> z
252 y -> z
253 $ hg debugp2copies
253 $ hg debugp2copies
254 $ hg ci -m 'copy file from p1 in merge'
254 $ hg ci -m 'copy file from p1 in merge'
255 $ hg co -q 1
255 $ hg co -q 1
256 $ hg merge -q 0
256 $ hg merge -q 0
257 $ hg cp x z
257 $ hg cp x z
258 $ hg debugp1copies
258 $ hg debugp1copies
259 $ hg debugp2copies
259 $ hg debugp2copies
260 x -> z
260 x -> z
261 $ hg ci -qm 'copy file from p2 in merge'
261 $ hg ci -qm 'copy file from p2 in merge'
262 $ hg l
262 $ hg l
263 @ 3 copy file from p2 in merge
263 @ 3 copy file from p2 in merge
264 |\ z
264 |\ z
265 +---o 2 copy file from p1 in merge
265 +---o 2 copy file from p1 in merge
266 | |/ z
266 | |/ z
267 | o 1 add y
267 | o 1 add y
268 | y
268 | y
269 o 0 add x
269 o 0 add x
270 x
270 x
271 $ hg debugp1copies -r 2
271 $ hg debugp1copies -r 2
272 y -> z
272 y -> z
273 $ hg debugp2copies -r 2
273 $ hg debugp2copies -r 2
274 $ hg debugpathcopies 1 2
274 $ hg debugpathcopies 1 2
275 y -> z
275 y -> z
276 $ hg debugpathcopies 0 2
276 $ hg debugpathcopies 0 2
277 $ hg debugp1copies -r 3
277 $ hg debugp1copies -r 3
278 $ hg debugp2copies -r 3
278 $ hg debugp2copies -r 3
279 x -> z
279 x -> z
280 $ hg debugpathcopies 1 3
280 $ hg debugpathcopies 1 3
281 $ hg debugpathcopies 0 3
281 $ hg debugpathcopies 0 3
282 x -> z
282 x -> z
283
283
284 Copy file that exists on both sides of the merge, same content on both sides
284 Copy file that exists on both sides of the merge, same content on both sides
285 $ newrepo
285 $ newrepo
286 $ echo x > x
286 $ echo x > x
287 $ hg ci -Aqm 'add x on branch 1'
287 $ hg ci -Aqm 'add x on branch 1'
288 $ hg co -q null
288 $ hg co -q null
289 $ echo x > x
289 $ echo x > x
290 $ hg ci -Aqm 'add x on branch 2'
290 $ hg ci -Aqm 'add x on branch 2'
291 $ hg merge -q 0
291 $ hg merge -q 0
292 $ hg cp x z
292 $ hg cp x z
293 $ hg debugp1copies
293 $ hg debugp1copies
294 x -> z
294 x -> z
295 $ hg debugp2copies
295 $ hg debugp2copies
296 $ hg ci -qm 'merge'
296 $ hg ci -qm 'merge'
297 $ hg l
297 $ hg l
298 @ 2 merge
298 @ 2 merge
299 |\ z
299 |\ z
300 | o 1 add x on branch 2
300 | o 1 add x on branch 2
301 | x
301 | x
302 o 0 add x on branch 1
302 o 0 add x on branch 1
303 x
303 x
304 $ hg debugp1copies -r 2
304 $ hg debugp1copies -r 2
305 x -> z
305 x -> z
306 $ hg debugp2copies -r 2
306 $ hg debugp2copies -r 2
307 It's a little weird that it shows up on both sides
307 It's a little weird that it shows up on both sides
308 $ hg debugpathcopies 1 2
308 $ hg debugpathcopies 1 2
309 x -> z
309 x -> z
310 $ hg debugpathcopies 0 2
310 $ hg debugpathcopies 0 2
311 x -> z (filelog !)
311 x -> z (filelog !)
312 x -> z (sidedata !)
313
312
314 Copy file that exists on both sides of the merge, different content
313 Copy file that exists on both sides of the merge, different content
315 $ newrepo
314 $ newrepo
316 $ echo branch1 > x
315 $ echo branch1 > x
317 $ hg ci -Aqm 'add x on branch 1'
316 $ hg ci -Aqm 'add x on branch 1'
318 $ hg co -q null
317 $ hg co -q null
319 $ echo branch2 > x
318 $ echo branch2 > x
320 $ hg ci -Aqm 'add x on branch 2'
319 $ hg ci -Aqm 'add x on branch 2'
321 $ hg merge -q 0
320 $ hg merge -q 0
322 warning: conflicts while merging x! (edit, then use 'hg resolve --mark')
321 warning: conflicts while merging x! (edit, then use 'hg resolve --mark')
323 [1]
322 [1]
324 $ echo resolved > x
323 $ echo resolved > x
325 $ hg resolve -m x
324 $ hg resolve -m x
326 (no more unresolved files)
325 (no more unresolved files)
327 $ hg cp x z
326 $ hg cp x z
328 $ hg debugp1copies
327 $ hg debugp1copies
329 x -> z
328 x -> z
330 $ hg debugp2copies
329 $ hg debugp2copies
331 $ hg ci -qm 'merge'
330 $ hg ci -qm 'merge'
332 $ hg l
331 $ hg l
333 @ 2 merge
332 @ 2 merge
334 |\ x z
333 |\ x z
335 | o 1 add x on branch 2
334 | o 1 add x on branch 2
336 | x
335 | x
337 o 0 add x on branch 1
336 o 0 add x on branch 1
338 x
337 x
339 $ hg debugp1copies -r 2
338 $ hg debugp1copies -r 2
340 x -> z (changeset !)
339 x -> z (changeset !)
340 x -> z (sidedata !)
341 $ hg debugp2copies -r 2
341 $ hg debugp2copies -r 2
342 x -> z (no-changeset !)
342 x -> z (no-changeset no-sidedata !)
343 $ hg debugpathcopies 1 2
343 $ hg debugpathcopies 1 2
344 x -> z (changeset !)
344 x -> z (changeset !)
345 x -> z (sidedata !)
345 $ hg debugpathcopies 0 2
346 $ hg debugpathcopies 0 2
346 x -> z (no-changeset !)
347 x -> z (no-changeset no-sidedata !)
347
348
348 Copy x->y on one side of merge and copy x->z on the other side. Pathcopies from one parent
349 Copy x->y on one side of merge and copy x->z on the other side. Pathcopies from one parent
349 of the merge to the merge should include the copy from the other side.
350 of the merge to the merge should include the copy from the other side.
350 $ newrepo
351 $ newrepo
351 $ echo x > x
352 $ echo x > x
352 $ hg ci -Aqm 'add x'
353 $ hg ci -Aqm 'add x'
353 $ hg cp x y
354 $ hg cp x y
354 $ hg ci -qm 'copy x to y'
355 $ hg ci -qm 'copy x to y'
355 $ hg co -q 0
356 $ hg co -q 0
356 $ hg cp x z
357 $ hg cp x z
357 $ hg ci -qm 'copy x to z'
358 $ hg ci -qm 'copy x to z'
358 $ hg merge -q 1
359 $ hg merge -q 1
359 $ hg ci -m 'merge copy x->y and copy x->z'
360 $ hg ci -m 'merge copy x->y and copy x->z'
360 $ hg l
361 $ hg l
361 @ 3 merge copy x->y and copy x->z
362 @ 3 merge copy x->y and copy x->z
362 |\
363 |\
363 | o 2 copy x to z
364 | o 2 copy x to z
364 | | z
365 | | z
365 o | 1 copy x to y
366 o | 1 copy x to y
366 |/ y
367 |/ y
367 o 0 add x
368 o 0 add x
368 x
369 x
369 $ hg debugp1copies -r 3
370 $ hg debugp1copies -r 3
370 $ hg debugp2copies -r 3
371 $ hg debugp2copies -r 3
371 $ hg debugpathcopies 2 3
372 $ hg debugpathcopies 2 3
372 x -> y
373 x -> y
373 $ hg debugpathcopies 1 3
374 $ hg debugpathcopies 1 3
374 x -> z
375 x -> z
375
376
376 Copy x to y on one side of merge, create y and rename to z on the other side.
377 Copy x to y on one side of merge, create y and rename to z on the other side.
377 $ newrepo
378 $ newrepo
378 $ echo x > x
379 $ echo x > x
379 $ hg ci -Aqm 'add x'
380 $ hg ci -Aqm 'add x'
380 $ hg cp x y
381 $ hg cp x y
381 $ hg ci -qm 'copy x to y'
382 $ hg ci -qm 'copy x to y'
382 $ hg co -q 0
383 $ hg co -q 0
383 $ echo y > y
384 $ echo y > y
384 $ hg ci -Aqm 'add y'
385 $ hg ci -Aqm 'add y'
385 $ hg mv y z
386 $ hg mv y z
386 $ hg ci -m 'rename y to z'
387 $ hg ci -m 'rename y to z'
387 $ hg merge -q 1
388 $ hg merge -q 1
388 $ hg ci -m 'merge'
389 $ hg ci -m 'merge'
389 $ hg l
390 $ hg l
390 @ 4 merge
391 @ 4 merge
391 |\
392 |\
392 | o 3 rename y to z
393 | o 3 rename y to z
393 | | y z
394 | | y z
394 | o 2 add y
395 | o 2 add y
395 | | y
396 | | y
396 o | 1 copy x to y
397 o | 1 copy x to y
397 |/ y
398 |/ y
398 o 0 add x
399 o 0 add x
399 x
400 x
400 $ hg debugp1copies -r 3
401 $ hg debugp1copies -r 3
401 y -> z
402 y -> z
402 $ hg debugp2copies -r 3
403 $ hg debugp2copies -r 3
403 $ hg debugpathcopies 2 3
404 $ hg debugpathcopies 2 3
404 y -> z
405 y -> z
405 $ hg debugpathcopies 1 3
406 $ hg debugpathcopies 1 3
406 y -> z (no-filelog no-sidedata !)
407 y -> z (no-filelog !)
407
408
408 Create x and y, then rename x to z on one side of merge, and rename y to z and
409 Create x and y, then rename x to z on one side of merge, and rename y to z and
409 modify z on the other side. When storing copies in the changeset, we don't
410 modify z on the other side. When storing copies in the changeset, we don't
410 filter out copies whose target was created on the other side of the merge.
411 filter out copies whose target was created on the other side of the merge.
411 $ newrepo
412 $ newrepo
412 $ echo x > x
413 $ echo x > x
413 $ echo y > y
414 $ echo y > y
414 $ hg ci -Aqm 'add x and y'
415 $ hg ci -Aqm 'add x and y'
415 $ hg mv x z
416 $ hg mv x z
416 $ hg ci -qm 'rename x to z'
417 $ hg ci -qm 'rename x to z'
417 $ hg co -q 0
418 $ hg co -q 0
418 $ hg mv y z
419 $ hg mv y z
419 $ hg ci -qm 'rename y to z'
420 $ hg ci -qm 'rename y to z'
420 $ echo z >> z
421 $ echo z >> z
421 $ hg ci -m 'modify z'
422 $ hg ci -m 'modify z'
422 $ hg merge -q 1
423 $ hg merge -q 1
423 warning: conflicts while merging z! (edit, then use 'hg resolve --mark')
424 warning: conflicts while merging z! (edit, then use 'hg resolve --mark')
424 [1]
425 [1]
425 $ echo z > z
426 $ echo z > z
426 $ hg resolve -qm z
427 $ hg resolve -qm z
427 $ hg ci -m 'merge 1 into 3'
428 $ hg ci -m 'merge 1 into 3'
428 Try merging the other direction too
429 Try merging the other direction too
429 $ hg co -q 1
430 $ hg co -q 1
430 $ hg merge -q 3
431 $ hg merge -q 3
431 warning: conflicts while merging z! (edit, then use 'hg resolve --mark')
432 warning: conflicts while merging z! (edit, then use 'hg resolve --mark')
432 [1]
433 [1]
433 $ echo z > z
434 $ echo z > z
434 $ hg resolve -qm z
435 $ hg resolve -qm z
435 $ hg ci -m 'merge 3 into 1'
436 $ hg ci -m 'merge 3 into 1'
436 created new head
437 created new head
437 $ hg l
438 $ hg l
438 @ 5 merge 3 into 1
439 @ 5 merge 3 into 1
439 |\ z
440 |\ z
440 +---o 4 merge 1 into 3
441 +---o 4 merge 1 into 3
441 | |/ z
442 | |/ z
442 | o 3 modify z
443 | o 3 modify z
443 | | z
444 | | z
444 | o 2 rename y to z
445 | o 2 rename y to z
445 | | y z
446 | | y z
446 o | 1 rename x to z
447 o | 1 rename x to z
447 |/ x z
448 |/ x z
448 o 0 add x and y
449 o 0 add x and y
449 x y
450 x y
450 $ hg debugpathcopies 1 4
451 $ hg debugpathcopies 1 4
451 y -> z (no-filelog no-sidedata !)
452 y -> z (no-filelog !)
452 $ hg debugpathcopies 2 4
453 $ hg debugpathcopies 2 4
453 x -> z (no-filelog no-sidedata !)
454 x -> z (no-filelog !)
454 $ hg debugpathcopies 0 4
455 $ hg debugpathcopies 0 4
455 x -> z (filelog !)
456 x -> z (filelog !)
456 x -> z (sidedata !)
457 y -> z (no-filelog !)
457 y -> z (compatibility !)
458 y -> z (changeset !)
459 $ hg debugpathcopies 1 5
458 $ hg debugpathcopies 1 5
460 y -> z (no-filelog no-sidedata !)
459 y -> z (no-filelog !)
461 $ hg debugpathcopies 2 5
460 $ hg debugpathcopies 2 5
462 x -> z (no-filelog no-sidedata !)
461 x -> z (no-filelog !)
463 $ hg debugpathcopies 0 5
462 $ hg debugpathcopies 0 5
464 x -> z
463 x -> z
465
464
466
465
467 Test for a case in fullcopytracing algorithm where neither of the merging csets
466 Test for a case in fullcopytracing algorithm where neither of the merging csets
468 is a descendant of the merge base. This test reflects that the algorithm
467 is a descendant of the merge base. This test reflects that the algorithm
469 correctly finds the copies:
468 correctly finds the copies:
470
469
471 $ cat >> $HGRCPATH << EOF
470 $ cat >> $HGRCPATH << EOF
472 > [experimental]
471 > [experimental]
473 > evolution.createmarkers=True
472 > evolution.createmarkers=True
474 > evolution.allowunstable=True
473 > evolution.allowunstable=True
475 > EOF
474 > EOF
476
475
477 $ newrepo
476 $ newrepo
478 $ echo a > a
477 $ echo a > a
479 $ hg add a
478 $ hg add a
480 $ hg ci -m "added a"
479 $ hg ci -m "added a"
481 $ echo b > b
480 $ echo b > b
482 $ hg add b
481 $ hg add b
483 $ hg ci -m "added b"
482 $ hg ci -m "added b"
484
483
485 $ hg mv b b1
484 $ hg mv b b1
486 $ hg ci -m "rename b to b1"
485 $ hg ci -m "rename b to b1"
487
486
488 $ hg up ".^"
487 $ hg up ".^"
489 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
488 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
490 $ echo d > d
489 $ echo d > d
491 $ hg add d
490 $ hg add d
492 $ hg ci -m "added d"
491 $ hg ci -m "added d"
493 created new head
492 created new head
494
493
495 $ echo baba >> b
494 $ echo baba >> b
496 $ hg ci --amend -m "added d, modified b"
495 $ hg ci --amend -m "added d, modified b"
497
496
498 $ hg l --hidden
497 $ hg l --hidden
499 @ 4 added d, modified b
498 @ 4 added d, modified b
500 | b d
499 | b d
501 | x 3 added d
500 | x 3 added d
502 |/ d
501 |/ d
503 | o 2 rename b to b1
502 | o 2 rename b to b1
504 |/ b b1
503 |/ b b1
505 o 1 added b
504 o 1 added b
506 | b
505 | b
507 o 0 added a
506 o 0 added a
508 a
507 a
509
508
510 Grafting revision 4 on top of revision 2, showing that it respect the rename:
509 Grafting revision 4 on top of revision 2, showing that it respect the rename:
511
510
512 $ hg up 2 -q
511 $ hg up 2 -q
513 $ hg graft -r 4 --base 3 --hidden
512 $ hg graft -r 4 --base 3 --hidden
514 grafting 4:af28412ec03c "added d, modified b" (tip) (no-changeset !)
513 grafting 4:af28412ec03c "added d, modified b" (tip) (no-changeset !)
515 grafting 4:6325ca0b7a1c "added d, modified b" (tip) (changeset !)
514 grafting 4:6325ca0b7a1c "added d, modified b" (tip) (changeset !)
516 merging b1 and b to b1
515 merging b1 and b to b1
517
516
518 $ hg l -l1 -p
517 $ hg l -l1 -p
519 @ 5 added d, modified b
518 @ 5 added d, modified b
520 | b1
519 | b1
521 ~ diff -r 5a4825cc2926 -r 94a2f1a0e8e2 b1 (no-changeset !)
520 ~ diff -r 5a4825cc2926 -r 94a2f1a0e8e2 b1 (no-changeset !)
522 ~ diff -r 0a0ed3b3251c -r d544fb655520 b1 (changeset !)
521 ~ diff -r 0a0ed3b3251c -r d544fb655520 b1 (changeset !)
523 --- a/b1 Thu Jan 01 00:00:00 1970 +0000
522 --- a/b1 Thu Jan 01 00:00:00 1970 +0000
524 +++ b/b1 Thu Jan 01 00:00:00 1970 +0000
523 +++ b/b1 Thu Jan 01 00:00:00 1970 +0000
525 @@ -1,1 +1,2 @@
524 @@ -1,1 +1,2 @@
526 b
525 b
527 +baba
526 +baba
528
527
529 Test to make sure that fullcopytracing algorithm doesn't fail when neither of the
528 Test to make sure that fullcopytracing algorithm doesn't fail when neither of the
530 merging csets is a descendant of the base.
529 merging csets is a descendant of the base.
531 -------------------------------------------------------------------------------------------------
530 -------------------------------------------------------------------------------------------------
532
531
533 $ newrepo
532 $ newrepo
534 $ echo a > a
533 $ echo a > a
535 $ hg add a
534 $ hg add a
536 $ hg ci -m "added a"
535 $ hg ci -m "added a"
537 $ echo b > b
536 $ echo b > b
538 $ hg add b
537 $ hg add b
539 $ hg ci -m "added b"
538 $ hg ci -m "added b"
540
539
541 $ echo foobar > willconflict
540 $ echo foobar > willconflict
542 $ hg add willconflict
541 $ hg add willconflict
543 $ hg ci -m "added willconflict"
542 $ hg ci -m "added willconflict"
544 $ echo c > c
543 $ echo c > c
545 $ hg add c
544 $ hg add c
546 $ hg ci -m "added c"
545 $ hg ci -m "added c"
547
546
548 $ hg l
547 $ hg l
549 @ 3 added c
548 @ 3 added c
550 | c
549 | c
551 o 2 added willconflict
550 o 2 added willconflict
552 | willconflict
551 | willconflict
553 o 1 added b
552 o 1 added b
554 | b
553 | b
555 o 0 added a
554 o 0 added a
556 a
555 a
557
556
558 $ hg up ".^^"
557 $ hg up ".^^"
559 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
558 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
560 $ echo d > d
559 $ echo d > d
561 $ hg add d
560 $ hg add d
562 $ hg ci -m "added d"
561 $ hg ci -m "added d"
563 created new head
562 created new head
564
563
565 $ echo barfoo > willconflict
564 $ echo barfoo > willconflict
566 $ hg add willconflict
565 $ hg add willconflict
567 $ hg ci --amend -m "added willconflict and d"
566 $ hg ci --amend -m "added willconflict and d"
568
567
569 $ hg l
568 $ hg l
570 @ 5 added willconflict and d
569 @ 5 added willconflict and d
571 | d willconflict
570 | d willconflict
572 | o 3 added c
571 | o 3 added c
573 | | c
572 | | c
574 | o 2 added willconflict
573 | o 2 added willconflict
575 |/ willconflict
574 |/ willconflict
576 o 1 added b
575 o 1 added b
577 | b
576 | b
578 o 0 added a
577 o 0 added a
579 a
578 a
580
579
581 $ hg rebase -r . -d 2 -t :other
580 $ hg rebase -r . -d 2 -t :other
582 rebasing 5:5018b1509e94 "added willconflict and d" (tip) (no-changeset !)
581 rebasing 5:5018b1509e94 "added willconflict and d" (tip) (no-changeset !)
583 rebasing 5:af8d273bf580 "added willconflict and d" (tip) (changeset !)
582 rebasing 5:af8d273bf580 "added willconflict and d" (tip) (changeset !)
584
583
585 $ hg up 3 -q
584 $ hg up 3 -q
586 $ hg l --hidden
585 $ hg l --hidden
587 o 6 added willconflict and d
586 o 6 added willconflict and d
588 | d willconflict
587 | d willconflict
589 | x 5 added willconflict and d
588 | x 5 added willconflict and d
590 | | d willconflict
589 | | d willconflict
591 | | x 4 added d
590 | | x 4 added d
592 | |/ d
591 | |/ d
593 +---@ 3 added c
592 +---@ 3 added c
594 | | c
593 | | c
595 o | 2 added willconflict
594 o | 2 added willconflict
596 |/ willconflict
595 |/ willconflict
597 o 1 added b
596 o 1 added b
598 | b
597 | b
599 o 0 added a
598 o 0 added a
600 a
599 a
601
600
602 Now if we trigger a merge between revision 3 and 6 using base revision 4,
601 Now if we trigger a merge between revision 3 and 6 using base revision 4,
603 neither of the merging csets will be a descendant of the base revision:
602 neither of the merging csets will be a descendant of the base revision:
604
603
605 $ hg graft -r 6 --base 4 --hidden -t :other
604 $ hg graft -r 6 --base 4 --hidden -t :other
606 grafting 6:99802e4f1e46 "added willconflict and d" (tip) (no-changeset !)
605 grafting 6:99802e4f1e46 "added willconflict and d" (tip) (no-changeset !)
607 grafting 6:b19f0df72728 "added willconflict and d" (tip) (changeset !)
606 grafting 6:b19f0df72728 "added willconflict and d" (tip) (changeset !)
General Comments 0
You need to be logged in to leave comments. Login now