changelog: parse copy metadata if available in extras...

Martin von Zweigbergk
r42313:c2165551 (branch: default, phase: draft)
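For orientation, here is a minimal sketch of the copy-metadata encoding that this revision learns to read back. The helpers mirror encodecopies() and decodecopies() from the diff below; the file names and values are invented for illustration:

    # Round-trip copy metadata the way this change expects to find it in a
    # changeset's "extra" dict: one "dest\0source" record per line.
    def encodecopies(copies):
        # sorted keys keep the encoding deterministic
        return "\n".join('%s\0%s' % (k, copies[k]) for k in sorted(copies))

    def decodecopies(data):
        try:
            copies = {}
            for l in data.split('\n'):
                k, v = l.split('\0')
                copies[k] = v
            return copies
        except ValueError:
            # malformed value, e.g. another tool reused the key name
            return None

    copies = {'b.txt': 'a.txt'}          # b.txt was copied from a.txt
    encoded = encodecopies(copies)       # -> 'b.txt\x00a.txt'
    assert decodecopies(encoded) == copies
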
diff --git a/mercurial/changelog.py b/mercurial/changelog.py
@@ -1,599 +1,621 @@
# changelog.py - changelog class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

from .i18n import _
from .node import (
    bin,
    hex,
    nullid,
)
from .thirdparty import (
    attr,
)

from . import (
    encoding,
    error,
    pycompat,
    revlog,
    util,
)
from .utils import (
    dateutil,
    stringutil,
)

_defaultextra = {'branch': 'default'}

def _string_escape(text):
    """
    >>> from .pycompat import bytechr as chr
    >>> d = {b'nl': chr(10), b'bs': chr(92), b'cr': chr(13), b'nul': chr(0)}
    >>> s = b"ab%(nl)scd%(bs)s%(bs)sn%(nul)s12ab%(cr)scd%(bs)s%(nl)s" % d
    >>> s
    'ab\\ncd\\\\\\\\n\\x0012ab\\rcd\\\\\\n'
    >>> res = _string_escape(s)
    >>> s == _string_unescape(res)
    True
    """
    # subset of the string_escape codec
    text = text.replace('\\', '\\\\').replace('\n', '\\n').replace('\r', '\\r')
    return text.replace('\0', '\\0')

def _string_unescape(text):
    if '\\0' in text:
        # fix up \0 without getting into trouble with \\0
        text = text.replace('\\\\', '\\\\\n')
        text = text.replace('\\0', '\0')
        text = text.replace('\n', '')
    return stringutil.unescapestr(text)

def decodeextra(text):
    """
    >>> from .pycompat import bytechr as chr
    >>> sorted(decodeextra(encodeextra({b'foo': b'bar', b'baz': chr(0) + b'2'})
    ...                    ).items())
    [('baz', '\\x002'), ('branch', 'default'), ('foo', 'bar')]
    >>> sorted(decodeextra(encodeextra({b'foo': b'bar',
    ...                                 b'baz': chr(92) + chr(0) + b'2'})
    ...                    ).items())
    [('baz', '\\\\\\x002'), ('branch', 'default'), ('foo', 'bar')]
    """
    extra = _defaultextra.copy()
    for l in text.split('\0'):
        if l:
            k, v = _string_unescape(l).split(':', 1)
            extra[k] = v
    return extra

def encodeextra(d):
    # keys must be sorted to produce a deterministic changelog entry
    items = [
        _string_escape('%s:%s' % (k, pycompat.bytestr(d[k])))
        for k in sorted(d)
    ]
    return "\0".join(items)

def encodecopies(copies):
    items = [
        '%s\0%s' % (k, copies[k])
        for k in sorted(copies)
    ]
    return "\n".join(items)

+def decodecopies(data):
+    try:
+        copies = {}
+        for l in data.split('\n'):
+            k, v = l.split('\0')
+            copies[k] = v
+        return copies
+    except ValueError:
+        # Perhaps someone had chosen the same key name (e.g. "p1copies") and
+        # used different syntax for the value.
+        return None
+
def stripdesc(desc):
    """strip trailing whitespace and leading and trailing empty lines"""
    return '\n'.join([l.rstrip() for l in desc.splitlines()]).strip('\n')

class appender(object):
    '''the changelog index must be updated last on disk, so we use this class
    to delay writes to it'''
    def __init__(self, vfs, name, mode, buf):
        self.data = buf
        fp = vfs(name, mode)
        self.fp = fp
        self.offset = fp.tell()
        self.size = vfs.fstat(fp).st_size
        self._end = self.size

    def end(self):
        return self._end
    def tell(self):
        return self.offset
    def flush(self):
        pass

    @property
    def closed(self):
        return self.fp.closed

    def close(self):
        self.fp.close()

    def seek(self, offset, whence=0):
        '''virtual file offset spans real file and data'''
        if whence == 0:
            self.offset = offset
        elif whence == 1:
            self.offset += offset
        elif whence == 2:
            self.offset = self.end() + offset
        if self.offset < self.size:
            self.fp.seek(self.offset)

    def read(self, count=-1):
        '''only trick here is reads that span real file and data'''
        ret = ""
        if self.offset < self.size:
            s = self.fp.read(count)
            ret = s
            self.offset += len(s)
            if count > 0:
                count -= len(s)
        if count != 0:
            doff = self.offset - self.size
            self.data.insert(0, "".join(self.data))
            del self.data[1:]
            s = self.data[0][doff:doff + count]
            self.offset += len(s)
            ret += s
        return ret

    def write(self, s):
        self.data.append(bytes(s))
        self.offset += len(s)
        self._end += len(s)

    def __enter__(self):
        self.fp.__enter__()
        return self

    def __exit__(self, *args):
        return self.fp.__exit__(*args)

def _divertopener(opener, target):
    """build an opener that writes in 'target.a' instead of 'target'"""
    def _divert(name, mode='r', checkambig=False):
        if name != target:
            return opener(name, mode)
        return opener(name + ".a", mode)
    return _divert

def _delayopener(opener, target, buf):
    """build an opener that stores chunks in 'buf' instead of 'target'"""
    def _delay(name, mode='r', checkambig=False):
        if name != target:
            return opener(name, mode)
        return appender(opener, name, mode, buf)
    return _delay

@attr.s
class _changelogrevision(object):
    # Extensions might modify _defaultextra, so let the constructor below pass
    # it in
    extra = attr.ib()
    manifest = attr.ib(default=nullid)
    user = attr.ib(default='')
    date = attr.ib(default=(0, 0))
    files = attr.ib(default=attr.Factory(list))
    description = attr.ib(default='')

class changelogrevision(object):
    """Holds results of a parsed changelog revision.

    Changelog revisions consist of multiple pieces of data, including
    the manifest node, user, and date. This object exposes a view into
    the parsed object.
    """

    __slots__ = (
        r'_offsets',
        r'_text',
    )

    def __new__(cls, text):
        if not text:
            return _changelogrevision(extra=_defaultextra)

        self = super(changelogrevision, cls).__new__(cls)
        # We could return here and implement the following as an __init__.
        # But doing it here is equivalent and saves an extra function call.

        # format used:
        # nodeid\n        : manifest node in ascii
        # user\n          : user, no \n or \r allowed
        # time tz extra\n : date (time is int or float, timezone is int)
        #                 : extra is metadata, encoded and separated by '\0'
        #                 : older versions ignore it
        # files\n\n       : files modified by the cset, no \n or \r allowed
        # (.*)            : comment (free text, ideally utf-8)
        #
        # changelog v0 doesn't use extra

        nl1 = text.index('\n')
        nl2 = text.index('\n', nl1 + 1)
        nl3 = text.index('\n', nl2 + 1)

        # The list of files may be empty. Which means nl3 is the first of the
        # double newline that precedes the description.
        if text[nl3 + 1:nl3 + 2] == '\n':
            doublenl = nl3
        else:
            doublenl = text.index('\n\n', nl3 + 1)

        self._offsets = (nl1, nl2, nl3, doublenl)
        self._text = text

        return self

    @property
    def manifest(self):
        return bin(self._text[0:self._offsets[0]])

    @property
    def user(self):
        off = self._offsets
        return encoding.tolocal(self._text[off[0] + 1:off[1]])

    @property
    def _rawdate(self):
        off = self._offsets
        dateextra = self._text[off[1] + 1:off[2]]
        return dateextra.split(' ', 2)[0:2]

    @property
    def _rawextra(self):
        off = self._offsets
        dateextra = self._text[off[1] + 1:off[2]]
        fields = dateextra.split(' ', 2)
        if len(fields) != 3:
            return None

        return fields[2]

    @property
    def date(self):
        raw = self._rawdate
        time = float(raw[0])
        # Various tools did silly things with the timezone.
        try:
            timezone = int(raw[1])
        except ValueError:
            timezone = 0

        return time, timezone

    @property
    def extra(self):
        raw = self._rawextra
        if raw is None:
            return _defaultextra

        return decodeextra(raw)

    @property
    def files(self):
        off = self._offsets
        if off[2] == off[3]:
            return []

        return self._text[off[2] + 1:off[3]].split('\n')

    @property
+    def p1copies(self):
+        rawcopies = self.extra.get('p1copies')
+        return rawcopies and decodecopies(rawcopies)
+
+    @property
+    def p2copies(self):
+        rawcopies = self.extra.get('p2copies')
+        return rawcopies and decodecopies(rawcopies)
+
+    @property
    def description(self):
        return encoding.tolocal(self._text[self._offsets[3] + 2:])

class changelog(revlog.revlog):
    def __init__(self, opener, trypending=False):
        """Load a changelog revlog using an opener.

        If ``trypending`` is true, we attempt to load the index from a
        ``00changelog.i.a`` file instead of the default ``00changelog.i``.
        The ``00changelog.i.a`` file contains index (and possibly inline
        revision) data for a transaction that hasn't been finalized yet.
        It exists in a separate file to facilitate readers (such as
        hooks processes) accessing data before a transaction is finalized.
        """
        if trypending and opener.exists('00changelog.i.a'):
            indexfile = '00changelog.i.a'
        else:
            indexfile = '00changelog.i'

        datafile = '00changelog.d'
        revlog.revlog.__init__(self, opener, indexfile, datafile=datafile,
                               checkambig=True, mmaplargeindex=True)

        if self._initempty and (self.version & 0xFFFF == revlog.REVLOGV1):
            # changelogs don't benefit from generaldelta.

            self.version &= ~revlog.FLAG_GENERALDELTA
            self._generaldelta = False

        # Delta chains for changelogs tend to be very small because entries
        # tend to be small and don't delta well with each. So disable delta
        # chains.
        self._storedeltachains = False

        self._realopener = opener
        self._delayed = False
        self._delaybuf = None
        self._divert = False
        self.filteredrevs = frozenset()

    def tiprev(self):
        for i in pycompat.xrange(len(self) - 1, -2, -1):
            if i not in self.filteredrevs:
                return i

    def tip(self):
        """filtered version of revlog.tip"""
        return self.node(self.tiprev())

    def __contains__(self, rev):
        """filtered version of revlog.__contains__"""
        return (0 <= rev < len(self)
                and rev not in self.filteredrevs)

    def __iter__(self):
        """filtered version of revlog.__iter__"""
        if len(self.filteredrevs) == 0:
            return revlog.revlog.__iter__(self)

        def filterediter():
            for i in pycompat.xrange(len(self)):
                if i not in self.filteredrevs:
                    yield i

        return filterediter()

    def revs(self, start=0, stop=None):
        """filtered version of revlog.revs"""
        for i in super(changelog, self).revs(start, stop):
            if i not in self.filteredrevs:
                yield i

    def reachableroots(self, minroot, heads, roots, includepath=False):
        return self.index.reachableroots2(minroot, heads, roots, includepath)

    def _checknofilteredinrevs(self, revs):
        """raise the appropriate error if 'revs' contains a filtered revision

        This returns a version of 'revs' to be used thereafter by the caller.
        In particular, if revs is an iterator, it is converted into a set.
        """
        safehasattr = util.safehasattr
        if safehasattr(revs, '__next__'):
            # Note that inspect.isgenerator() is not true for iterators,
            revs = set(revs)

        filteredrevs = self.filteredrevs
        if safehasattr(revs, 'first'):  # smartset
            offenders = revs & filteredrevs
        else:
            offenders = filteredrevs.intersection(revs)

        for rev in offenders:
            raise error.FilteredIndexError(rev)
        return revs

    def headrevs(self, revs=None):
        if revs is None and self.filteredrevs:
            try:
                return self.index.headrevsfiltered(self.filteredrevs)
            # AttributeError covers non-c-extension environments and
            # old c extensions without filter handling.
            except AttributeError:
                return self._headrevs()

        if self.filteredrevs:
            revs = self._checknofilteredinrevs(revs)
        return super(changelog, self).headrevs(revs)

    def strip(self, *args, **kwargs):
        # XXX make something better than assert
        # We can't expect proper strip behavior if we are filtered.
        assert not self.filteredrevs
        super(changelog, self).strip(*args, **kwargs)

    def rev(self, node):
        """filtered version of revlog.rev"""
        r = super(changelog, self).rev(node)
        if r in self.filteredrevs:
            raise error.FilteredLookupError(hex(node), self.indexfile,
                                            _('filtered node'))
        return r

    def node(self, rev):
        """filtered version of revlog.node"""
        if rev in self.filteredrevs:
            raise error.FilteredIndexError(rev)
        return super(changelog, self).node(rev)

    def linkrev(self, rev):
        """filtered version of revlog.linkrev"""
        if rev in self.filteredrevs:
            raise error.FilteredIndexError(rev)
        return super(changelog, self).linkrev(rev)

    def parentrevs(self, rev):
        """filtered version of revlog.parentrevs"""
        if rev in self.filteredrevs:
            raise error.FilteredIndexError(rev)
        return super(changelog, self).parentrevs(rev)

    def flags(self, rev):
        """filtered version of revlog.flags"""
        if rev in self.filteredrevs:
            raise error.FilteredIndexError(rev)
        return super(changelog, self).flags(rev)

    def delayupdate(self, tr):
        "delay visibility of index updates to other readers"

        if not self._delayed:
            if len(self) == 0:
                self._divert = True
                if self._realopener.exists(self.indexfile + '.a'):
                    self._realopener.unlink(self.indexfile + '.a')
                self.opener = _divertopener(self._realopener, self.indexfile)
            else:
                self._delaybuf = []
                self.opener = _delayopener(self._realopener, self.indexfile,
                                           self._delaybuf)
        self._delayed = True
        tr.addpending('cl-%i' % id(self), self._writepending)
        tr.addfinalize('cl-%i' % id(self), self._finalize)

    def _finalize(self, tr):
        "finalize index updates"
        self._delayed = False
        self.opener = self._realopener
        # move redirected index data back into place
        if self._divert:
            assert not self._delaybuf
            tmpname = self.indexfile + ".a"
            nfile = self.opener.open(tmpname)
            nfile.close()
            self.opener.rename(tmpname, self.indexfile, checkambig=True)
        elif self._delaybuf:
            fp = self.opener(self.indexfile, 'a', checkambig=True)
            fp.write("".join(self._delaybuf))
            fp.close()
            self._delaybuf = None
        self._divert = False
        # split when we're done
        self._enforceinlinesize(tr)

    def _writepending(self, tr):
        "create a file containing the unfinalized state for pretxnchangegroup"
        if self._delaybuf:
            # make a temporary copy of the index
            fp1 = self._realopener(self.indexfile)
            pendingfilename = self.indexfile + ".a"
            # register as a temp file to ensure cleanup on failure
            tr.registertmp(pendingfilename)
            # write existing data
            fp2 = self._realopener(pendingfilename, "w")
            fp2.write(fp1.read())
            # add pending data
            fp2.write("".join(self._delaybuf))
            fp2.close()
            # switch modes so finalize can simply rename
            self._delaybuf = None
            self._divert = True
            self.opener = _divertopener(self._realopener, self.indexfile)

        if self._divert:
            return True

        return False

    def _enforceinlinesize(self, tr, fp=None):
        if not self._delayed:
            revlog.revlog._enforceinlinesize(self, tr, fp)

    def read(self, node):
        """Obtain data from a parsed changelog revision.

        Returns a 6-tuple of:

        - manifest node in binary
        - author/user as a localstr
        - date as a 2-tuple of (time, timezone)
        - list of files
        - commit message as a localstr
        - dict of extra metadata

        Unless you need to access all fields, consider calling
        ``changelogrevision`` instead, as it is faster for partial object
        access.
        """
        c = changelogrevision(self.revision(node))
        return (
            c.manifest,
            c.user,
            c.date,
            c.files,
            c.description,
            c.extra
        )

    def changelogrevision(self, nodeorrev):
        """Obtain a ``changelogrevision`` for a node or revision."""
        return changelogrevision(self.revision(nodeorrev))

    def readfiles(self, node):
        """
        short version of read that only returns the files modified by the cset
        """
        text = self.revision(node)
        if not text:
            return []
        last = text.index("\n\n")
        l = text[:last].split('\n')
        return l[3:]

    def add(self, manifest, files, desc, transaction, p1, p2,
            user, date=None, extra=None, p1copies=None, p2copies=None):
        # Convert to UTF-8 encoded bytestrings as the very first
        # thing: calling any method on a localstr object will turn it
        # into a str object and the cached UTF-8 string is thus lost.
        user, desc = encoding.fromlocal(user), encoding.fromlocal(desc)

        user = user.strip()
        # An empty username or a username with a "\n" will make the
        # revision text contain two "\n\n" sequences -> corrupt
        # repository since read cannot unpack the revision.
        if not user:
            raise error.StorageError(_("empty username"))
        if "\n" in user:
            raise error.StorageError(_("username %r contains a newline")
                                     % pycompat.bytestr(user))

        desc = stripdesc(desc)

        if date:
            parseddate = "%d %d" % dateutil.parsedate(date)
        else:
            parseddate = "%d %d" % dateutil.makedate()
        if extra:
            branch = extra.get("branch")
            if branch in ("default", ""):
                del extra["branch"]
            elif branch in (".", "null", "tip"):
                raise error.StorageError(_('the name \'%s\' is reserved')
                                         % branch)
        if (p1copies or p2copies) and extra is None:
            extra = {}
        if p1copies:
            extra['p1copies'] = encodecopies(p1copies)
        if p2copies:
            extra['p2copies'] = encodecopies(p2copies)

        if extra:
            extra = encodeextra(extra)
            parseddate = "%s %s" % (parseddate, extra)
        l = [hex(manifest), user, parseddate] + sorted(files) + ["", desc]
        text = "\n".join(l)
        return self.addrevision(text, transaction, len(self), p1, p2)

    def branchinfo(self, rev):
        """return the branch name and open/close state of a revision

        This function exists because creating a changectx object
        just to access this is costly."""
        extra = self.read(rev)[5]
        return encoding.tolocal(extra.get("branch")), 'close' in extra

    def _nodeduplicatecallback(self, transaction, node):
        # keep track of revisions that got "re-added", eg: unbunde of know rev.
        #
        # We track them in a list to preserve their order from the source bundle
        duplicates = transaction.changes.setdefault('revduplicates', [])
        duplicates.append(self.rev(node))
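
To see how the new p1copies/p2copies properties fit the entry layout documented in changelogrevision.__new__ above, here is a hand-built changelog text carrying copy metadata in its extra field. This is a sketch only: _string_escape is copied from the diff, while the node hash, user, date and file names are all fabricated for illustration:

    # Entry layout (see changelogrevision.__new__):
    #   nodeid\n user\n time tz extra\n files...\n\n description
    # Copy metadata rides in "extra" under the "p1copies"/"p2copies" keys.
    def _string_escape(text):  # as in the diff above
        text = text.replace('\\', '\\\\').replace('\n', '\\n')
        text = text.replace('\r', '\\r')
        return text.replace('\0', '\\0')

    extra = {'p1copies': 'b.txt\0a.txt'}   # == encodecopies({'b.txt': 'a.txt'})
    encodedextra = "\0".join(_string_escape('%s:%s' % (k, v))
                             for k, v in sorted(extra.items()))
    entry = "\n".join([
        '0123456789abcdef0123456789abcdef01234567',  # manifest node (fake)
        'Alice <alice@example.com>',                 # user
        '1554000000 0 ' + encodedextra,              # time tz extra
        'b.txt',                                     # files touched
        '',                                          # blank line before description
        'copy a.txt to b.txt',                       # description
    ])
    # changelogrevision(entry).p1copies would then yield {'b.txt': 'a.txt'}.

Note that decodecopies() returns None for a value it cannot parse, and the `rawcopies and decodecopies(rawcopies)` idiom likewise passes through a missing key as a falsy value, so callers can treat "no metadata" and "unparseable metadata" alike.
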
@@ -1,2551 +1,2566 b''
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import filecmp
11 import filecmp
12 import os
12 import os
13 import stat
13 import stat
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 addednodeid,
17 addednodeid,
18 hex,
18 hex,
19 modifiednodeid,
19 modifiednodeid,
20 nullid,
20 nullid,
21 nullrev,
21 nullrev,
22 short,
22 short,
23 wdirfilenodeids,
23 wdirfilenodeids,
24 wdirhex,
24 wdirhex,
25 )
25 )
26 from . import (
26 from . import (
27 dagop,
27 dagop,
28 encoding,
28 encoding,
29 error,
29 error,
30 fileset,
30 fileset,
31 match as matchmod,
31 match as matchmod,
32 obsolete as obsmod,
32 obsolete as obsmod,
33 patch,
33 patch,
34 pathutil,
34 pathutil,
35 phases,
35 phases,
36 pycompat,
36 pycompat,
37 repoview,
37 repoview,
38 scmutil,
38 scmutil,
39 sparse,
39 sparse,
40 subrepo,
40 subrepo,
41 subrepoutil,
41 subrepoutil,
42 util,
42 util,
43 )
43 )
44 from .utils import (
44 from .utils import (
45 dateutil,
45 dateutil,
46 stringutil,
46 stringutil,
47 )
47 )
48
48
49 propertycache = util.propertycache
49 propertycache = util.propertycache
50
50
51 class basectx(object):
51 class basectx(object):
52 """A basectx object represents the common logic for its children:
52 """A basectx object represents the common logic for its children:
53 changectx: read-only context that is already present in the repo,
53 changectx: read-only context that is already present in the repo,
54 workingctx: a context that represents the working directory and can
54 workingctx: a context that represents the working directory and can
55 be committed,
55 be committed,
56 memctx: a context that represents changes in-memory and can also
56 memctx: a context that represents changes in-memory and can also
57 be committed."""
57 be committed."""
58
58
59 def __init__(self, repo):
59 def __init__(self, repo):
60 self._repo = repo
60 self._repo = repo
61
61
62 def __bytes__(self):
62 def __bytes__(self):
63 return short(self.node())
63 return short(self.node())
64
64
65 __str__ = encoding.strmethod(__bytes__)
65 __str__ = encoding.strmethod(__bytes__)
66
66
67 def __repr__(self):
67 def __repr__(self):
68 return r"<%s %s>" % (type(self).__name__, str(self))
68 return r"<%s %s>" % (type(self).__name__, str(self))
69
69
70 def __eq__(self, other):
70 def __eq__(self, other):
71 try:
71 try:
72 return type(self) == type(other) and self._rev == other._rev
72 return type(self) == type(other) and self._rev == other._rev
73 except AttributeError:
73 except AttributeError:
74 return False
74 return False
75
75
76 def __ne__(self, other):
76 def __ne__(self, other):
77 return not (self == other)
77 return not (self == other)
78
78
79 def __contains__(self, key):
79 def __contains__(self, key):
80 return key in self._manifest
80 return key in self._manifest
81
81
82 def __getitem__(self, key):
82 def __getitem__(self, key):
83 return self.filectx(key)
83 return self.filectx(key)
84
84
85 def __iter__(self):
85 def __iter__(self):
86 return iter(self._manifest)
86 return iter(self._manifest)
87
87
88 def _buildstatusmanifest(self, status):
88 def _buildstatusmanifest(self, status):
89 """Builds a manifest that includes the given status results, if this is
89 """Builds a manifest that includes the given status results, if this is
90 a working copy context. For non-working copy contexts, it just returns
90 a working copy context. For non-working copy contexts, it just returns
91 the normal manifest."""
91 the normal manifest."""
92 return self.manifest()
92 return self.manifest()
93
93
94 def _matchstatus(self, other, match):
94 def _matchstatus(self, other, match):
95 """This internal method provides a way for child objects to override the
95 """This internal method provides a way for child objects to override the
96 match operator.
96 match operator.
97 """
97 """
98 return match
98 return match
99
99
100 def _buildstatus(self, other, s, match, listignored, listclean,
100 def _buildstatus(self, other, s, match, listignored, listclean,
101 listunknown):
101 listunknown):
102 """build a status with respect to another context"""
102 """build a status with respect to another context"""
103 # Load earliest manifest first for caching reasons. More specifically,
103 # Load earliest manifest first for caching reasons. More specifically,
104 # if you have revisions 1000 and 1001, 1001 is probably stored as a
104 # if you have revisions 1000 and 1001, 1001 is probably stored as a
105 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
105 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
106 # 1000 and cache it so that when you read 1001, we just need to apply a
106 # 1000 and cache it so that when you read 1001, we just need to apply a
107 # delta to what's in the cache. So that's one full reconstruction + one
107 # delta to what's in the cache. So that's one full reconstruction + one
108 # delta application.
108 # delta application.
109 mf2 = None
109 mf2 = None
110 if self.rev() is not None and self.rev() < other.rev():
110 if self.rev() is not None and self.rev() < other.rev():
111 mf2 = self._buildstatusmanifest(s)
111 mf2 = self._buildstatusmanifest(s)
112 mf1 = other._buildstatusmanifest(s)
112 mf1 = other._buildstatusmanifest(s)
113 if mf2 is None:
113 if mf2 is None:
114 mf2 = self._buildstatusmanifest(s)
114 mf2 = self._buildstatusmanifest(s)
115
115
116 modified, added = [], []
116 modified, added = [], []
117 removed = []
117 removed = []
118 clean = []
118 clean = []
119 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
119 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
120 deletedset = set(deleted)
120 deletedset = set(deleted)
121 d = mf1.diff(mf2, match=match, clean=listclean)
121 d = mf1.diff(mf2, match=match, clean=listclean)
122 for fn, value in d.iteritems():
122 for fn, value in d.iteritems():
123 if fn in deletedset:
123 if fn in deletedset:
124 continue
124 continue
125 if value is None:
125 if value is None:
126 clean.append(fn)
126 clean.append(fn)
127 continue
127 continue
128 (node1, flag1), (node2, flag2) = value
128 (node1, flag1), (node2, flag2) = value
129 if node1 is None:
129 if node1 is None:
130 added.append(fn)
130 added.append(fn)
131 elif node2 is None:
131 elif node2 is None:
132 removed.append(fn)
132 removed.append(fn)
133 elif flag1 != flag2:
133 elif flag1 != flag2:
134 modified.append(fn)
134 modified.append(fn)
135 elif node2 not in wdirfilenodeids:
135 elif node2 not in wdirfilenodeids:
136 # When comparing files between two commits, we save time by
136 # When comparing files between two commits, we save time by
137 # not comparing the file contents when the nodeids differ.
137 # not comparing the file contents when the nodeids differ.
138 # Note that this means we incorrectly report a reverted change
138 # Note that this means we incorrectly report a reverted change
139 # to a file as a modification.
139 # to a file as a modification.
140 modified.append(fn)
140 modified.append(fn)
141 elif self[fn].cmp(other[fn]):
141 elif self[fn].cmp(other[fn]):
142 modified.append(fn)
142 modified.append(fn)
143 else:
143 else:
144 clean.append(fn)
144 clean.append(fn)
145
145
146 if removed:
146 if removed:
147 # need to filter files if they are already reported as removed
147 # need to filter files if they are already reported as removed
148 unknown = [fn for fn in unknown if fn not in mf1 and
148 unknown = [fn for fn in unknown if fn not in mf1 and
149 (not match or match(fn))]
149 (not match or match(fn))]
150 ignored = [fn for fn in ignored if fn not in mf1 and
150 ignored = [fn for fn in ignored if fn not in mf1 and
151 (not match or match(fn))]
151 (not match or match(fn))]
152 # if they're deleted, don't report them as removed
152 # if they're deleted, don't report them as removed
153 removed = [fn for fn in removed if fn not in deletedset]
153 removed = [fn for fn in removed if fn not in deletedset]
154
154
155 return scmutil.status(modified, added, removed, deleted, unknown,
155 return scmutil.status(modified, added, removed, deleted, unknown,
156 ignored, clean)
156 ignored, clean)
157
157
158 @propertycache
158 @propertycache
159 def substate(self):
159 def substate(self):
160 return subrepoutil.state(self, self._repo.ui)
160 return subrepoutil.state(self, self._repo.ui)
161
161
162 def subrev(self, subpath):
162 def subrev(self, subpath):
163 return self.substate[subpath][1]
163 return self.substate[subpath][1]
164
164
165 def rev(self):
165 def rev(self):
166 return self._rev
166 return self._rev
167 def node(self):
167 def node(self):
168 return self._node
168 return self._node
169 def hex(self):
169 def hex(self):
170 return hex(self.node())
170 return hex(self.node())
171 def manifest(self):
171 def manifest(self):
172 return self._manifest
172 return self._manifest
173 def manifestctx(self):
173 def manifestctx(self):
174 return self._manifestctx
174 return self._manifestctx
175 def repo(self):
175 def repo(self):
176 return self._repo
176 return self._repo
177 def phasestr(self):
177 def phasestr(self):
178 return phases.phasenames[self.phase()]
178 return phases.phasenames[self.phase()]
179 def mutable(self):
179 def mutable(self):
180 return self.phase() > phases.public
180 return self.phase() > phases.public
181
181
182 def matchfileset(self, expr, badfn=None):
182 def matchfileset(self, expr, badfn=None):
183 return fileset.match(self, expr, badfn=badfn)
183 return fileset.match(self, expr, badfn=badfn)
184
184
185 def obsolete(self):
185 def obsolete(self):
186 """True if the changeset is obsolete"""
186 """True if the changeset is obsolete"""
187 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
187 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
188
188
189 def extinct(self):
189 def extinct(self):
190 """True if the changeset is extinct"""
190 """True if the changeset is extinct"""
191 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
191 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
192
192
193 def orphan(self):
193 def orphan(self):
194 """True if the changeset is not obsolete, but its ancestor is"""
194 """True if the changeset is not obsolete, but its ancestor is"""
195 return self.rev() in obsmod.getrevs(self._repo, 'orphan')
195 return self.rev() in obsmod.getrevs(self._repo, 'orphan')
196
196
197 def phasedivergent(self):
197 def phasedivergent(self):
198 """True if the changeset tries to be a successor of a public changeset
198 """True if the changeset tries to be a successor of a public changeset
199
199
200 Only non-public and non-obsolete changesets may be phase-divergent.
200 Only non-public and non-obsolete changesets may be phase-divergent.
201 """
201 """
202 return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')
202 return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')
203
203
204 def contentdivergent(self):
204 def contentdivergent(self):
205 """Is a successor of a changeset with multiple possible successor sets
205 """Is a successor of a changeset with multiple possible successor sets
206
206
207 Only non-public and non-obsolete changesets may be content-divergent.
207 Only non-public and non-obsolete changesets may be content-divergent.
208 """
208 """
209 return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')
209 return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')
210
210
211 def isunstable(self):
211 def isunstable(self):
212 """True if the changeset is either orphan, phase-divergent or
212 """True if the changeset is either orphan, phase-divergent or
213 content-divergent"""
213 content-divergent"""
214 return self.orphan() or self.phasedivergent() or self.contentdivergent()
214 return self.orphan() or self.phasedivergent() or self.contentdivergent()
215
215
216 def instabilities(self):
216 def instabilities(self):
217 """return the list of instabilities affecting this changeset.
217 """return the list of instabilities affecting this changeset.
218
218
219 Instabilities are returned as strings. possible values are:
219 Instabilities are returned as strings. possible values are:
220 - orphan,
220 - orphan,
221 - phase-divergent,
221 - phase-divergent,
222 - content-divergent.
222 - content-divergent.
223 """
223 """
224 instabilities = []
224 instabilities = []
225 if self.orphan():
225 if self.orphan():
226 instabilities.append('orphan')
226 instabilities.append('orphan')
227 if self.phasedivergent():
227 if self.phasedivergent():
228 instabilities.append('phase-divergent')
228 instabilities.append('phase-divergent')
229 if self.contentdivergent():
229 if self.contentdivergent():
230 instabilities.append('content-divergent')
230 instabilities.append('content-divergent')
231 return instabilities
231 return instabilities
232
232
233 def parents(self):
233 def parents(self):
234 """return contexts for each parent changeset"""
234 """return contexts for each parent changeset"""
235 return self._parents
235 return self._parents
236
236
237 def p1(self):
237 def p1(self):
238 return self._parents[0]
238 return self._parents[0]
239
239
240 def p2(self):
240 def p2(self):
241 parents = self._parents
241 parents = self._parents
242 if len(parents) == 2:
242 if len(parents) == 2:
243 return parents[1]
243 return parents[1]
244 return self._repo[nullrev]
244 return self._repo[nullrev]
245
245
246 def _fileinfo(self, path):
246 def _fileinfo(self, path):
247 if r'_manifest' in self.__dict__:
247 if r'_manifest' in self.__dict__:
248 try:
248 try:
249 return self._manifest[path], self._manifest.flags(path)
249 return self._manifest[path], self._manifest.flags(path)
250 except KeyError:
250 except KeyError:
251 raise error.ManifestLookupError(self._node, path,
251 raise error.ManifestLookupError(self._node, path,
252 _('not found in manifest'))
252 _('not found in manifest'))
253 if r'_manifestdelta' in self.__dict__ or path in self.files():
253 if r'_manifestdelta' in self.__dict__ or path in self.files():
254 if path in self._manifestdelta:
254 if path in self._manifestdelta:
255 return (self._manifestdelta[path],
255 return (self._manifestdelta[path],
256 self._manifestdelta.flags(path))
256 self._manifestdelta.flags(path))
257 mfl = self._repo.manifestlog
257 mfl = self._repo.manifestlog
258 try:
258 try:
259 node, flag = mfl[self._changeset.manifest].find(path)
259 node, flag = mfl[self._changeset.manifest].find(path)
260 except KeyError:
260 except KeyError:
261 raise error.ManifestLookupError(self._node, path,
261 raise error.ManifestLookupError(self._node, path,
262 _('not found in manifest'))
262 _('not found in manifest'))
263
263
264 return node, flag
264 return node, flag
265
265
266 def filenode(self, path):
266 def filenode(self, path):
267 return self._fileinfo(path)[0]
267 return self._fileinfo(path)[0]
268
268
269 def flags(self, path):
269 def flags(self, path):
270 try:
270 try:
271 return self._fileinfo(path)[1]
271 return self._fileinfo(path)[1]
272 except error.LookupError:
272 except error.LookupError:
273 return ''
273 return ''
274
274
275 def sub(self, path, allowcreate=True):
275 def sub(self, path, allowcreate=True):
276 '''return a subrepo for the stored revision of path, never wdir()'''
276 '''return a subrepo for the stored revision of path, never wdir()'''
277 return subrepo.subrepo(self, path, allowcreate=allowcreate)
277 return subrepo.subrepo(self, path, allowcreate=allowcreate)
278
278
279 def nullsub(self, path, pctx):
279 def nullsub(self, path, pctx):
280 return subrepo.nullsubrepo(self, path, pctx)
280 return subrepo.nullsubrepo(self, path, pctx)
281
281
282 def workingsub(self, path):
282 def workingsub(self, path):
283 '''return a subrepo for the stored revision, or wdir if this is a wdir
283 '''return a subrepo for the stored revision, or wdir if this is a wdir
284 context.
284 context.
285 '''
285 '''
286 return subrepo.subrepo(self, path, allowwdir=True)
286 return subrepo.subrepo(self, path, allowwdir=True)
287
287
288 def match(self, pats=None, include=None, exclude=None, default='glob',
288 def match(self, pats=None, include=None, exclude=None, default='glob',
289 listsubrepos=False, badfn=None):
289 listsubrepos=False, badfn=None):
290 r = self._repo
290 r = self._repo
291 return matchmod.match(r.root, r.getcwd(), pats,
291 return matchmod.match(r.root, r.getcwd(), pats,
292 include, exclude, default,
292 include, exclude, default,
293 auditor=r.nofsauditor, ctx=self,
293 auditor=r.nofsauditor, ctx=self,
294 listsubrepos=listsubrepos, badfn=badfn)
294 listsubrepos=listsubrepos, badfn=badfn)
295
295
296 def diff(self, ctx2=None, match=None, changes=None, opts=None,
296 def diff(self, ctx2=None, match=None, changes=None, opts=None,
297 losedatafn=None, pathfn=None, copy=None,
297 losedatafn=None, pathfn=None, copy=None,
298 copysourcematch=None, hunksfilterfn=None):
298 copysourcematch=None, hunksfilterfn=None):
299 """Returns a diff generator for the given contexts and matcher"""
299 """Returns a diff generator for the given contexts and matcher"""
300 if ctx2 is None:
300 if ctx2 is None:
301 ctx2 = self.p1()
301 ctx2 = self.p1()
302 if ctx2 is not None:
302 if ctx2 is not None:
303 ctx2 = self._repo[ctx2]
303 ctx2 = self._repo[ctx2]
304 return patch.diff(self._repo, ctx2, self, match=match, changes=changes,
304 return patch.diff(self._repo, ctx2, self, match=match, changes=changes,
305 opts=opts, losedatafn=losedatafn, pathfn=pathfn,
305 opts=opts, losedatafn=losedatafn, pathfn=pathfn,
306 copy=copy, copysourcematch=copysourcematch,
306 copy=copy, copysourcematch=copysourcematch,
307 hunksfilterfn=hunksfilterfn)
307 hunksfilterfn=hunksfilterfn)
308
308
309 def dirs(self):
309 def dirs(self):
310 return self._manifest.dirs()
310 return self._manifest.dirs()
311
311
312 def hasdir(self, dir):
312 def hasdir(self, dir):
313 return self._manifest.hasdir(dir)
313 return self._manifest.hasdir(dir)
314
314
    def status(self, other=None, match=None, listignored=False,
               listclean=False, listunknown=False, listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        reversed = False
        if (not isinstance(ctx1, changectx)
            and isinstance(ctx2, changectx)):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = self._repo.narrowmatch(match)
        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
                              listunknown)

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
                               r.clean)

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do is
                    # ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(rev2, match=submatch, ignored=listignored,
                               clean=listclean, unknown=listunknown,
                               listsubrepos=True)
                for rfiles, sfiles in zip(r, s):
                    rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)

        for l in r:
            l.sort()

        return r

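    # Minimal usage sketch (illustrative; `repo` is an assumed
    # localrepository object). scmutil.status behaves like a named tuple,
    # so the result's fields can be read by name:
    #
    #   st = repo['.'].status()        # '.' against the working directory
    #   for f in st.modified:
    #       repo.ui.write(b"M %s\n" % f)
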
class changectx(basectx):
    """A changecontext object makes access to data related to a particular
    changeset convenient. It represents a read-only context already present in
    the repo."""
    def __init__(self, repo, rev, node):
        super(changectx, self).__init__(repo)
        self._rev = rev
        self._node = node

    def __hash__(self):
        try:
            return hash(self._rev)
        except AttributeError:
            return id(self)

    def __nonzero__(self):
        return self._rev != nullrev

    __bool__ = __nonzero__

    @propertycache
    def _changeset(self):
        return self._repo.changelog.changelogrevision(self.rev())

    @propertycache
    def _manifest(self):
        return self._manifestctx.read()

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._changeset.manifest]

    @propertycache
    def _manifestdelta(self):
        return self._manifestctx.readdelta()

    @propertycache
    def _parents(self):
        repo = self._repo
        p1, p2 = repo.changelog.parentrevs(self._rev)
        if p2 == nullrev:
            return [repo[p1]]
        return [repo[p1], repo[p2]]

    def changeset(self):
        c = self._changeset
        return (
            c.manifest,
            c.user,
            c.date,
            c.files,
            c.description,
            c.extra,
        )
    def manifestnode(self):
        return self._changeset.manifest

    def user(self):
        return self._changeset.user
    def date(self):
        return self._changeset.date
    def files(self):
        return self._changeset.files
    @propertycache
    def _copies(self):
        source = self._repo.ui.config('experimental', 'copies.read-from')
        p1copies = self._changeset.p1copies
        p2copies = self._changeset.p2copies
        # If config says to get copy metadata only from changeset, then return
        # that, defaulting to {} if there was no copy metadata.
        # In compatibility mode, we return copy data from the changeset if
        # it was recorded there, and otherwise we fall back to getting it from
        # the filelogs (below).
        if (source == 'changeset-only' or
            (source == 'compatibility' and p1copies is not None)):
            return p1copies or {}, p2copies or {}

        # Otherwise (config said to read only from filelog, or we are in
        # compatibility mode and there is no copy data in the changeset), we
        # get the copy metadata from the filelogs.
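        # For reference, the source is selected in the user's configuration;
        # an illustrative hgrc snippet:
        #
        #   [experimental]
        #   copies.read-from = compatibility
        #
        # Any other value (such as the default, 'filelog-only') falls through
        # to the filelog-based path below.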
        p1copies = {}
        p2copies = {}
        p1 = self.p1()
        p2 = self.p2()
        narrowmatch = self._repo.narrowmatch()
        for dst in self.files():
            if not narrowmatch(dst) or dst not in self:
                continue
            copied = self[dst].renamed()
            if not copied:
                continue
            src, srcnode = copied
            if src in p1 and p1[src].filenode() == srcnode:
                p1copies[dst] = src
            elif src in p2 and p2[src].filenode() == srcnode:
                p2copies[dst] = src
        return p1copies, p2copies
    def p1copies(self):
        return self._copies[0]
    def p2copies(self):
        return self._copies[1]
    def description(self):
        return self._changeset.description
    def branch(self):
        return encoding.tolocal(self._changeset.extra.get("branch"))
    def closesbranch(self):
        return 'close' in self._changeset.extra
    def extra(self):
        """Return a dict of extra information."""
        return self._changeset.extra
    def tags(self):
        """Return a list of byte tag names"""
        return self._repo.nodetags(self._node)
    def bookmarks(self):
        """Return a list of byte bookmark names."""
        return self._repo.nodebookmarks(self._node)
    def phase(self):
        return self._repo._phasecache.phase(self._repo, self._rev)
    def hidden(self):
        return self._rev in repoview.filterrevs(self._repo, 'visible')

    def isinmemory(self):
        return False

    def children(self):
        """return list of changectx contexts for each child changeset.

        This returns only the immediate child changesets. Use descendants() to
        recursively walk children.
        """
        c = self._repo.changelog.children(self._node)
        return [self._repo[x] for x in c]

    def ancestors(self):
        for a in self._repo.changelog.ancestors([self._rev]):
            yield self._repo[a]

    def descendants(self):
        """Recursively yield all children of the changeset.

        For just the immediate children, use children()
        """
        for d in self._repo.changelog.descendants([self._rev]):
            yield self._repo[d]

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(self._repo, path, fileid=fileid,
                       changectx=self, filelog=filelog)

    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            anc = nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            # experimental config: merge.preferancestor
            for r in self._repo.ui.configlist('merge', 'preferancestor'):
                try:
                    ctx = scmutil.revsymbol(self._repo, r)
                except error.RepoLookupError:
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (_("note: using %s as ancestor of %s and %s\n") %
                     (short(anc), short(self._node), short(n2))) +
                    ''.join(_(" alternatively, use --config "
                              "merge.preferancestor=%s\n") %
                            short(n) for n in sorted(cahs) if n != anc))
        return self._repo[anc]

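    # For reference, the candidate checked above comes from the user's
    # configuration; an illustrative hgrc snippet (the revision is a
    # placeholder):
    #
    #   [merge]
    #   preferancestor = <some revision>
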
    def isancestorof(self, other):
        """True if this changeset is an ancestor of other"""
        return self._repo.changelog.isancestorrev(self._rev, other._rev)

    def walk(self, match):
        '''Generates matching file names.'''

        # Wrap match.bad method to have message with nodeid
        def bad(fn, msg):
            # The manifest doesn't know about subrepos, so don't complain about
            # paths into valid subrepos.
            if any(fn == s or fn.startswith(s + '/')
                   for s in self.substate):
                return
            match.bad(fn, _('no such file in rev %s') % self)

        m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
        return self._manifest.walk(m)

    def matches(self, match):
        return self.walk(match)

class basefilectx(object):
    """A filecontext object represents the common logic for its children:
    filectx: read-only access to a filerevision that is already present
             in the repo,
    workingfilectx: a filecontext that represents files from the working
                    directory,
    memfilectx: a filecontext that represents files in-memory,
    """
    @propertycache
    def _filelog(self):
        return self._repo.file(self._path)

    @propertycache
    def _changeid(self):
        if r'_changectx' in self.__dict__:
            return self._changectx.rev()
        elif r'_descendantrev' in self.__dict__:
            # this file context was created from a revision with a known
            # descendant, we can (lazily) correct for linkrev aliases
            return self._adjustlinkrev(self._descendantrev)
        else:
            return self._filelog.linkrev(self._filerev)

    @propertycache
    def _filenode(self):
        if r'_fileid' in self.__dict__:
            return self._filelog.lookup(self._fileid)
        else:
            return self._changectx.filenode(self._path)

    @propertycache
    def _filerev(self):
        return self._filelog.rev(self._filenode)

    @propertycache
    def _repopath(self):
        return self._path

    def __nonzero__(self):
        try:
            self._filenode
            return True
        except error.LookupError:
            # file is missing
            return False

    __bool__ = __nonzero__

    def __bytes__(self):
        try:
            return "%s@%s" % (self.path(), self._changectx)
        except error.LookupError:
            return "%s@???" % self.path()

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return r"<%s %s>" % (type(self).__name__, str(self))

    def __hash__(self):
        try:
            return hash((self._path, self._filenode))
        except AttributeError:
            return id(self)

    def __eq__(self, other):
        try:
            return (type(self) == type(other) and self._path == other._path
                    and self._filenode == other._filenode)
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def filerev(self):
        return self._filerev
    def filenode(self):
        return self._filenode
    @propertycache
    def _flags(self):
        return self._changectx.flags(self._path)
    def flags(self):
        return self._flags
    def filelog(self):
        return self._filelog
    def rev(self):
        return self._changeid
    def linkrev(self):
        return self._filelog.linkrev(self._filerev)
    def node(self):
        return self._changectx.node()
    def hex(self):
        return self._changectx.hex()
    def user(self):
        return self._changectx.user()
    def date(self):
        return self._changectx.date()
    def files(self):
        return self._changectx.files()
    def description(self):
        return self._changectx.description()
    def branch(self):
        return self._changectx.branch()
    def extra(self):
        return self._changectx.extra()
    def phase(self):
        return self._changectx.phase()
    def phasestr(self):
        return self._changectx.phasestr()
    def obsolete(self):
        return self._changectx.obsolete()
    def instabilities(self):
        return self._changectx.instabilities()
    def manifest(self):
        return self._changectx.manifest()
    def changectx(self):
        return self._changectx
    def renamed(self):
        return self._copied
    def copysource(self):
        return self._copied and self._copied[0]
    def repo(self):
        return self._repo
    def size(self):
        return len(self.data())

    def path(self):
        return self._path

    def isbinary(self):
        try:
            return stringutil.binary(self.data())
        except IOError:
            return False
    def isexec(self):
        return 'x' in self.flags()
    def islink(self):
        return 'l' in self.flags()

    def isabsent(self):
        """whether this filectx represents a file not in self._changectx

        This is mainly for merge code to detect change/delete conflicts. This
        is expected to be True for all subclasses of basectx."""
        return False

    _customcmp = False
    def cmp(self, fctx):
        """compare with other file context

        returns True if different from fctx.
        """
        if fctx._customcmp:
            return fctx.cmp(self)

        if self._filenode is None:
            raise error.ProgrammingError(
                'filectx.cmp() must be reimplemented if not backed by revlog')

        if fctx._filenode is None:
            if self._repo._encodefilterpats:
                # can't rely on size() because wdir content may be decoded
                return self._filelog.cmp(self._filenode, fctx.data())
            if self.size() - 4 == fctx.size():
                # size() can match:
                # if file data starts with '\1\n', empty metadata block is
                # prepended, which adds 4 bytes to filelog.size().
                return self._filelog.cmp(self._filenode, fctx.data())
            if self.size() == fctx.size():
                # size() matches: need to compare content
                return self._filelog.cmp(self._filenode, fctx.data())

        # size() differs
        return True

    def _adjustlinkrev(self, srcrev, inclusive=False, stoprev=None):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked
        :stoprev: an optional revision to stop the walk at. If no introduction
                  of this file content could be found before this floor
                  revision, the function will return "None" and stop its
                  iteration.
        """
        repo = self._repo
        cl = repo.unfiltered().changelog
        mfl = repo.manifestlog
        # fetch the linkrev
        lkr = self.linkrev()
        if srcrev == lkr:
            return lkr
        # hack to reuse ancestor computation when searching for renames
        memberanc = getattr(self, '_ancestrycontext', None)
        iteranc = None
        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True # we skipped the real (revless) source
        else:
            revs = [srcrev]
        if memberanc is None:
            memberanc = iteranc = cl.ancestors(revs, lkr,
                                               inclusive=inclusive)
        # check if this linkrev is an ancestor of srcrev
        if lkr not in memberanc:
            if iteranc is None:
                iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
            fnode = self._filenode
            path = self._path
            for a in iteranc:
                if stoprev is not None and a < stoprev:
                    return None
                ac = cl.read(a) # get changeset data (we avoid object creation)
                if path in ac[3]: # checking the 'files' field.
                    # The file has been touched, check if the content is
                    # similar to the one we search for.
                    if fnode == mfl[ac[0]].readfast().get(path):
                        return a
            # In theory, we should never get out of that loop without a result.
            # But if the manifest uses a buggy file revision (not a child of
            # the one it replaces), we could. Such a buggy situation will
            # likely result in a crash somewhere else at some point.
        return lkr

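    # Background sketch (illustrative, not part of the module): a filelog
    # revision stores a single linkrev, so when several changesets introduce
    # a byte-identical file revision, only the first introduction is
    # recorded:
    #
    #   rev 0 -- adds f with content X  (f's filelog linkrev points here)
    #   rev 1 -- an unrelated branch also adds f with content X
    #
    # Seen from rev 1, linkrev() answers 0, which need not be an ancestor of
    # rev 1; _adjustlinkrev() repairs this by walking the ancestors of the
    # source revision until it finds the real introduction.
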
    def isintroducedafter(self, changelogrev):
        """True if a filectx has been introduced after a given floor revision
        """
        if self.linkrev() >= changelogrev:
            return True
        introrev = self._introrev(stoprev=changelogrev)
        if introrev is None:
            return False
        return introrev >= changelogrev

    def introrev(self):
        """return the rev of the changeset which introduced this file revision

        This method is different from linkrev because it takes into account
        the changeset the filectx was created from. It ensures the returned
        revision is one of its ancestors. This prevents bugs from
        'linkrev-shadowing' when a file revision is used by multiple
        changesets.
        """
        return self._introrev()

    def _introrev(self, stoprev=None):
        """
        Same as `introrev` but with an extra argument to limit changelog
        iteration range in some internal usecase.

        If `stoprev` is set, the `introrev` will not be searched past that
        `stoprev` revision and "None" might be returned. This is useful to
        limit the iteration range.
        """
        toprev = None
        attrs = vars(self)
        if r'_changeid' in attrs:
            # We have a cached value already
            toprev = self._changeid
        elif r'_changectx' in attrs:
            # We know which changelog entry we are coming from
            toprev = self._changectx.rev()

        if toprev is not None:
            return self._adjustlinkrev(toprev, inclusive=True, stoprev=stoprev)
        elif r'_descendantrev' in attrs:
            introrev = self._adjustlinkrev(self._descendantrev, stoprev=stoprev)
            # be nice and cache the result of the computation
            if introrev is not None:
                self._changeid = introrev
            return introrev
        else:
            return self.linkrev()

    def introfilectx(self):
        """Return filectx having identical contents, but pointing to the
        changeset revision where this filectx was introduced"""
        introrev = self.introrev()
        if self.rev() == introrev:
            return self
        return self.filectx(self.filenode(), changeid=introrev)

    def _parentfilectx(self, path, fileid, filelog):
        """create parent filectx keeping ancestry info for _adjustlinkrev()"""
        fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
        if r'_changeid' in vars(self) or r'_changectx' in vars(self):
            # If self is associated with a changeset (probably explicitly
            # fed), ensure the created filectx is associated with a
            # changeset that is an ancestor of self.changectx.
            # This lets us later use _adjustlinkrev to get a correct link.
            fctx._descendantrev = self.rev()
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        elif r'_descendantrev' in vars(self):
            # Otherwise propagate _descendantrev if we have one associated.
            fctx._descendantrev = self._descendantrev
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        return fctx

    def parents(self):
        _path = self._path
        fl = self._filelog
        parents = self._filelog.parents(self._filenode)
        pl = [(_path, node, fl) for node in parents if node != nullid]

        r = fl.renamed(self._filenode)
        if r:
            # - In the simple rename case, both parents are nullid and pl is
            #   empty.
            # - In case of merge, only one of the parents is nullid and should
            #   be replaced with the rename information. This parent is
            #   -always- the first one.
            #
            # As nullid parents have always been filtered out in the previous
            # list comprehension, inserting at 0 will always result in
            # "replacing the first nullid parent with rename information".
            pl.insert(0, (r[0], r[1], self._repo.file(r[0])))

        return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]

    def p1(self):
        return self.parents()[0]

    def p2(self):
        p = self.parents()
        if len(p) == 2:
            return p[1]
        return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)

    def annotate(self, follow=False, skiprevs=None, diffopts=None):
        """Returns a list of annotateline objects for each line in the file

        - line.fctx is the filectx of the node where that line was last changed
        - line.lineno is the line number at the first appearance in the managed
          file
        - line.text is the data on that line (including newline character)
        """
        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            # Cut _descendantrev here to mitigate the penalty of lazy linkrev
            # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
            # from the topmost introrev (= srcrev) down to p.linkrev() if it
            # isn't an ancestor of the srcrev.
            f._changeid
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
            # from the cache to save time
            for p in pl:
                if not r'_filelog' in p.__dict__:
                    p._filelog = getlog(p.path())

            return pl

        # use linkrev to find the first changeset where self appeared
        base = self.introfilectx()
        if getattr(base, '_ancestrycontext', None) is None:
            cl = self._repo.changelog
            if base.rev() is None:
                # wctx is not inclusive, but works because _ancestrycontext
                # is used to test filelog revisions
                ac = cl.ancestors([p.rev() for p in base.parents()],
                                  inclusive=True)
            else:
                ac = cl.ancestors([base.rev()], inclusive=True)
            base._ancestrycontext = ac

        return dagop.annotate(base, parents, skiprevs=skiprevs,
                              diffopts=diffopts)

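    # Usage sketch (illustrative; `repo` and the file name are assumptions):
    #
    #   fctx = repo['.'][b'foo']
    #   for line in fctx.annotate(follow=True):
    #       repo.ui.write(b"%d: %s" % (line.fctx.rev(), line.text))
    #
    # Each annotateline carries the introducing filectx, so per-line
    # metadata such as rev() or user() is available through line.fctx.
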
    def ancestors(self, followfirst=False):
        visit = {}
        c = self
        if followfirst:
            cut = 1
        else:
            cut = None

        while True:
            for parent in c.parents()[:cut]:
                visit[(parent.linkrev(), parent.filenode())] = parent
            if not visit:
                break
            c = visit.pop(max(visit))
            yield c

    def decodeddata(self):
        """Returns `data()` after running repository decoding filters.

        This is often equivalent to how the data would be expressed on disk.
        """
        return self._repo.wwritedata(self.path(), self.data())

class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid must be a revision number, if specified.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        assert (changeid is not None
                or fileid is not None
                or changectx is not None), (
                    "bad args: changeid=%r, fileid=%r, changectx=%r"
                    % (changeid, fileid, changectx))

        if filelog is not None:
            self._filelog = filelog

        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

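    # Construction sketch (illustrative; `repo` and the file name are
    # assumptions). Per the assertion above, at least one of changeid,
    # fileid and changectx must be supplied; these two reach the same file
    # revision:
    #
    #   fctx = filectx(repo, b'foo', changeid=5)   # via changelog revision 5
    #   fctx = repo[5].filectx(b'foo')             # via the changectx
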
    @propertycache
    def _changectx(self):
        try:
            return self._repo[self._changeid]
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to
            # build `changectx` for a filtered revision. In such a case we
            # fall back to creating `changectx` on the unfiltered version of
            # the repository. This fallback should not be an issue because
            # `changectx` objects from `filectx` are not used in complex
            # operations that care about filtering.
            #
            # This fallback is a cheap and dirty fix that prevents several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either, and "incorrect
            # behavior" is seen as better than "crash".
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should
            # be considered when solving the linkrev issues is on the table.
            return self._repo.unfiltered()[self._changeid]

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog, changeid=changeid)

    def rawdata(self):
        return self._filelog.revision(self._filenode, raw=True)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config("censor", "policy") == "ignore":
                return ""
            raise error.Abort(_("censored node: %s") % short(self._filenode),
                              hint=_("set censor.policy to ignore errors"))

    def size(self):
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If a rename is logged in the file revision, we report the copy for
        the changeset only if the file revision's linkrev points back to the
        changeset in question, or if both changeset parents contain different
        file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return None

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]

class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        super(committablectx, self).__init__(repo)
        self._rev = None
        self._node = None
        self._text = text
        if date:
            self._date = dateutil.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise error.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
        if self._extra['branch'] == '':
            self._extra['branch'] = 'default'

    def __bytes__(self):
        return bytes(self._parents[0]) + "+"

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func

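    # The three-way resolution in func() above follows the usual merge rule:
    # the side that changed relative to the ancestor wins. Illustrative
    # cases (flags of ancestor, p1, p2 -> result):
    #
    #   'x', 'x', ''  -> ''   (p2 removed the exec bit)
    #   '',  'l', ''  -> 'l'  (p1 turned the file into a symlink)
    #   '',  'x', 'l' -> ''   (conflicting changes: punt)
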
    @propertycache
    def _flagfunc(self):
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    @propertycache
    def _status(self):
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        ui = self._repo.ui
        date = ui.configdate('devel', 'default-date')
        if date is None:
            date = dateutil.makedate()
        return date

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        return None
    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)
    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    @propertycache
    def _copies(self):
        p1copies = {}
        p2copies = {}
        parents = self._repo.dirstate.parents()
        p1manifest = self._repo[parents[0]].manifest()
        p2manifest = self._repo[parents[1]].manifest()
        narrowmatch = self._repo.narrowmatch()
        for dst, src in self._repo.dirstate.copies().items():
            if not narrowmatch(dst):
                continue
            if src in p1manifest:
                p1copies[dst] = src
            elif src in p2manifest:
                p2copies[dst] = src
        return p1copies, p2copies
    def p1copies(self):
        return self._copies[0]
    def p2copies(self):
        return self._copies[1]
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def isinmemory(self):
        return False

    def tags(self):
        return []

    def bookmarks(self):
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        phase = phases.draft # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

1245 def flags(self, path):
1260 def flags(self, path):
1246 if r'_manifest' in self.__dict__:
1261 if r'_manifest' in self.__dict__:
1247 try:
1262 try:
1248 return self._manifest.flags(path)
1263 return self._manifest.flags(path)
1249 except KeyError:
1264 except KeyError:
1250 return ''
1265 return ''
1251
1266
1252 try:
1267 try:
1253 return self._flagfunc(path)
1268 return self._flagfunc(path)
1254 except OSError:
1269 except OSError:
1255 return ''
1270 return ''
1256
1271
1257 def ancestor(self, c2):
1272 def ancestor(self, c2):
1258 """return the "best" ancestor context of self and c2"""
1273 """return the "best" ancestor context of self and c2"""
1259 return self._parents[0].ancestor(c2) # punt on two parents for now
1274 return self._parents[0].ancestor(c2) # punt on two parents for now
1260
1275
1261 def walk(self, match):
1276 def walk(self, match):
1262 '''Generates matching file names.'''
1277 '''Generates matching file names.'''
1263 return sorted(self._repo.dirstate.walk(self._repo.narrowmatch(match),
1278 return sorted(self._repo.dirstate.walk(self._repo.narrowmatch(match),
1264 subrepos=sorted(self.substate),
1279 subrepos=sorted(self.substate),
1265 unknown=True, ignored=False))
1280 unknown=True, ignored=False))
1266
1281
1267 def matches(self, match):
1282 def matches(self, match):
1268 match = self._repo.narrowmatch(match)
1283 match = self._repo.narrowmatch(match)
1269 ds = self._repo.dirstate
1284 ds = self._repo.dirstate
1270 return sorted(f for f in ds.matches(match) if ds[f] != 'r')
1285 return sorted(f for f in ds.matches(match) if ds[f] != 'r')
1271
1286
1272 def ancestors(self):
1287 def ancestors(self):
1273 for p in self._parents:
1288 for p in self._parents:
1274 yield p
1289 yield p
1275 for a in self._repo.changelog.ancestors(
1290 for a in self._repo.changelog.ancestors(
1276 [p.rev() for p in self._parents]):
1291 [p.rev() for p in self._parents]):
1277 yield self._repo[a]
1292 yield self._repo[a]
1278
1293
1279 def markcommitted(self, node):
1294 def markcommitted(self, node):
1280 """Perform post-commit cleanup necessary after committing this ctx
1295 """Perform post-commit cleanup necessary after committing this ctx
1281
1296
1282 Specifically, this updates backing stores this working context
1297 Specifically, this updates backing stores this working context
1283 wraps to reflect the fact that the changes reflected by this
1298 wraps to reflect the fact that the changes reflected by this
1284 workingctx have been committed. For example, it marks
1299 workingctx have been committed. For example, it marks
1285 modified and added files as normal in the dirstate.
1300 modified and added files as normal in the dirstate.
1286
1301
1287 """
1302 """
1288
1303
1289 with self._repo.dirstate.parentchange():
1304 with self._repo.dirstate.parentchange():
1290 for f in self.modified() + self.added():
1305 for f in self.modified() + self.added():
1291 self._repo.dirstate.normal(f)
1306 self._repo.dirstate.normal(f)
1292 for f in self.removed():
1307 for f in self.removed():
1293 self._repo.dirstate.drop(f)
1308 self._repo.dirstate.drop(f)
1294 self._repo.dirstate.setparents(node)
1309 self._repo.dirstate.setparents(node)
1295
1310
1296 # write changes out explicitly, because nesting wlock at
1311 # write changes out explicitly, because nesting wlock at
1297 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1312 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1298 # from immediately doing so for subsequent changing files
1313 # from immediately doing so for subsequent changing files
1299 self._repo.dirstate.write(self._repo.currenttransaction())
1314 self._repo.dirstate.write(self._repo.currenttransaction())
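
    # Illustrative summary (not part of the original module) of the dirstate
    # transitions performed above, assuming a freshly committed ctx:
    #
    #   modified/added file -> marked 'normal' (tracked and clean)
    #   removed file        -> dropped from the dirstate entirely
    #
    # so a subsequent status against the new parent reports no changes.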

    def dirty(self, missing=False, merge=True, branch=True):
        return False

class workingctx(committablectx):
    """A workingctx object makes access to data related to
    the current working directory convenient.
    date - any valid date string or (unixtime, offset), or None.
    user - username string, or None.
    extra - a dictionary of extra values, or None.
    changes - a list of file lists as returned by localrepo.status()
              or None to use the repository status.
    """
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        super(workingctx, self).__init__(repo, text, user, date, extra, changes)

    def __iter__(self):
        d = self._repo.dirstate
        for f in d:
            if d[f] != 'r':
                yield f

    def __contains__(self, key):
        return self._repo.dirstate[key] not in "?r"

    def hex(self):
        return wdirhex

    @propertycache
    def _parents(self):
        p = self._repo.dirstate.parents()
        if p[1] == nullid:
            p = p[:-1]
        # use unfiltered repo to delay/avoid loading obsmarkers
        unfi = self._repo.unfiltered()
        return [changectx(self._repo, unfi.changelog.rev(n), n) for n in p]

    def _fileinfo(self, path):
        # populate __dict__['_manifest'] as workingctx has no _manifestdelta
        self._manifest
        return super(workingctx, self)._fileinfo(path)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        return workingfilectx(self._repo, path, workingctx=self,
                              filelog=filelog)

    def dirty(self, missing=False, merge=True, branch=True):
        "check whether a working directory is modified"
        # check subrepos first
        for s in sorted(self.substate):
            if self.sub(s).dirty(missing=missing):
                return True
        # check current working dir
        return ((merge and self.p2()) or
                (branch and self.branch() != self.p1().branch()) or
                self.modified() or self.added() or self.removed() or
                (missing and self.deleted()))
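
    # Illustrative usage sketch (not part of the original module): commands
    # that refuse to run on an unclean working copy gate on this method.
    # For a hypothetical repo with one locally modified tracked file:
    #
    #   >>> bool(repo[None].dirty())                     # doctest: +SKIP
    #   True
    #   >>> bool(repo[None].dirty(merge=False, branch=False))  # doctest: +SKIP
    #   True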

    def add(self, list, prefix=""):
        with self._repo.wlock():
            ui, ds = self._repo.ui, self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                # ds.pathto() returns an absolute file when this is invoked from
                # the keyword extension. That gets flagged as non-portable on
                # Windows, since it contains the drive letter and colon.
                scmutil.checkportable(ui, os.path.join(prefix, f))
                try:
                    st = lstat(f)
                except OSError:
                    ui.warn(_("%s does not exist!\n") % uipath(f))
                    rejected.append(f)
                    continue
                limit = ui.configbytes('ui', 'large-file-limit')
                if limit != 0 and st.st_size > limit:
                    ui.warn(_("%s: up to %d MB of RAM may be required "
                              "to manage this file\n"
                              "(use 'hg revert %s' to cancel the "
                              "pending addition)\n")
                            % (f, 3 * st.st_size // 1000000, uipath(f)))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(_("%s not added: only files and symlinks "
                              "supported currently\n") % uipath(f))
                    rejected.append(f)
                elif ds[f] in 'amn':
                    ui.warn(_("%s already tracked!\n") % uipath(f))
                elif ds[f] == 'r':
                    ds.normallookup(f)
                else:
                    ds.add(f)
            return rejected
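
    # Illustrative worked example (not part of the original module): the
    # warning above estimates peak RAM as roughly three times the file size,
    # truncated to whole megabytes. For a hypothetical 50 MB file:
    #
    #   >>> st_size = 50 * 1000 * 1000
    #   >>> 3 * st_size // 1000000
    #   150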

    def forget(self, files, prefix=""):
        with self._repo.wlock():
            ds = self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            for f in files:
                if f not in ds:
                    self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
                    rejected.append(f)
                elif ds[f] != 'a':
                    ds.remove(f)
                else:
                    ds.drop(f)
            return rejected

    def copy(self, source, dest):
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            self._repo.ui.warn(_("%s does not exist!\n")
                               % self._repo.dirstate.pathto(dest))
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(_("copy failed: %s is not a file or a "
                                 "symbolic link\n")
                               % self._repo.dirstate.pathto(dest))
        else:
            with self._repo.wlock():
                ds = self._repo.dirstate
                if ds[dest] in '?':
                    ds.add(dest)
                elif ds[dest] in 'r':
                    ds.normallookup(dest)
                ds.copy(source, dest)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        r = self._repo

        # Only a case insensitive filesystem needs magic to translate user input
        # to actual case in the filesystem.
        icasefs = not util.fscasesensitive(r.root)
        return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
                              default, auditor=r.auditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn,
                              icasefs=icasefs)

    def _filtersuspectsymlink(self, files):
        if not files or self._repo.dirstate._checklink:
            return files

        # Symlink placeholders may get non-symlink-like contents
        # via user error or dereferencing by NFS or Samba servers,
        # so we filter out any placeholders that don't look like a
        # symlink
        sane = []
        for f in files:
            if self.flags(f) == 'l':
                d = self[f].data()
                if (d == '' or len(d) >= 1024 or '\n' in d
                        or stringutil.binary(d)):
                    self._repo.ui.debug('ignoring suspect symlink placeholder'
                                        ' "%s"\n' % f)
                    continue
            sane.append(f)
        return sane

    def _checklookup(self, files):
        # check for any possibly clean files
        if not files:
            return [], [], []

        modified = []
        deleted = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            try:
                # This will return True for a file that got replaced by a
                # directory in the interim, but fixing that is pretty hard.
                if (f not in pctx or self.flags(f) != pctx.flags(f)
                        or pctx[f].cmp(self[f])):
                    modified.append(f)
                else:
                    fixup.append(f)
            except (IOError, OSError):
                # A file became inaccessible in between? Mark it as deleted,
                # matching dirstate behavior (issue5584).
                # The dirstate has more complex behavior around whether a
                # missing file matches a directory, etc, but we don't need to
                # bother with that: if f has made it to this point, we're sure
                # it's in the dirstate.
                deleted.append(f)

        return modified, deleted, fixup
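
    # Illustrative summary (not part of the original module): each candidate
    # file lands in exactly one of the three buckets returned above:
    #
    #   modified -- content or flags differ from p1
    #   fixup    -- identical to p1; only the cached stat data was stale
    #   deleted  -- the file became unreadable while status was running
    #
    # ``fixup`` entries are re-marked clean by ``_poststatusfixup`` so the
    # next status call can take the fast path again.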

    def _poststatusfixup(self, status, fixup):
        """update dirstate for files that are actually clean"""
        poststatus = self._repo.postdsstatus()
        if fixup or poststatus:
            try:
                oldid = self._repo.dirstate.identity()

                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                with self._repo.wlock(False):
                    if self._repo.dirstate.identity() == oldid:
                        if fixup:
                            normal = self._repo.dirstate.normal
                            for f in fixup:
                                normal(f)
                            # write changes out explicitly, because nesting
                            # wlock at runtime may prevent 'wlock.release()'
                            # after this block from doing so for subsequent
                            # changing files
                            tr = self._repo.currenttransaction()
                            self._repo.dirstate.write(tr)

                        if poststatus:
                            for ps in poststatus:
                                ps(self, status)
                    else:
                        # in this case, writing changes out breaks
                        # consistency, because .hg/dirstate was
                        # already changed simultaneously after last
                        # caching (see also issue5584 for detail)
                        self._repo.ui.debug('skip updating dirstate: '
                                            'identity mismatch\n')
            except error.LockError:
                pass
            finally:
                # Even if the wlock couldn't be grabbed, clear out the list.
                self._repo.clearpostdsstatus()

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        '''Gets the status from the dirstate -- internal use only.'''
        subrepos = []
        if '.hgsub' in self:
            subrepos = sorted(self.substate)
        cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
                                            clean=clean, unknown=unknown)

        # check for any possibly clean files
        fixup = []
        if cmp:
            modified2, deleted2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)
            s.deleted.extend(deleted2)

            if fixup and clean:
                s.clean.extend(fixup)

        self._poststatusfixup(s, fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(s.modified, s.added, s.removed,
                                              s.deleted, [], [], [])
            else:
                self._status = s

        return s

    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status

        This reuses the file nodeid from the parent, but uses special node
        identifiers for added and modified files. This is used by manifest
        merge to see that files are different and by update logic to avoid
        deleting newly added files.
        """
        return self._buildstatusmanifest(self._status)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results."""
        parents = self.parents()

        man = parents[0].manifest().copy()

        ff = self._flagfunc
        for i, l in ((addednodeid, status.added),
                     (modifiednodeid, status.modified)):
            for f in l:
                man[f] = i
                try:
                    man.setflag(f, ff(f))
                except OSError:
                    pass

        for f in status.deleted + status.removed:
            if f in man:
                del man[f]

        return man
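
    # Illustrative sketch (not part of the original module): the resulting
    # manifest substitutes sentinel nodeids so that comparisons treat
    # working-copy files as changed without hashing them. Conceptually:
    #
    #   man['newfile']     == addednodeid      # added vs. p1
    #   man['changedfile'] == modifiednodeid   # modified vs. p1
    #   'removedfile' not in man               # removed/deleted are dropped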

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo['.']:
            s = super(workingctx, self)._buildstatus(other, s, match,
                                                     listignored, listclean,
                                                     listunknown)
        return s

    def _matchstatus(self, other, match):
        """override the match method with a filter for directory patterns

        We use inheritance to customize the match.bad method only in cases of
        workingctx since it belongs only to the working directory when
        comparing against the parent changeset.

        If we aren't comparing against the working directory's parent, then we
        just use the default match object sent to us.
        """
        if other != self._repo['.']:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in other and not other.hasdir(f):
                    self._repo.ui.warn('%s: %s\n' %
                                       (self._repo.dirstate.pathto(f), msg))
            match.bad = bad
        return match

    def markcommitted(self, node):
        super(workingctx, self).markcommitted(node)

        sparse.aftercommit(self._repo, node)

class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""
    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def renamed(self):
        path = self.copysource()
        if not path:
            return None
        return path, self._changectx._parents[0]._manifest.get(path, nullid)

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def filenode(ctx, path):
            return ctx._manifest.get(path, nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        return [self._parentfilectx(p, fileid=n, filelog=l)
                for p, n, l in pl if n != nullid]

    def children(self):
        return []

class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""
    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        return workingctx(self._repo)

    def data(self):
        return self._repo.wread(self._path)
    def copysource(self):
        return self._repo.dirstate.copied(self._path)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size
    def date(self):
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def exists(self):
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different from fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        rmdir = self._repo.ui.configbool('experimental', 'removeemptydirs')
        self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing,
                                   rmdir=rmdir)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        """wraps repo.wwrite"""
        self._repo.wwrite(self._path, data, flags,
                          backgroundclose=backgroundclose,
                          **kwargs)

    def markcopied(self, src):
        """marks this file a copy of `src`"""
        if self._repo.dirstate[self._path] in "nma":
            self._repo.dirstate.copy(src, self._path)

    def clearunknown(self):
        """Removes conflicting items in the working directory so that
        ``write()`` can be called successfully.
        """
        wvfs = self._repo.wvfs
        f = self._path
        wvfs.audit(f)
        if self._repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
            # remove files under the directory as they should already be
            # warned and backed up
            if wvfs.isdir(f) and not wvfs.islink(f):
                wvfs.rmtree(f, forcibly=True)
            for p in reversed(list(util.finddirs(f))):
                if wvfs.isfileorlink(p):
                    wvfs.unlink(p)
                    break
        else:
            # don't remove files if path conflicts are not processed
            if wvfs.isdir(f) and not wvfs.islink(f):
                wvfs.removedirs(f)

    def setflags(self, l, x):
        self._repo.wvfs.setflags(self._path, l, x)

class overlayworkingctx(committablectx):
    """Wraps another mutable context with a write-back cache that can be
    converted into a commit context.

    self._cache[path] maps to a dict with keys: {
        'exists': bool?
        'date': date?
        'data': str?
        'flags': str?
        'copied': str? (path or None)
    }
    If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
    is `False`, the file was deleted.
    """

    def __init__(self, repo):
        super(overlayworkingctx, self).__init__(repo)
        self.clean()

    def setbase(self, wrappedctx):
        self._wrappedctx = wrappedctx
        self._parents = [wrappedctx]
        # Drop old manifest cache as it is now out of date.
        # This is necessary when, e.g., rebasing several nodes with one
        # ``overlayworkingctx`` (e.g. with --collapse).
        util.clearcachedproperty(self, '_manifest')

    def data(self, path):
        if self.isdirty(path):
            if self._cache[path]['exists']:
                if self._cache[path]['data']:
                    return self._cache[path]['data']
                else:
                    # Must fallback here, too, because we only set flags.
                    return self._wrappedctx[path].data()
            else:
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        else:
            return self._wrappedctx[path].data()

    @propertycache
    def _manifest(self):
        parents = self.parents()
        man = parents[0].manifest().copy()

        flag = self._flagfunc
        for path in self.added():
            man[path] = addednodeid
            man.setflag(path, flag(path))
        for path in self.modified():
            man[path] = modifiednodeid
            man.setflag(path, flag(path))
        for path in self.removed():
            del man[path]
        return man

    @propertycache
    def _flagfunc(self):
        def f(path):
            return self._cache[path]['flags']
        return f

    def files(self):
        return sorted(self.added() + self.modified() + self.removed())

    def modified(self):
        return [f for f in self._cache.keys() if self._cache[f]['exists'] and
                self._existsinparent(f)]

    def added(self):
        return [f for f in self._cache.keys() if self._cache[f]['exists'] and
                not self._existsinparent(f)]

    def removed(self):
        return [f for f in self._cache.keys() if
                not self._cache[f]['exists'] and self._existsinparent(f)]

    def p1copies(self):
        copies = self._wrappedctx.p1copies().copy()
        narrowmatch = self._repo.narrowmatch()
        for f in self._cache.keys():
            if not narrowmatch(f):
                continue
            copies.pop(f, None)  # delete if it exists
            source = self._cache[f]['copied']
            if source:
                copies[f] = source
        return copies

    def p2copies(self):
        copies = self._wrappedctx.p2copies().copy()
        narrowmatch = self._repo.narrowmatch()
        for f in self._cache.keys():
            if not narrowmatch(f):
                continue
            copies.pop(f, None)  # delete if it exists
            source = self._cache[f]['copied']
            if source:
                copies[f] = source
        return copies

    def isinmemory(self):
        return True

    def filedate(self, path):
        if self.isdirty(path):
            return self._cache[path]['date']
        else:
            return self._wrappedctx[path].date()

    def markcopied(self, path, origin):
        self._markdirty(path, exists=True, date=self.filedate(path),
                        flags=self.flags(path), copied=origin)

    def copydata(self, path):
        if self.isdirty(path):
            return self._cache[path]['copied']
        else:
            raise error.ProgrammingError('copydata() called on clean context')

    def flags(self, path):
        if self.isdirty(path):
            if self._cache[path]['exists']:
                return self._cache[path]['flags']
            else:
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        else:
            return self._wrappedctx[path].flags()

    def __contains__(self, key):
        if key in self._cache:
            return self._cache[key]['exists']
        return key in self.p1()

    def _existsinparent(self, path):
        try:
            # ``commitctx`` raises a ``ManifestLookupError`` if a path does not
            # exist, unlike ``workingctx``, which returns a ``workingfilectx``
            # with an ``exists()`` function.
            self._wrappedctx[path]
            return True
        except error.ManifestLookupError:
            return False

    def _auditconflicts(self, path):
        """Replicates conflict checks done by wvfs.write().

        Since we never write to the filesystem and never call `applyupdates`
        in IMM, we'll never check that a path is actually writable -- e.g.,
        because it adds `a/foo`, but `a` is actually a file in the other
        commit.
        """
        def fail(path, component):
            # p1() is the base and we're receiving "writes" for p2()'s
            # files.
            if 'l' in self.p1()[component].flags():
                raise error.Abort("error: %s conflicts with symlink %s "
                                  "in %d." % (path, component,
                                              self.p1().rev()))
            else:
                raise error.Abort("error: '%s' conflicts with file '%s' in "
                                  "%d." % (path, component,
                                           self.p1().rev()))

        # Test that each new directory to be created to write this path from
        # p2 is not a file in p1.
        components = path.split('/')
        for i in pycompat.xrange(len(components)):
            component = "/".join(components[0:i])
            if component in self:
                fail(path, component)

        # Test the other direction -- that this path from p2 isn't a directory
        # in p1 (test that p1 doesn't have any paths matching `path/*`).
        match = self.match([path], default=b'path')
        matches = self.p1().manifest().matches(match)
        mfiles = matches.keys()
        if len(mfiles) > 0:
            if len(mfiles) == 1 and mfiles[0] == path:
                return
            # omit the files which are deleted in current IMM wctx
            mfiles = [m for m in mfiles if m in self]
            if not mfiles:
                return
            raise error.Abort("error: file '%s' cannot be written because "
                              " '%s/' is a directory in %s (containing %d "
                              "entries: %s)"
                              % (path, path, self.p1(), len(mfiles),
                                 ', '.join(mfiles)))
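
    # Illustrative worked example (not part of the original module): for a
    # hypothetical write of 'a/b/c', the prefix walk above checks the
    # components '', 'a' and 'a/b', each of which must not be a file (or
    # symlink) in this context:
    #
    #   >>> components = 'a/b/c'.split('/')
    #   >>> ["/".join(components[0:i]) for i in range(len(components))]
    #   ['', 'a', 'a/b']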

    def write(self, path, data, flags='', **kwargs):
        if data is None:
            raise error.ProgrammingError("data must be non-None")
        self._auditconflicts(path)
        self._markdirty(path, exists=True, data=data, date=dateutil.makedate(),
                        flags=flags)

    def setflags(self, path, l, x):
        flag = ''
        if l:
            flag = 'l'
        elif x:
            flag = 'x'
        self._markdirty(path, exists=True, date=dateutil.makedate(),
                        flags=flag)

    def remove(self, path):
        self._markdirty(path, exists=False)

    def exists(self, path):
        """exists behaves like `lexists`, but needs to follow symlinks and
        return False if they are broken.
        """
        if self.isdirty(path):
            # If this path exists and is a symlink, "follow" it by calling
            # exists on the destination path.
            if (self._cache[path]['exists'] and
                    'l' in self._cache[path]['flags']):
                return self.exists(self._cache[path]['data'].strip())
            else:
                return self._cache[path]['exists']

        return self._existsinparent(path)

    def lexists(self, path):
        """lexists returns True if the path exists"""
        if self.isdirty(path):
            return self._cache[path]['exists']

        return self._existsinparent(path)

    def size(self, path):
        if self.isdirty(path):
            if self._cache[path]['exists']:
                return len(self._cache[path]['data'])
            else:
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        return self._wrappedctx[path].size()

    def tomemctx(self, text, branch=None, extra=None, date=None, parents=None,
                 user=None, editor=None):
        """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
        committed.

        ``text`` is the commit message.
        ``parents`` (optional) are rev numbers.
        """
        # Default parents to the wrapped context's if not passed.
        if parents is None:
            parents = self._wrappedctx.parents()
            if len(parents) == 1:
                parents = (parents[0], None)

        # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
        if parents[1] is None:
            parents = (self._repo[parents[0]], None)
        else:
            parents = (self._repo[parents[0]], self._repo[parents[1]])

        files = self._cache.keys()
        def getfile(repo, memctx, path):
            if self._cache[path]['exists']:
                return memfilectx(repo, memctx, path,
                                  self._cache[path]['data'],
                                  'l' in self._cache[path]['flags'],
                                  'x' in self._cache[path]['flags'],
                                  self._cache[path]['copied'])
            else:
                # Returning None, but including the path in `files`, is
                # necessary for memctx to register a deletion.
                return None
        return memctx(self._repo, parents, text, files, getfile, date=date,
                      extra=extra, user=user, branch=branch, editor=editor)
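
    # Illustrative usage sketch (not part of the original module), e.g. for
    # an in-memory rebase: overlay a base revision, apply writes, then turn
    # the accumulated cache into a committable memctx:
    #
    #   >>> wctx = overlayworkingctx(repo)               # doctest: +SKIP
    #   >>> wctx.setbase(repo['.'])                      # doctest: +SKIP
    #   >>> wctx.write('a.txt', 'new contents\n')        # doctest: +SKIP
    #   >>> mctx = wctx.tomemctx('rewrite a.txt')        # doctest: +SKIP
    #   >>> repo.commitctx(mctx)                         # doctest: +SKIP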

    def isdirty(self, path):
        return path in self._cache

    def isempty(self):
        # We need to discard any keys that are actually clean before the empty
        # commit check.
        self._compact()
        return len(self._cache) == 0

    def clean(self):
        self._cache = {}

    def _compact(self):
        """Removes keys from the cache that are actually clean, by comparing
        them with the underlying context.

        This can occur during the merge process, e.g. by passing --tool :local
        to resolve a conflict.
        """
        keys = []
        # This won't be perfect, but can help performance significantly when
        # using things like remotefilelog.
        scmutil.prefetchfiles(
            self.repo(), [self.p1().rev()],
            scmutil.matchfiles(self.repo(), self._cache.keys()))

        for path in self._cache.keys():
            cache = self._cache[path]
            try:
                underlying = self._wrappedctx[path]
                if (underlying.data() == cache['data'] and
                        underlying.flags() == cache['flags']):
                    keys.append(path)
            except error.ManifestLookupError:
                # Path not in the underlying manifest (created).
                continue

        for path in keys:
            del self._cache[path]
        return keys

    def _markdirty(self, path, exists, data=None, date=None, flags='',
                   copied=None):
        # data not provided, let's see if we already have some; if not, let's
        # grab it from our underlying context, so that we always have data if
        # the file is marked as existing.
        if exists and data is None:
            oldentry = self._cache.get(path) or {}
            data = oldentry.get('data') or self._wrappedctx[path].data()

        self._cache[path] = {
            'exists': exists,
            'data': data,
            'date': date,
            'flags': flags,
            'copied': copied,
        }
2115
2130
2116 def filectx(self, path, filelog=None):
2131 def filectx(self, path, filelog=None):
2117 return overlayworkingfilectx(self._repo, path, parent=self,
2132 return overlayworkingfilectx(self._repo, path, parent=self,
2118 filelog=filelog)
2133 filelog=filelog)
2119
2134
2120 class overlayworkingfilectx(committablefilectx):
2135 class overlayworkingfilectx(committablefilectx):
2121 """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
2136 """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
2122 cache, which can be flushed through later by calling ``flush()``."""
2137 cache, which can be flushed through later by calling ``flush()``."""
2123
2138
2124 def __init__(self, repo, path, filelog=None, parent=None):
2139 def __init__(self, repo, path, filelog=None, parent=None):
2125 super(overlayworkingfilectx, self).__init__(repo, path, filelog,
2140 super(overlayworkingfilectx, self).__init__(repo, path, filelog,
2126 parent)
2141 parent)
2127 self._repo = repo
2142 self._repo = repo
2128 self._parent = parent
2143 self._parent = parent
2129 self._path = path
2144 self._path = path
2130
2145
2131 def cmp(self, fctx):
2146 def cmp(self, fctx):
2132 return self.data() != fctx.data()
2147 return self.data() != fctx.data()
2133
2148
2134 def changectx(self):
2149 def changectx(self):
2135 return self._parent
2150 return self._parent
2136
2151
2137 def data(self):
2152 def data(self):
2138 return self._parent.data(self._path)
2153 return self._parent.data(self._path)
2139
2154
2140 def date(self):
2155 def date(self):
2141 return self._parent.filedate(self._path)
2156 return self._parent.filedate(self._path)
2142
2157
2143 def exists(self):
2158 def exists(self):
2144 return self.lexists()
2159 return self.lexists()
2145
2160
2146 def lexists(self):
2161 def lexists(self):
2147 return self._parent.exists(self._path)
2162 return self._parent.exists(self._path)
2148
2163
2149 def copysource(self):
2164 def copysource(self):
2150 return self._parent.copydata(self._path)
2165 return self._parent.copydata(self._path)
2151
2166
2152 def size(self):
2167 def size(self):
2153 return self._parent.size(self._path)
2168 return self._parent.size(self._path)
2154
2169
2155 def markcopied(self, origin):
2170 def markcopied(self, origin):
2156 self._parent.markcopied(self._path, origin)
2171 self._parent.markcopied(self._path, origin)
2157
2172
2158 def audit(self):
2173 def audit(self):
2159 pass
2174 pass
2160
2175
2161 def flags(self):
2176 def flags(self):
2162 return self._parent.flags(self._path)
2177 return self._parent.flags(self._path)
2163
2178
2164 def setflags(self, islink, isexec):
2179 def setflags(self, islink, isexec):
2165 return self._parent.setflags(self._path, islink, isexec)
2180 return self._parent.setflags(self._path, islink, isexec)
2166
2181
2167 def write(self, data, flags, backgroundclose=False, **kwargs):
2182 def write(self, data, flags, backgroundclose=False, **kwargs):
2168 return self._parent.write(self._path, data, flags, **kwargs)
2183 return self._parent.write(self._path, data, flags, **kwargs)
2169
2184
2170 def remove(self, ignoremissing=False):
2185 def remove(self, ignoremissing=False):
2171 return self._parent.remove(self._path)
2186 return self._parent.remove(self._path)
2172
2187
2173 def clearunknown(self):
2188 def clearunknown(self):
2174 pass
2189 pass
2175
2190
2176 class workingcommitctx(workingctx):
2191 class workingcommitctx(workingctx):
2177 """A workingcommitctx object makes access to data related to
2192 """A workingcommitctx object makes access to data related to
2178 the revision being committed convenient.
2193 the revision being committed convenient.
2179
2194
2180 This hides changes in the working directory, if they aren't
2195 This hides changes in the working directory, if they aren't
2181 committed in this context.
2196 committed in this context.
2182 """
2197 """
2183 def __init__(self, repo, changes,
2198 def __init__(self, repo, changes,
2184 text="", user=None, date=None, extra=None):
2199 text="", user=None, date=None, extra=None):
2185 super(workingcommitctx, self).__init__(repo, text, user, date, extra,
2200 super(workingcommitctx, self).__init__(repo, text, user, date, extra,
2186 changes)
2201 changes)
2187
2202
2188 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
2203 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
2189 """Return matched files only in ``self._status``
2204 """Return matched files only in ``self._status``
2190
2205
2191 Uncommitted files appear "clean" via this context, even if
2206 Uncommitted files appear "clean" via this context, even if
2192 they aren't actually so in the working directory.
2207 they aren't actually so in the working directory.
2193 """
2208 """
2194 if clean:
2209 if clean:
2195 clean = [f for f in self._manifest if f not in self._changedset]
2210 clean = [f for f in self._manifest if f not in self._changedset]
2196 else:
2211 else:
2197 clean = []
2212 clean = []
2198 return scmutil.status([f for f in self._status.modified if match(f)],
2213 return scmutil.status([f for f in self._status.modified if match(f)],
2199 [f for f in self._status.added if match(f)],
2214 [f for f in self._status.added if match(f)],
2200 [f for f in self._status.removed if match(f)],
2215 [f for f in self._status.removed if match(f)],
2201 [], [], [], clean)
2216 [], [], [], clean)
2202
2217
2203 @propertycache
2218 @propertycache
2204 def _changedset(self):
2219 def _changedset(self):
2205 """Return the set of files changed in this context
2220 """Return the set of files changed in this context
2206 """
2221 """
2207 changed = set(self._status.modified)
2222 changed = set(self._status.modified)
2208 changed.update(self._status.added)
2223 changed.update(self._status.added)
2209 changed.update(self._status.removed)
2224 changed.update(self._status.removed)
2210 return changed
2225 return changed
2211
2226
2212 def makecachingfilectxfn(func):
2227 def makecachingfilectxfn(func):
2213 """Create a filectxfn that caches based on the path.
2228 """Create a filectxfn that caches based on the path.
2214
2229
2215 We can't use util.cachefunc because it uses all arguments as the cache
2230 We can't use util.cachefunc because it uses all arguments as the cache
2216 key and this creates a cycle since the arguments include the repo and
2231 key and this creates a cycle since the arguments include the repo and
2217 memctx.
2232 memctx.
2218 """
2233 """
2219 cache = {}
2234 cache = {}
2220
2235
2221 def getfilectx(repo, memctx, path):
2236 def getfilectx(repo, memctx, path):
2222 if path not in cache:
2237 if path not in cache:
2223 cache[path] = func(repo, memctx, path)
2238 cache[path] = func(repo, memctx, path)
2224 return cache[path]
2239 return cache[path]
2225
2240
2226 return getfilectx
2241 return getfilectx
2227
2242
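# Standalone sketch (illustrative, not part of the original file): the
# reference cycle that keying on all arguments would create.  A
# util.cachefunc-style wrapper stores memctx inside a cache that the memctx
# itself keeps alive through self._filectxfn, so neither could ever be
# collected; keying on the path alone, as above, avoids that.  The wrapper
# below is hypothetical and exists only to make the cycle visible.

def _argkeyedcachesketch(func):
    cache = {}
    def wrapped(repo, memctx, path):
        key = (repo, memctx, path)  # memctx is now referenced by the cache
        if key not in cache:
            cache[key] = func(repo, memctx, path)
        return cache[key]
    return wrapped  # memctx -> _filectxfn -> cache -> memctx: a cycle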
def memfilefromctx(ctx):
    """Given a context return a memfilectx for ctx[path]

    This is a convenience method for building a memctx based on another
    context.
    """
    def getfilectx(repo, memctx, path):
        fctx = ctx[path]
        copysource = fctx.copysource()
        return memfilectx(repo, memctx, path, fctx.data(),
                          islink=fctx.islink(), isexec=fctx.isexec(),
                          copysource=copysource)

    return getfilectx

def memfilefrompatch(patchstore):
    """Given a patch (e.g. patchstore object) return a memfilectx

    This is a convenience method for building a memctx based on a patchstore.
    """
    def getfilectx(repo, memctx, path):
        data, mode, copysource = patchstore.getfile(path)
        if data is None:
            return None
        islink, isexec = mode
        return memfilectx(repo, memctx, path, data, islink=islink,
                          isexec=isexec, copysource=copysource)

    return getfilectx

class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time, while the
    related file data is made available through a callback mechanism.
    'repo' is the current localrepo, 'parents' is a sequence of two parent
    revisions identifiers (pass None for every missing parent), 'text' is
    the commit message and 'files' lists names of files touched by the
    revision (normalized and relative to repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    the requested file, relative to the repository root. It is fired by
    the commit function for every file in 'files', but the call order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn returns None for recent
    Mercurial. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to the current
    repository username, date is the commit date in any format supported
    by dateutil.parsedate() and defaults to the current date, extra is a
    dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, branch=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [self._repo[p] for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        if branch is not None:
            self._extra['branch'] = encoding.fromlocal(branch)
        self.substate = {}

        if isinstance(filectxfn, patch.filestore):
            filectxfn = memfilefrompatch(filectxfn)
        elif not callable(filectxfn):
            # if store is not callable, wrap it in a function
            filectxfn = memfilefromctx(filectxfn)

        # memoizing increases performance for e.g. vcs convert scenarios.
        self._filectxfn = makecachingfilectxfn(filectxfn)

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            man[f] = modifiednodeid

        for f in self._status.added:
            man[f] = addednodeid

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
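# Usage sketch (illustrative, not part of the original file): committing a
# single file in memory through memctx.  ``repo`` is assumed to be an open
# localrepo; the file name, contents and user below are hypothetical.

def _memctxexample(repo):
    def filectxfn(repo, memctx_, path):
        # called once per entry in ``files``; return a memfilectx with the
        # new contents, or None to record a deletion
        return memfilectx(repo, memctx_, path, b'new contents\n')

    mctx = memctx(repo, [repo['.'].node(), None],
                  b'example in-memory commit', [b'a.txt'], filectxfn,
                  user=b'editor <editor@example.com>')
    return mctx.commit()  # the node of the newly created changeset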
class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, changectx, path, data, islink=False,
                 isexec=False, copysource=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copysource is the source file path if the current file was copied in
        the revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, changectx)
        self._data = data
        if islink:
            self._flags = 'l'
        elif isexec:
            self._flags = 'x'
        else:
            self._flags = ''
        self._copysource = copysource

    def copysource(self):
        return self._copysource

    def cmp(self, fctx):
        return self.data() != fctx.data()

    def data(self):
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags, **kwargs):
        """wraps repo.wwrite"""
        self._data = data


class metadataonlyctx(committablectx):
    """Like memctx, but it reuses the manifest of a different commit.
    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time. 'repo' is the
    current localrepo, 'ctx' is the original revision whose manifest we're
    reusing, 'parents' is a sequence of two parent revisions identifiers
    (pass None for every missing parent), 'text' is the commit message.

    user receives the committer name and defaults to the current repository
    username, date is the commit date in any format supported by
    dateutil.parsedate() and defaults to the current date, extra is a
    dictionary of metadata or is left empty.
    """
    def __init__(self, repo, originalctx, parents=None, text=None, user=None,
                 date=None, extra=None, editor=False):
        if text is None:
            text = originalctx.description()
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        self._manifestnode = originalctx.manifestnode()
        if parents is None:
            parents = originalctx.parents()
        else:
            parents = [repo[p] for p in parents if p is not None]
        parents = parents[:]
        while len(parents) < 2:
            parents.append(repo[nullid])
        p1, p2 = self._parents = parents

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        mp1, mp2 = self.manifestctx().parents
        if p1 != nullid and p1.manifestnode() != mp1:
            raise RuntimeError(r"can't reuse the manifest: its p1 "
                               r"doesn't match the new ctx p1")
        if p2 != nullid and p2.manifestnode() != mp2:
            raise RuntimeError(r"can't reuse the manifest: "
                               r"its p2 doesn't match the new ctx p2")

        self._files = originalctx.files()
        self.substate = {}

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        return self._manifestnode

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the ``origctx``
        and parents manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "metadataonlyctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif f in self:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
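# Usage sketch (illustrative, not part of the original file): rewriting only
# the metadata of an existing changeset, e.g. correcting its message, while
# reusing its manifest unchanged.  ``repo`` is assumed to be an open
# localrepo and ``rev`` any existing revision identifier.

def _metadataonlyexample(repo, rev):
    origctx = repo[rev]
    mctx = metadataonlyctx(repo, origctx,
                           text=b'corrected commit message',
                           user=origctx.user(), date=origctx.date())
    return mctx.commit()  # a new node that shares origctx's manifest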
class arbitraryfilectx(object):
    """Allows you to use filectx-like functions on a file in an arbitrary
    location on disk, possibly not in the working directory.
    """
    def __init__(self, path, repo=None):
        # Repo is optional because contrib/simplemerge uses this class.
        self._repo = repo
        self._path = path

    def cmp(self, fctx):
        # filecmp follows symlinks whereas `cmp` should not, so skip the fast
        # path if either side is a symlink.
        symlinks = ('l' in self.flags() or 'l' in fctx.flags())
        if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
            # Add a fast-path for merge if both sides are disk-backed.
            # Note that filecmp uses the opposite return values (True if same)
            # from our cmp functions (True if different).
            return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
        return self.data() != fctx.data()

    def path(self):
        return self._path

    def flags(self):
        return ''

    def data(self):
        return util.readfile(self._path)

    def decodeddata(self):
        with open(self._path, "rb") as f:
            return f.read()

    def remove(self):
        util.unlink(self._path)

    def write(self, data, flags, **kwargs):
        assert not flags
        with open(self._path, "wb") as f:
            f.write(data)
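# Usage sketch (illustrative, not part of the original file): treating a
# plain file on disk as a filectx-like object, the way contrib/simplemerge
# does for merge inputs that live outside any repository.  The path is
# hypothetical.

def _arbitraryfilectxexample():
    fctx = arbitraryfilectx('/tmp/merge-local.txt')
    data = fctx.data()            # raw bytes read straight from disk
    fctx.write(data + b'\n', '')  # flags must be empty for a plain file
    return fctx.cmp(fctx)         # False: contents compare equal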
@@ -1,1012 +1,1012 @@
# copies.py - copy detection for Mercurial
#
# Copyright 2008 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import collections
import heapq
import os

from .i18n import _

from . import (
    match as matchmod,
    node,
    pathutil,
    util,
)
from .utils import (
    stringutil,
)

def _findlimit(repo, ctxa, ctxb):
    """
    Find the last revision that needs to be checked to ensure that a full
    transitive closure for file copies can be properly calculated.
    Generally, this means finding the earliest revision number that's an
    ancestor of a or b but not both, except when a or b is a direct
    descendant of the other, in which case we can return the minimum revnum
    of a and b.
    """

    # basic idea:
    # - mark a and b with different sides
    # - if a parent's children are all on the same side, the parent is
    #   on that side, otherwise it is on no side
    # - walk the graph in topological order with the help of a heap;
    #   - add unseen parents to side map
    #   - clear side of any parent that has children on different sides
    #   - track number of interesting revs that might still be on a side
    #   - track the lowest interesting rev seen
    #   - quit when interesting revs is zero

    cl = repo.changelog
    wdirparents = None
    a = ctxa.rev()
    b = ctxb.rev()
    if a is None:
        wdirparents = (ctxa.p1(), ctxa.p2())
        a = node.wdirrev
    if b is None:
        assert not wdirparents
        wdirparents = (ctxb.p1(), ctxb.p2())
        b = node.wdirrev

    side = {a: -1, b: 1}
    visit = [-a, -b]
    heapq.heapify(visit)
    interesting = len(visit)
    limit = node.wdirrev

    while interesting:
        r = -heapq.heappop(visit)
        if r == node.wdirrev:
            parents = [pctx.rev() for pctx in wdirparents]
        else:
            parents = cl.parentrevs(r)
        if parents[1] == node.nullrev:
            parents = parents[:1]
        for p in parents:
            if p not in side:
                # first time we see p; add it to visit
                side[p] = side[r]
                if side[p]:
                    interesting += 1
                heapq.heappush(visit, -p)
            elif side[p] and side[p] != side[r]:
                # p was interesting but now we know better
                side[p] = 0
                interesting -= 1
        if side[r]:
            limit = r  # lowest rev visited
            interesting -= 1

    # Consider the following flow (see test-commit-amend.t under issue4405):
    # 1/ File 'a0' committed
    # 2/ File renamed from 'a0' to 'a1' in a new commit (call it 'a1')
    # 3/ Move back to first commit
    # 4/ Create a new commit via revert to contents of 'a1' (call it 'a1-amend')
    # 5/ Rename file from 'a1' to 'a2' and commit --amend 'a1-msg'
    #
    # During the amend in step five, we will be in this state:
    #
    # @ 3 temporary amend commit for a1-amend
    # |
    # o 2 a1-amend
    # |
    # | o 1 a1
    # |/
    # o 0 a0
    #
    # When _findlimit is called, a and b are revs 3 and 0, so limit will be 2,
    # yet the filelog has the copy information in rev 1 and we will not look
    # back far enough unless we also look at the a and b as candidates.
    # This only occurs when a is a descendant of b or vice versa.
    return min(limit, a, b)
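# Standalone sketch (illustrative, not part of the original file): the same
# side-marking walk over a toy DAG, with parents given as a plain dict of
# rev -> (p1, p2) and -1 standing in for the null revision.  Running it on
# the issue4405 graph from the comment above shows why a and b themselves
# must be candidates for the final minimum.

def _findlimitsketch(parentrevs, a, b):
    import heapq
    side = {a: -1, b: 1}
    visit = [-a, -b]
    heapq.heapify(visit)
    interesting = len(visit)
    limit = max(a, b) + 1
    while interesting:
        r = -heapq.heappop(visit)
        for p in [p for p in parentrevs[r] if p != -1]:
            if p not in side:
                side[p] = side[r]
                if side[p]:
                    interesting += 1
                heapq.heappush(visit, -p)
            elif side[p] and side[p] != side[r]:
                side[p] = 0
                interesting -= 1
        if side[r]:
            limit = r
            interesting -= 1
    return min(limit, a, b)

# The graph above (0 <- 1, 0 <- 2, 2 <- 3), comparing revs 3 and 0:
# _findlimitsketch({0: (-1, -1), 1: (0, -1), 2: (0, -1), 3: (2, -1)}, 3, 0)
# walks down to limit == 2 but returns min(2, 3, 0) == 0.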
def _chain(src, dst, a, b):
    """chain two sets of copies a->b"""
    t = a.copy()
    for k, v in b.iteritems():
        if v in t:
            # found a chain
            if t[v] != k:
                # file wasn't renamed back to itself
                t[k] = t[v]
            if v not in dst:
                # chain was a rename, not a copy
                del t[v]
        if v in src:
            # file is a copy of an existing file
            t[k] = v

    for k, v in list(t.items()):
        # remove criss-crossed copies
        if k in src and v in dst:
            del t[k]
        # remove copies to files that were then removed
        elif k not in dst:
            del t[k]

    return t
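# Worked example (illustrative, not part of the original file): chaining two
# copy maps with plain sets standing in for the src/dst contexts, which is
# enough because _chain only performs ``in`` membership tests on them.  A
# file is renamed a -> b in the first segment and b -> c in the second; the
# composed result records a single rename a -> c.

def _chainexample():
    src = {'a'}          # files present at the starting context
    dst = {'c'}          # files present at the ending context
    first = {'b': 'a'}   # segment 1 copies, as {dst: src}
    second = {'c': 'b'}  # segment 2 copies

    t = first.copy()
    for k, v in second.items():
        if v in t:
            if t[v] != k:
                t[k] = t[v]  # extend the chain: c -> a
            if v not in dst:
                del t[v]     # 'b' is gone, so it was a rename
        if v in src:
            t[k] = v
    for k, v in list(t.items()):
        if (k in src and v in dst) or k not in dst:
            del t[k]
    return t                 # {'c': 'a'}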
def _tracefile(fctx, am, limit=node.nullrev):
    """return file context that is the ancestor of fctx present in ancestor
    manifest am, stopping after the first ancestor lower than limit"""

    for f in fctx.ancestors():
        if am.get(f.path(), None) == f.filenode():
            return f
        if limit >= 0 and not f.isintroducedafter(limit):
            return None
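# Standalone sketch (illustrative, not part of the original file): the
# ancestor walk above, over a toy newest-first chain of (path, filenode)
# pairs, with the ancestor manifest modelled as a plain dict.

def _tracefilesketch(chain, ancestormanifest):
    for path, filenode in chain:
        if ancestormanifest.get(path) == filenode:
            return path
    return None

# _tracefilesketch([('b.txt', 2), ('a.txt', 1)], {'a.txt': 1}) -> 'a.txt',
# following the rename of a.txt to b.txt back into the ancestor manifest.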
def _dirstatecopies(repo, match=None):
    ds = repo.dirstate
    c = ds.copies().copy()
    for k in list(c):
        if ds[k] not in 'anm' or (match and not match(k)):
            del c[k]
    return c

def _computeforwardmissing(a, b, match=None):
    """Computes which files are in b but not a.
    This is its own function so extensions can easily wrap this call to see
    what files _forwardcopies is about to process.
    """
    ma = a.manifest()
    mb = b.manifest()
    return mb.filesnotin(ma, match=match)

def usechangesetcentricalgo(repo):
    """Checks if we should use changeset-centric copy algorithms"""
-    return (repo.ui.config('experimental', 'copies.read-from') ==
-            'compatibility')
+    return (repo.ui.config('experimental', 'copies.read-from') in
+            ('changeset-only', 'compatibility'))

def _committedforwardcopies(a, b, match):
    """Like _forwardcopies(), but b.rev() cannot be None (working copy)"""
    # files might have to be traced back to the fctx parent of the last
    # one-side-only changeset, but not further back than that
    repo = a._repo

    if usechangesetcentricalgo(repo):
        return _changesetforwardcopies(a, b, match)

    debug = repo.ui.debugflag and repo.ui.configbool('devel', 'debug.copies')
    dbg = repo.ui.debug
    if debug:
        dbg('debug.copies: looking into rename from %s to %s\n'
            % (a, b))
    limit = _findlimit(repo, a, b)
    if debug:
        dbg('debug.copies: search limit: %d\n' % limit)
    am = a.manifest()

    # find where new files came from
    # we currently don't try to find where old files went, too expensive
    # this means we can miss a case like 'hg rm b; hg cp a b'
    cm = {}

    # Computing the forward missing is quite expensive on large manifests,
    # since it compares the entire manifests. We can optimize it in the
    # common use case of computing what copies are in a commit versus its
    # parent (like during a rebase or histedit). Note, we exclude merge
    # commits from this optimization, since the ctx.files() for a merge
    # commit is not correct for this comparison.
    forwardmissingmatch = match
    if b.p1() == a and b.p2().node() == node.nullid:
        filesmatcher = matchmod.exact(b.files())
        forwardmissingmatch = matchmod.intersectmatchers(match, filesmatcher)
    missing = _computeforwardmissing(a, b, match=forwardmissingmatch)

    ancestrycontext = a._repo.changelog.ancestors([b.rev()], inclusive=True)

    if debug:
        dbg('debug.copies: missing file to search: %d\n' % len(missing))

    for f in missing:
        if debug:
            dbg('debug.copies: tracing file: %s\n' % f)
        fctx = b[f]
        fctx._ancestrycontext = ancestrycontext

        if debug:
            start = util.timer()
        ofctx = _tracefile(fctx, am, limit)
        if ofctx:
            if debug:
                dbg('debug.copies: rename of: %s\n' % ofctx._path)
            cm[f] = ofctx.path()
        if debug:
            dbg('debug.copies: time: %f seconds\n'
                % (util.timer() - start))
    return cm

def _changesetforwardcopies(a, b, match):
    if a.rev() == node.nullrev:
        return {}

    repo = a.repo()
    children = {}
    cl = repo.changelog
    missingrevs = cl.findmissingrevs(common=[a.rev()], heads=[b.rev()])
    for r in missingrevs:
        for p in cl.parentrevs(r):
            if p == node.nullrev:
                continue
            if p not in children:
                children[p] = [r]
            else:
                children[p].append(r)

    roots = set(children) - set(missingrevs)
    # 'work' contains 3-tuples of a (revision number, parent number, copies).
    # The parent number is only used for knowing which parent the copies dict
    # came from.
    work = [(r, 1, {}) for r in roots]
    heapq.heapify(work)
    while work:
        r, i1, copies1 = heapq.heappop(work)
        if work and work[0][0] == r:
            # We are tracing copies from both parents
            r, i2, copies2 = heapq.heappop(work)
            copies = {}
            ctx = repo[r]
            p1man, p2man = ctx.p1().manifest(), ctx.p2().manifest()
            allcopies = set(copies1) | set(copies2)
            # TODO: perhaps this filtering should be done as long as ctx
            # is a merge, whether or not we're tracing from both parents.
            for dst in allcopies:
                if not match(dst):
                    continue
                if dst not in copies2:
                    # Copied on p1 side: mark as copy from p1 side if it
                    # didn't already exist on p2 side
                    if dst not in p2man:
                        copies[dst] = copies1[dst]
                elif dst not in copies1:
                    # Copied on p2 side: mark as copy from p2 side if it
                    # didn't already exist on p1 side
                    if dst not in p1man:
                        copies[dst] = copies2[dst]
                else:
                    # Copied on both sides: mark as copy from p1 side
                    copies[dst] = copies1[dst]
        else:
            copies = copies1
        if r == b.rev():
            return copies
        for c in children[r]:
            childctx = repo[c]
            if r == childctx.p1().rev():
                parent = 1
                childcopies = childctx.p1copies()
            else:
                assert r == childctx.p2().rev()
                parent = 2
                childcopies = childctx.p2copies()
            if not match.always():
                childcopies = {dst: src for dst, src in childcopies.items()
                               if match(dst)}
            childcopies = _chain(a, childctx, copies, childcopies)
            heapq.heappush(work, (c, parent, childcopies))
    assert False
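# Worked example (illustrative, not part of the original file): the rule
# used above for combining copies traced through both parents of a merge,
# rewritten over plain dicts and sets and without the matcher filtering.

def _mergecopydictsexample(copies1, copies2, p1files, p2files):
    copies = {}
    for dst in set(copies1) | set(copies2):
        if dst not in copies2:
            if dst not in p2files:      # p1-only copy, new to the p2 side
                copies[dst] = copies1[dst]
        elif dst not in copies1:
            if dst not in p1files:      # p2-only copy, new to the p1 side
                copies[dst] = copies2[dst]
        else:
            copies[dst] = copies1[dst]  # copied on both sides: p1 wins
    return copies

# _mergecopydictsexample({'b': 'a', 'c': 'a'}, {'b': 'z'},
#                        {'a', 'b', 'c'}, {'a', 'b', 'c', 'z'})
# -> {'b': 'a'}: 'b' was copied on both sides so the p1 source wins, and
#    'c' is dropped because it already existed on the p2 side.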
def _forwardcopies(a, b, match=None):
    """find {dst@b: src@a} copy mapping where a is an ancestor of b"""

    match = a.repo().narrowmatch(match)
    # check for working copy
    if b.rev() is None:
        if a == b.p1():
            # short-circuit to avoid issues with merge states
            return _dirstatecopies(b._repo, match)

        cm = _committedforwardcopies(a, b.p1(), match)
        # combine copies from dirstate if necessary
        return _chain(a, b, cm, _dirstatecopies(b._repo, match))
    return _committedforwardcopies(a, b, match)

def _backwardrenames(a, b, match):
    if a._repo.ui.config('experimental', 'copytrace') == 'off':
        return {}

    # Even though we're not taking copies into account, 1:n rename situations
    # can still exist (e.g. hg cp a b; hg mv a c). In those cases we
    # arbitrarily pick one of the renames.
    # We don't want to pass in "match" here, since that would filter
    # the destination by it. Since we're reversing the copies, we want
    # to filter the source instead.
    f = _forwardcopies(b, a)
    r = {}
    for k, v in sorted(f.iteritems()):
        if match and not match(v):
            continue
        # remove copies
        if v in a:
            continue
        r[v] = k
    return r
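# Worked example (illustrative, not part of the original file): reversing a
# forward copy map with plain data.  Going forward from b to a, b's 'old'
# became both 'new1' and 'new2'; reversed, only sources that no longer
# exist in ``a`` survive, and iteration order arbitrarily picks a winner.

def _backwardrenamesexample():
    afiles = {'new1', 'new2'}                 # files present in a
    forward = {'new1': 'old', 'new2': 'old'}  # {dst@a: src@b}
    r = {}
    for k, v in sorted(forward.items()):
        if v in afiles:
            continue     # source still exists: a copy, not a rename
        r[v] = k         # later keys overwrite earlier ones
    return r             # {'old': 'new2'}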
def pathcopies(x, y, match=None):
    """find {dst@y: src@x} copy mapping for directed compare"""
    repo = x._repo
    debug = repo.ui.debugflag and repo.ui.configbool('devel', 'debug.copies')
    if debug:
        repo.ui.debug('debug.copies: searching copies from %s to %s\n'
                      % (x, y))
    if x == y or not x or not y:
        return {}
    a = y.ancestor(x)
    if a == x:
        if debug:
            repo.ui.debug('debug.copies: search mode: forward\n')
        return _forwardcopies(x, y, match=match)
    if a == y:
        if debug:
            repo.ui.debug('debug.copies: search mode: backward\n')
        return _backwardrenames(x, y, match=match)
    if debug:
        repo.ui.debug('debug.copies: search mode: combined\n')
    return _chain(x, y, _backwardrenames(x, a, match=match),
                  _forwardcopies(a, y, match=match))
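# Usage sketch (illustrative, not part of the original file): asking which
# working-directory files were copied or renamed relative to some revision.
# ``repo`` is assumed to be an open localrepo; when the two contexts are
# not in a direct ancestor line, pathcopies combines a backward and a
# forward pass through their common ancestor.

def _pathcopiesexample(repo, rev):
    x = repo[rev]            # any changectx
    y = repo[None]           # the working directory context
    return pathcopies(x, y)  # {dst@y: src@x}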
356 def _computenonoverlap(repo, c1, c2, addedinm1, addedinm2, baselabel=''):
356 def _computenonoverlap(repo, c1, c2, addedinm1, addedinm2, baselabel=''):
357 """Computes, based on addedinm1 and addedinm2, the files exclusive to c1
357 """Computes, based on addedinm1 and addedinm2, the files exclusive to c1
358 and c2. This is its own function so extensions can easily wrap this call
358 and c2. This is its own function so extensions can easily wrap this call
359 to see what files mergecopies is about to process.
359 to see what files mergecopies is about to process.
360
360
361 Even though c1 and c2 are not used in this function, they are useful in
361 Even though c1 and c2 are not used in this function, they are useful in
362 other extensions for being able to read the file nodes of the changed files.
362 other extensions for being able to read the file nodes of the changed files.
363
363
364 "baselabel" can be passed to help distinguish the multiple computations
364 "baselabel" can be passed to help distinguish the multiple computations
365 done in the graft case.
365 done in the graft case.
366 """
366 """
367 u1 = sorted(addedinm1 - addedinm2)
367 u1 = sorted(addedinm1 - addedinm2)
368 u2 = sorted(addedinm2 - addedinm1)
368 u2 = sorted(addedinm2 - addedinm1)
369
369
370 header = " unmatched files in %s"
370 header = " unmatched files in %s"
371 if baselabel:
371 if baselabel:
372 header += ' (from %s)' % baselabel
372 header += ' (from %s)' % baselabel
373 if u1:
373 if u1:
374 repo.ui.debug("%s:\n %s\n" % (header % 'local', "\n ".join(u1)))
374 repo.ui.debug("%s:\n %s\n" % (header % 'local', "\n ".join(u1)))
375 if u2:
375 if u2:
376 repo.ui.debug("%s:\n %s\n" % (header % 'other', "\n ".join(u2)))
376 repo.ui.debug("%s:\n %s\n" % (header % 'other', "\n ".join(u2)))
377
377
378 return u1, u2
378 return u1, u2
379
379
def _makegetfctx(ctx):
    """return a 'getfctx' function suitable for _checkcopies usage

    We have to re-setup the function building 'filectx' for each
    '_checkcopies' to ensure the linkrev adjustment is properly setup for
    each. Linkrev adjustment is important to avoid bugs in rename
    detection. Moreover, having a proper '_ancestrycontext' setup ensures
    the performance impact of this adjustment is kept limited. Without it,
    each file could do a full dag traversal making the time complexity of
    the operation explode (see issue4537).

    This function exists here mostly to limit the impact on stable. Feel
    free to refactor on default.
    """
    rev = ctx.rev()
    repo = ctx._repo
    ac = getattr(ctx, '_ancestrycontext', None)
    if ac is None:
        revs = [rev]
        if rev is None:
            revs = [p.rev() for p in ctx.parents()]
        ac = repo.changelog.ancestors(revs, inclusive=True)
        ctx._ancestrycontext = ac
    def makectx(f, n):
        if n in node.wdirfilenodeids:  # in a working context?
            if ctx.rev() is None:
                return ctx.filectx(f)
            return repo[None][f]
        fctx = repo.filectx(f, fileid=n)
        # setup only needed for filectx not created from a changectx
        fctx._ancestrycontext = ac
        fctx._descendantrev = rev
        return fctx
    return util.lrucachefunc(makectx)

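util.lrucachefunc memoizes makectx so repeated lookups of the same (f, n)
pair reuse the already-built filectx. A rough stand-in using only the
standard library (an approximation of the behavior, not Mercurial's cache):

    >>> import functools
    >>> @functools.lru_cache(maxsize=20)
    ... def makectx(f, n):
    ...     return (f, n)  # stands in for building a real filectx
    ...
    >>> makectx('a.txt', 1) is makectx('a.txt', 1)  # second call is cached
    True
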
def _combinecopies(copyfrom, copyto, finalcopy, diverge, incompletediverge):
    """combine partial copy paths"""
    remainder = {}
    for f in copyfrom:
        if f in copyto:
            finalcopy[copyto[f]] = copyfrom[f]
            del copyto[f]
    for f in incompletediverge:
        assert f not in diverge
        ic = incompletediverge[f]
        if ic[0] in copyto:
            diverge[f] = [copyto[ic[0]], ic[1]]
        else:
            remainder[f] = ic
    return remainder

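A worked example of how the partial copy paths are stitched together across
the base (filenames here are hypothetical):

    >>> copyfrom = {'old': 'base'}  # half found while walking one side
    >>> copyto = {'old': 'new'}     # matching half from the other side
    >>> finalcopy, diverge = {}, {}
    >>> _combinecopies(copyfrom, copyto, finalcopy, diverge, {})
    {}
    >>> finalcopy  # the two halves joined into a full rename
    {'new': 'base'}
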
def mergecopies(repo, c1, c2, base):
    """
    Finds moves and copies between context c1 and c2 that are relevant for
    merging. 'base' will be used as the merge base.

    Copytracing is used in commands like rebase, merge, unshelve, etc. to
    merge files that were moved/copied in one merge parent and modified in
    another. For example:

    o       ---> 4 another commit
    |
    |  o    ---> 3 commit that modifies a.txt
    | /
    o /     ---> 2 commit that moves a.txt to b.txt
    |/
    o       ---> 1 merge base

    If we try to rebase revision 3 on revision 4, since there is no a.txt in
    revision 4, and if the user has copytrace disabled, we print the following
    message:

    ```other changed <file> which local deleted```

    Returns five dicts: "copy", "movewithdir", "diverge", "renamedelete" and
    "dirmove".

    "copy" is a mapping from destination name -> source name,
    where source is in c1 and destination is in c2 or vice-versa.

    "movewithdir" is a mapping from source name -> destination name,
    where the file at source is present in one context but not the other
    and needs to be moved to destination by the merge process, because the
    other context moved the directory it is in.

    "diverge" is a mapping of source name -> list of destination names
    for divergent renames.

    "renamedelete" is a mapping of source name -> list of destination
    names for files deleted in c1 that were renamed in c2 or vice-versa.

    "dirmove" is a mapping of detected source dir -> destination dir renames.
    This is needed for handling changes to new files previously grafted into
    renamed directories.

    This function calls different copytracing algorithms based on config.
    """
    # avoid silly behavior for update from empty dir
    if not c1 or not c2 or c1 == c2:
        return {}, {}, {}, {}, {}

    narrowmatch = c1.repo().narrowmatch()

    # avoid silly behavior for parent -> working dir
    if c2.node() is None and c1.node() == repo.dirstate.p1():
        return _dirstatecopies(repo, narrowmatch), {}, {}, {}, {}

    copytracing = repo.ui.config('experimental', 'copytrace')
    boolctrace = stringutil.parsebool(copytracing)

    # Copy trace disabling is explicitly below the node == p1 logic above
    # because the logic above is required for a simple copy to be kept across
    # a rebase.
    if copytracing == 'heuristics':
        # Do full copytracing if only non-public revisions are involved, as
        # that will be fast enough and will also cover the copies which could
        # be missed by heuristics
        if _isfullcopytraceable(repo, c1, base):
            return _fullcopytracing(repo, c1, c2, base)
        return _heuristicscopytracing(repo, c1, c2, base)
    elif boolctrace is False:
        # stringutil.parsebool() returns None when it is unable to parse the
        # value, so we should rely on making sure copytracing is on in such
        # cases
        return {}, {}, {}, {}, {}
    else:
        return _fullcopytracing(repo, c1, c2, base)

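The dispatch above depends on how the 'experimental.copytrace' value parses;
a sketch with a stand-in for stringutil.parsebool (the real parser accepts
more spellings, e.g. 'yes'/'no'/'1'/'0'):

    >>> def parsebool(s):
    ...     # stand-in for mercurial.utils.stringutil.parsebool
    ...     return {'on': True, 'off': False}.get(s)  # None if unparsable
    >>> parsebool('off') is False  # explicit 'off': copytracing disabled
    True
    >>> parsebool('heuristics') is None  # unparsable: handled separately
    True
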
def _isfullcopytraceable(repo, c1, base):
    """ Checks whether base, source and destination are all non-public
    branches; if so, use the full copytrace algorithm for increased
    capabilities, since it will be fast enough.

    `experimental.copytrace.sourcecommitlimit` can be used to set a limit on
    the number of changesets from c1 to base: if there are more changesets
    than the limit, the full copytracing algorithm won't be used.
    """
    if c1.rev() is None:
        c1 = c1.p1()
    if c1.mutable() and base.mutable():
        sourcecommitlimit = repo.ui.configint('experimental',
                                              'copytrace.sourcecommitlimit')
        commits = len(repo.revs('%d::%d', base.rev(), c1.rev()))
        return commits < sourcecommitlimit
    return False

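Concretely, the limit check reduces to a count comparison (values assumed
for illustration):

    >>> sourcecommitlimit = 100  # experimental.copytrace.sourcecommitlimit
    >>> commits = 3              # len(repo.revs('%d::%d', base, c1))
    >>> commits < sourcecommitlimit  # small draft stack: full copytracing
    True
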
def _fullcopytracing(repo, c1, c2, base):
    """ The full copytracing algorithm which finds all the new files that were
    added from merge base up to the top commit and for each file it checks if
    this file was copied from another file.

    This is pretty slow when a lot of changesets are involved but will track
    all the copies.
    """
    # In certain scenarios (e.g. graft, update or rebase), base can be
    # overridden. We still need to know a real common ancestor in this case.
    # We can't just compute _c1.ancestor(_c2) and compare it to ca, because
    # there can be multiple common ancestors, e.g. in case of bidmerge.
    # Because our caller may not know if the revision passed in lieu of the
    # CA is a genuine common ancestor or not without explicitly checking it,
    # it's better to determine that here.
    #
    # base.isancestorof(wc) is False, work around that
    _c1 = c1.p1() if c1.rev() is None else c1
    _c2 = c2.p1() if c2.rev() is None else c2
    # an endpoint is "dirty" if it isn't a descendant of the merge base
    # if we have a dirty endpoint, we need to trigger graft logic, and also
    # keep track of which endpoint is dirty
    dirtyc1 = not base.isancestorof(_c1)
    dirtyc2 = not base.isancestorof(_c2)
    graft = dirtyc1 or dirtyc2
    tca = base
    if graft:
        tca = _c1.ancestor(_c2)

    limit = _findlimit(repo, c1, c2)
    repo.ui.debug("  searching for copies back to rev %d\n" % limit)

    m1 = c1.manifest()
    m2 = c2.manifest()
    mb = base.manifest()

    # gather data from _checkcopies:
    # - diverge = record all diverges in this dict
    # - copy = record all non-divergent copies in this dict
    # - fullcopy = record all copies in this dict
    # - incomplete = record non-divergent partial copies here
    # - incompletediverge = record divergent partial copies here
    diverge = {}  # divergence data is shared
    incompletediverge = {}
    data1 = {'copy': {},
             'fullcopy': {},
             'incomplete': {},
             'diverge': diverge,
             'incompletediverge': incompletediverge,
            }
    data2 = {'copy': {},
             'fullcopy': {},
             'incomplete': {},
             'diverge': diverge,
             'incompletediverge': incompletediverge,
            }

    # find interesting file sets from manifests
    addedinm1 = m1.filesnotin(mb, repo.narrowmatch())
    addedinm2 = m2.filesnotin(mb, repo.narrowmatch())
    bothnew = sorted(addedinm1 & addedinm2)
    if tca == base:
        # unmatched file from base
        u1r, u2r = _computenonoverlap(repo, c1, c2, addedinm1, addedinm2)
        u1u, u2u = u1r, u2r
    else:
        # unmatched file from base (DAG rotation in the graft case)
        u1r, u2r = _computenonoverlap(repo, c1, c2, addedinm1, addedinm2,
                                      baselabel='base')
        # unmatched file from topological common ancestors (no DAG rotation)
        # need to recompute this for directory move handling when grafting
        mta = tca.manifest()
        u1u, u2u = _computenonoverlap(repo, c1, c2,
                                      m1.filesnotin(mta, repo.narrowmatch()),
                                      m2.filesnotin(mta, repo.narrowmatch()),
                                      baselabel='topological common ancestor')

    for f in u1u:
        _checkcopies(c1, c2, f, base, tca, dirtyc1, limit, data1)

    for f in u2u:
        _checkcopies(c2, c1, f, base, tca, dirtyc2, limit, data2)

    copy = dict(data1['copy'])
    copy.update(data2['copy'])
    fullcopy = dict(data1['fullcopy'])
    fullcopy.update(data2['fullcopy'])

    if dirtyc1:
        _combinecopies(data2['incomplete'], data1['incomplete'], copy, diverge,
                       incompletediverge)
    if dirtyc2:
        _combinecopies(data1['incomplete'], data2['incomplete'], copy, diverge,
                       incompletediverge)

    renamedelete = {}
    renamedeleteset = set()
    divergeset = set()
    for of, fl in list(diverge.items()):
        if len(fl) == 1 or of in c1 or of in c2:
            del diverge[of]  # not actually divergent, or not a rename
            if of not in c1 and of not in c2:
                # renamed on one side, deleted on the other side, but filter
                # out files that have been renamed and then deleted
                renamedelete[of] = [f for f in fl if f in c1 or f in c2]
                renamedeleteset.update(fl)  # reverse map for below
        else:
            divergeset.update(fl)  # reverse map for below

    if bothnew:
        repo.ui.debug("  unmatched files new in both:\n   %s\n"
                      % "\n   ".join(bothnew))
    bothdiverge = {}
    bothincompletediverge = {}
    remainder = {}
    both1 = {'copy': {},
             'fullcopy': {},
             'incomplete': {},
             'diverge': bothdiverge,
             'incompletediverge': bothincompletediverge
            }
    both2 = {'copy': {},
             'fullcopy': {},
             'incomplete': {},
             'diverge': bothdiverge,
             'incompletediverge': bothincompletediverge
            }
    for f in bothnew:
        _checkcopies(c1, c2, f, base, tca, dirtyc1, limit, both1)
        _checkcopies(c2, c1, f, base, tca, dirtyc2, limit, both2)
    if dirtyc1 and dirtyc2:
        remainder = _combinecopies(both2['incomplete'], both1['incomplete'],
                                   copy, bothdiverge, bothincompletediverge)
        remainder1 = _combinecopies(both1['incomplete'], both2['incomplete'],
                                    copy, bothdiverge, bothincompletediverge)
        remainder.update(remainder1)
    elif dirtyc1:
        # incomplete copies may only be found on the "dirty" side for bothnew
        assert not both2['incomplete']
        remainder = _combinecopies({}, both1['incomplete'], copy, bothdiverge,
                                   bothincompletediverge)
    elif dirtyc2:
        assert not both1['incomplete']
        remainder = _combinecopies({}, both2['incomplete'], copy, bothdiverge,
                                   bothincompletediverge)
    else:
        # incomplete copies and divergences can't happen outside grafts
        assert not both1['incomplete']
        assert not both2['incomplete']
        assert not bothincompletediverge
    for f in remainder:
        assert f not in bothdiverge
        ic = remainder[f]
        if ic[0] in (m1 if dirtyc1 else m2):
            # backed-out rename on one side, but watch out for deleted files
            bothdiverge[f] = ic
    for of, fl in bothdiverge.items():
        if len(fl) == 2 and fl[0] == fl[1]:
            copy[fl[0]] = of  # not actually divergent, just matching renames

    if fullcopy and repo.ui.debugflag:
        repo.ui.debug("  all copies found (* = to merge, ! = divergent, "
                      "% = renamed and deleted):\n")
        for f in sorted(fullcopy):
            note = ""
            if f in copy:
                note += "*"
            if f in divergeset:
                note += "!"
            if f in renamedeleteset:
                note += "%"
            repo.ui.debug("   src: '%s' -> dst: '%s' %s\n" % (fullcopy[f], f,
                                                              note))
    del divergeset

    if not fullcopy:
        return copy, {}, diverge, renamedelete, {}

    repo.ui.debug("  checking for directory renames\n")

    # generate a directory move map
    d1, d2 = c1.dirs(), c2.dirs()
    # Hack for adding '', which is not otherwise added, to d1 and d2
    d1.addpath('/')
    d2.addpath('/')
    invalid = set()
    dirmove = {}

    # examine each file copy for a potential directory move, which is
    # when all the files in a directory are moved to a new directory
    for dst, src in fullcopy.iteritems():
        dsrc, ddst = pathutil.dirname(src), pathutil.dirname(dst)
        if dsrc in invalid:
            # already seen to be uninteresting
            continue
        elif dsrc in d1 and ddst in d1:
            # directory wasn't entirely moved locally
            invalid.add(dsrc)
        elif dsrc in d2 and ddst in d2:
            # directory wasn't entirely moved remotely
            invalid.add(dsrc)
        elif dsrc in dirmove and dirmove[dsrc] != ddst:
            # files from the same directory moved to two different places
            invalid.add(dsrc)
        else:
            # looks good so far
            dirmove[dsrc] = ddst

    for i in invalid:
        if i in dirmove:
            del dirmove[i]
    del d1, d2, invalid

    if not dirmove:
        return copy, {}, diverge, renamedelete, {}

    dirmove = {k + "/": v + "/" for k, v in dirmove.iteritems()}

    for d in dirmove:
        repo.ui.debug("   discovered dir src: '%s' -> dst: '%s'\n" %
                      (d, dirmove[d]))

    movewithdir = {}
    # check unaccounted nonoverlapping files against directory moves
    for f in u1r + u2r:
        if f not in fullcopy:
            for d in dirmove:
                if f.startswith(d):
                    # new file added in a directory that was moved, move it
                    df = dirmove[d] + f[len(d):]
                    if df not in copy:
                        movewithdir[f] = df
                        repo.ui.debug(("   pending file src: '%s' -> "
                                       "dst: '%s'\n") % (f, df))
                    break

    return copy, movewithdir, diverge, renamedelete, dirmove

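The directory-move detection above maps source directories to destination
directories and discards any directory with conflicting evidence; a minimal
sketch with hypothetical paths (posixpath stands in for Mercurial's
pathutil):

    >>> import posixpath
    >>> fullcopy = {'newdir/a.txt': 'olddir/a.txt',
    ...             'newdir/b.txt': 'olddir/b.txt'}
    >>> dirmove = {}
    >>> for dst, src in sorted(fullcopy.items()):
    ...     dsrc, ddst = posixpath.dirname(src), posixpath.dirname(dst)
    ...     dirmove[dsrc] = ddst  # no conflicting targets in this toy case
    ...
    >>> dirmove
    {'olddir': 'newdir'}
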
def _heuristicscopytracing(repo, c1, c2, base):
    """ Fast copytracing using filename heuristics

    Assumes that moves or renames are of the following two types:

    1) Inside a directory only (same directory name but different filenames)
    2) Move from one directory to another
       (same filenames but different directory names)

    Works only when there are no merge commits in the "source branch".
    The source branch is the commits from base up to c2, not including base.

    If a merge is involved it falls back to _fullcopytracing().

    Can be used by setting the following config:

        [experimental]
        copytrace = heuristics

    In some cases the copy/move candidates found by heuristics can be very
    large in number and that will make the algorithm slow. The number of
    possible candidates to check can be limited by using the config
    `experimental.copytrace.movecandidateslimit` which defaults to 100.
    """

    if c1.rev() is None:
        c1 = c1.p1()
    if c2.rev() is None:
        c2 = c2.p1()

    copies = {}

    changedfiles = set()
    m1 = c1.manifest()
    if not repo.revs('%d::%d', base.rev(), c2.rev()):
        # If base is not in c2 branch, we switch to fullcopytracing
        repo.ui.debug("switching to full copytracing as base is not "
                      "an ancestor of c2\n")
        return _fullcopytracing(repo, c1, c2, base)

    ctx = c2
    while ctx != base:
        if len(ctx.parents()) == 2:
            # To keep things simple let's not handle merges
            repo.ui.debug("switching to full copytracing because of merges\n")
            return _fullcopytracing(repo, c1, c2, base)
        changedfiles.update(ctx.files())
        ctx = ctx.p1()

    cp = _forwardcopies(base, c2)
    for dst, src in cp.iteritems():
        if src in m1:
            copies[dst] = src

    # a file is missing if it isn't present in the destination, but is
    # present in the base and present in the source.
    # Presence in the base is important to exclude added files, presence in
    # the source is important to exclude removed files.
    filt = lambda f: f not in m1 and f in base and f in c2
    missingfiles = [f for f in changedfiles if filt(f)]

    if missingfiles:
        basenametofilename = collections.defaultdict(list)
        dirnametofilename = collections.defaultdict(list)

        for f in m1.filesnotin(base.manifest()):
            basename = os.path.basename(f)
            dirname = os.path.dirname(f)
            basenametofilename[basename].append(f)
            dirnametofilename[dirname].append(f)

        for f in missingfiles:
            basename = os.path.basename(f)
            dirname = os.path.dirname(f)
            samebasename = basenametofilename[basename]
            samedirname = dirnametofilename[dirname]
            movecandidates = samebasename + samedirname
            # f is guaranteed to be present in c2, that's why
            # c2.filectx(f) won't fail
            f2 = c2.filectx(f)
            # we can have a lot of candidates which can slow down the
            # heuristics; config value to limit the number of candidate
            # moves to check
            maxcandidates = repo.ui.configint('experimental',
                                              'copytrace.movecandidateslimit')

            if len(movecandidates) > maxcandidates:
                repo.ui.status(_("skipping copytracing for '%s', more "
                                 "candidates than the limit: %d\n")
                               % (f, len(movecandidates)))
                continue

            for candidate in movecandidates:
                f1 = c1.filectx(candidate)
                if _related(f1, f2):
                    # if there are a few related copies then we'll merge
                    # changes into all of them. This matches the behaviour
                    # of upstream copytracing
                    copies[candidate] = f

    return copies, {}, {}, {}, {}

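The basename/dirname indexing is the core of the heuristic; a self-contained
sketch of how move candidates are gathered (paths are made up):

    >>> import collections, os.path
    >>> basenametofilename = collections.defaultdict(list)
    >>> dirnametofilename = collections.defaultdict(list)
    >>> for f in ['src/util.py', 'lib/util.py', 'src/main.py']:
    ...     basenametofilename[os.path.basename(f)].append(f)
    ...     dirnametofilename[os.path.dirname(f)].append(f)
    ...
    >>> basenametofilename['util.py']  # candidates for a missing 'util.py'
    ['src/util.py', 'lib/util.py']
    >>> dirnametofilename['src']  # candidates that stayed inside 'src'
    ['src/util.py', 'src/main.py']
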
def _related(f1, f2):
    """return True if f1 and f2 filectx have a common ancestor

    Walk back to common ancestor to see if the two files originate
    from the same file. Since workingfilectx's rev() is None it messes
    up the integer comparison logic, hence the pre-step check for
    None (f1 and f2 can only be workingfilectx's initially).
    """

    if f1 == f2:
        return True  # a match

    g1, g2 = f1.ancestors(), f2.ancestors()
    try:
        f1r, f2r = f1.linkrev(), f2.linkrev()

        if f1r is None:
            f1 = next(g1)
        if f2r is None:
            f2 = next(g2)

        while True:
            f1r, f2r = f1.linkrev(), f2.linkrev()
            if f1r > f2r:
                f1 = next(g1)
            elif f2r > f1r:
                f2 = next(g2)
            else:  # f1 and f2 point to files in the same linkrev
                return f1 == f2  # true if they point to the same file
    except StopIteration:
        return False

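The lock-step linkrev walk can be sketched with plain (path, linkrev) tuples
standing in for filectxs (ancestor iterators yield newest first; all values
here are assumed):

    >>> f1, f2 = ('x', 7), ('x', 4)
    >>> g1 = iter([('x', 5), ('x', 2)])  # older ancestors of f1
    >>> g2 = iter([('x', 2)])            # older ancestors of f2
    >>> try:
    ...     while f1[1] != f2[1]:
    ...         if f1[1] > f2[1]:
    ...             f1 = next(g1)
    ...         else:
    ...             f2 = next(g2)
    ...     related = (f1 == f2)
    ... except StopIteration:
    ...     related = False
    ...
    >>> related
    True
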
def _checkcopies(srcctx, dstctx, f, base, tca, remotebase, limit, data):
    """
    check possible copies of f from msrc to mdst

    srcctx = starting context for f in msrc
    dstctx = destination context for f in mdst
    f = the filename to check (as in msrc)
    base = the changectx used as a merge base
    tca = topological common ancestor for graft-like scenarios
    remotebase = True if base is outside tca::srcctx, False otherwise
    limit = the rev number to not search beyond
    data = dictionary of dictionaries to store copy data. (see mergecopies)

    note: limit is only an optimization, and provides no guarantee that
    irrelevant revisions will not be visited; there is no easy way to make
    this algorithm stop in a guaranteed way once it "goes behind a certain
    revision".
    """

    msrc = srcctx.manifest()
    mdst = dstctx.manifest()
    mb = base.manifest()
    mta = tca.manifest()
    # Might be true if this call is about finding backward renames.
    # This happens in the case of grafts because the DAG is then rotated.
    # If the file exists in both the base and the source, we are not looking
    # for a rename on the source side, but on the part of the DAG that is
    # traversed backwards.
    #
    # In the case there are both backward and forward renames (before and
    # after the base) this is more complicated as we must detect a
    # divergence. We use 'backwards = False' in that case.
    backwards = not remotebase and base != tca and f in mb
    getsrcfctx = _makegetfctx(srcctx)
    getdstfctx = _makegetfctx(dstctx)

    if msrc[f] == mb.get(f) and not remotebase:
        # Nothing to merge
        return

    of = None
    seen = {f}
    for oc in getsrcfctx(f, msrc[f]).ancestors():
        of = oc.path()
        if of in seen:
            # check limit late - grab last rename before
            if oc.linkrev() < limit:
                break
            continue
        seen.add(of)

        # remember for dir rename detection
        if backwards:
            data['fullcopy'][of] = f  # grafting backwards through renames
        else:
            data['fullcopy'][f] = of
        if of not in mdst:
            continue  # no match, keep looking
        if mdst[of] == mb.get(of):
            return  # no merge needed, quit early
        c2 = getdstfctx(of, mdst[of])
        # c2 might be a plain new file added on the destination side that is
        # unrelated to the droids we are looking for.
        cr = _related(oc, c2)
        if cr and (of == f or of == c2.path()):  # non-divergent
            if backwards:
                data['copy'][of] = f
            elif of in mb:
                data['copy'][f] = of
            elif remotebase:  # special case: a <- b <- a -> b "ping-pong"
                              # rename
                data['copy'][of] = f
                del data['fullcopy'][f]
                data['fullcopy'][of] = f
            else:  # divergence w.r.t. graft CA on one side of topological CA
                for sf in seen:
                    if sf in mb:
                        assert sf not in data['diverge']
                        data['diverge'][sf] = [f, of]
                        break
            return

    if of in mta:
        if backwards or remotebase:
            data['incomplete'][of] = f
        else:
            for sf in seen:
                if sf in mb:
                    if tca == base:
                        data['diverge'].setdefault(sf, []).append(f)
                    else:
                        data['incompletediverge'][sf] = [of, f]
                    return

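The rename-chain walk above keeps overwriting data['fullcopy'][f], so the
oldest known name wins; a toy version over a precomputed (path, linkrev)
chain (values assumed):

    >>> chain = [('b.txt', 8), ('a.txt', 5)]  # renames of f, newest first
    >>> f, limit = 'c.txt', 3
    >>> seen, fullcopy = {f}, {}
    >>> for of, linkrev in chain:
    ...     if of in seen:
    ...         if linkrev < limit:
    ...             break
    ...         continue
    ...     seen.add(of)
    ...     fullcopy[f] = of
    ...
    >>> fullcopy
    {'c.txt': 'a.txt'}
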
def duplicatecopies(repo, wctx, rev, fromrev, skiprev=None):
    """reproduce copies from fromrev to rev in the dirstate

    If skiprev is specified, it's a revision that should be used to
    filter copy records. Any copies that occur between fromrev and
    skiprev will not be duplicated, even if they appear in the set of
    copies between fromrev and rev.
    """
    exclude = {}
    ctraceconfig = repo.ui.config('experimental', 'copytrace')
    bctrace = stringutil.parsebool(ctraceconfig)
    if (skiprev is not None and
        (ctraceconfig == 'heuristics' or bctrace or bctrace is None)):
        # copytrace='off' skips this line, but not the entire function because
        # the line below is O(size of the repo) during a rebase, while the
        # rest of the function is much faster (and is required for carrying
        # copy metadata across the rebase anyway).
        exclude = pathcopies(repo[fromrev], repo[skiprev])
    for dst, src in pathcopies(repo[fromrev], repo[rev]).iteritems():
        # copies.pathcopies returns backward renames, so dst might not
        # actually be in the dirstate
        if dst in exclude:
            continue
        wctx[dst].markcopied(src)
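
The skiprev exclusion is a plain filter over (destination -> source) copy
records; sketched with hypothetical paths:

    >>> copies = {'b.txt': 'a.txt', 'd.txt': 'c.txt'}  # fromrev -> rev
    >>> exclude = {'b.txt': 'a.txt'}                   # fromrev -> skiprev
    >>> {dst: src for dst, src in copies.items() if dst not in exclude}
    {'d.txt': 'c.txt'}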
@@ -1,105 +1,135 @@

  $ cat >> $HGRCPATH << EOF
  > [experimental]
  > copies.write-to=changeset-only
  > copies.read-from=changeset-only
  > [alias]
  > changesetcopies = log -r . -T 'files: {files}
  >   {extras % "{ifcontains("copies", key, "{key}: {value}\n")}"}'
  > showcopies = log -r . -T '{file_copies % "{source} -> {name}\n"}'
  > EOF

Check that copies are recorded correctly

  $ hg init repo
  $ cd repo
  $ echo a > a
  $ hg add a
  $ hg ci -m initial
  $ hg cp a b
  $ hg cp a c
  $ hg cp a d
  $ hg ci -m 'copy a to b, c, and d'
  $ hg changesetcopies
  files: b c d
  p1copies: b\x00a (esc)
  c\x00a (esc)
  d\x00a (esc)
  $ hg showcopies
  a -> b
  a -> c
  a -> d
  $ hg showcopies --config experimental.copies.read-from=compatibility
  a -> b
  a -> c
  a -> d
  $ hg showcopies --config experimental.copies.read-from=filelog-only

Check that renames are recorded correctly

  $ hg mv b b2
  $ hg ci -m 'rename b to b2'
  $ hg changesetcopies
  files: b b2
  p1copies: b2\x00b (esc)
  $ hg showcopies
  b -> b2

Rename onto existing file. This should get recorded in the changeset files list and in the extras,
even though there is no filelog entry.

  $ hg cp b2 c --force
  $ hg st --copies
  M c
    b2
  $ hg debugindex c
     rev linkrev nodeid       p1           p2
       0       1 b789fdd96dc2 000000000000 000000000000
  $ hg ci -m 'move b onto d'
  $ hg changesetcopies
  files: c
  p1copies: c\x00b2 (esc)
  $ hg showcopies
  b2 -> c
  $ hg debugindex c
     rev linkrev nodeid       p1           p2
       0       1 b789fdd96dc2 000000000000 000000000000

Create a merge commit with copying done during merge.

  $ hg co 0
  0 files updated, 0 files merged, 3 files removed, 0 files unresolved
  $ hg cp a e
  $ hg cp a f
  $ hg ci -m 'copy a to e and f'
  created new head
  $ hg merge 3
  3 files updated, 0 files merged, 0 files removed, 0 files unresolved
  (branch merge, don't forget to commit)
File 'a' exists on both sides, so 'g' could be recorded as being from p1 or p2, but we currently
always record it as being from p1
  $ hg cp a g
File 'd' exists only in p2, so 'h' should be from p2
  $ hg cp d h
File 'f' exists only in p1, so 'i' should be from p1
  $ hg cp f i
  $ hg ci -m 'merge'
  $ hg changesetcopies
  files: g h i
  p1copies: g\x00a (esc)
  i\x00f (esc)
  p2copies: h\x00d (esc)
  $ hg showcopies
  a -> g
  d -> h
  f -> i

Test writing to both changeset and filelog

  $ hg cp a j
  $ hg ci -m 'copy a to j' --config experimental.copies.write-to=compatibility
  $ hg changesetcopies
  files: j
  p1copies: j\x00a (esc)
  $ hg debugdata j 0
  \x01 (esc)
  copy: a
  copyrev: b789fdd96dc2f3bd229c1dd8eedf0fc60e2b68e3
  \x01 (esc)
  a
  $ hg showcopies
  a -> j
  $ hg showcopies --config experimental.copies.read-from=compatibility
  a -> j
  $ hg showcopies --config experimental.copies.read-from=filelog-only
  a -> j

Test writing only to filelog

  $ hg cp a k
  $ hg ci -m 'copy a to k' --config experimental.copies.write-to=filelog-only
  $ hg changesetcopies
  files: k
  $ hg debugdata k 0
  \x01 (esc)
  copy: a
  copyrev: b789fdd96dc2f3bd229c1dd8eedf0fc60e2b68e3
  \x01 (esc)
  a
  $ hg showcopies
  $ hg showcopies --config experimental.copies.read-from=compatibility
  a -> k
  $ hg showcopies --config experimental.copies.read-from=filelog-only
  a -> k

  $ cd ..
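
For reference, the p1copies extra shown in the transcript above is a
newline-separated list of dest\x00source pairs; a hedged one-liner to decode
it (this mirrors the observed test output, not necessarily Mercurial's exact
parser):

    >>> extra = b'b\x00a\nc\x00a\nd\x00a'
    >>> dict(line.split(b'\x00') for line in extra.split(b'\n'))
    {b'b': b'a', b'c': b'a', b'd': b'a'}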
@@ -1,633 +1,648 @@
1 #testcases filelog compatibility
1 #testcases filelog compatibility changeset
2
2
3 $ cat >> $HGRCPATH << EOF
3 $ cat >> $HGRCPATH << EOF
4 > [extensions]
4 > [extensions]
5 > rebase=
5 > rebase=
6 > [alias]
6 > [alias]
7 > l = log -G -T '{rev} {desc}\n{files}\n'
7 > l = log -G -T '{rev} {desc}\n{files}\n'
8 > EOF
8 > EOF
9
9
10 #if compatibility
10 #if compatibility
11 $ cat >> $HGRCPATH << EOF
11 $ cat >> $HGRCPATH << EOF
12 > [experimental]
12 > [experimental]
13 > copies.read-from = compatibility
13 > copies.read-from = compatibility
14 > EOF
14 > EOF
15 #endif
15 #endif
16
16
17 #if changeset
18 $ cat >> $HGRCPATH << EOF
19 > [experimental]
20 > copies.read-from = changeset-only
21 > copies.write-to = changeset-only
22 > EOF
23 #endif
24
17 $ REPONUM=0
25 $ REPONUM=0
18 $ newrepo() {
26 $ newrepo() {
19 > cd $TESTTMP
27 > cd $TESTTMP
20 > REPONUM=`expr $REPONUM + 1`
28 > REPONUM=`expr $REPONUM + 1`
21 > hg init repo-$REPONUM
29 > hg init repo-$REPONUM
22 > cd repo-$REPONUM
30 > cd repo-$REPONUM
23 > }
31 > }
24
32
25 Simple rename case
33 Simple rename case
26 $ newrepo
34 $ newrepo
27 $ echo x > x
35 $ echo x > x
28 $ hg ci -Aqm 'add x'
36 $ hg ci -Aqm 'add x'
29 $ hg mv x y
37 $ hg mv x y
30 $ hg debugp1copies
38 $ hg debugp1copies
31 x -> y
39 x -> y
32 $ hg debugp2copies
40 $ hg debugp2copies
33 $ hg ci -m 'rename x to y'
41 $ hg ci -m 'rename x to y'
34 $ hg l
42 $ hg l
35 @ 1 rename x to y
43 @ 1 rename x to y
36 | x y
44 | x y
37 o 0 add x
45 o 0 add x
38 x
46 x
39 $ hg debugp1copies -r 1
47 $ hg debugp1copies -r 1
40 x -> y
48 x -> y
41 $ hg debugpathcopies 0 1
49 $ hg debugpathcopies 0 1
42 x -> y
50 x -> y
43 $ hg debugpathcopies 1 0
51 $ hg debugpathcopies 1 0
44 y -> x
52 y -> x
45 Test filtering copies by path. We do filtering by destination.
53 Test filtering copies by path. We do filtering by destination.
46 $ hg debugpathcopies 0 1 x
54 $ hg debugpathcopies 0 1 x
47 $ hg debugpathcopies 1 0 x
55 $ hg debugpathcopies 1 0 x
48 y -> x
56 y -> x
49 $ hg debugpathcopies 0 1 y
57 $ hg debugpathcopies 0 1 y
50 x -> y
58 x -> y
51 $ hg debugpathcopies 1 0 y
59 $ hg debugpathcopies 1 0 y
52
60
53 Copy a file onto another file
61 Copy a file onto another file
54 $ newrepo
62 $ newrepo
55 $ echo x > x
63 $ echo x > x
56 $ echo y > y
64 $ echo y > y
57 $ hg ci -Aqm 'add x and y'
65 $ hg ci -Aqm 'add x and y'
58 $ hg cp -f x y
66 $ hg cp -f x y
59 $ hg debugp1copies
67 $ hg debugp1copies
60 x -> y
68 x -> y
61 $ hg debugp2copies
69 $ hg debugp2copies
62 $ hg ci -m 'copy x onto y'
70 $ hg ci -m 'copy x onto y'
63 $ hg l
71 $ hg l
64 @ 1 copy x onto y
72 @ 1 copy x onto y
65 | y
73 | y
66 o 0 add x and y
74 o 0 add x and y
67 x y
75 x y
68 $ hg debugp1copies -r 1
76 $ hg debugp1copies -r 1
69 x -> y
77 x -> y
70 Incorrectly doesn't show the rename
78 Incorrectly doesn't show the rename
71 $ hg debugpathcopies 0 1
79 $ hg debugpathcopies 0 1
72
80
73 Copy a file onto another file with same content. If metadata is stored in changeset, this does not
81 Copy a file onto another file with same content. If metadata is stored in changeset, this does not
74 produce a new filelog entry. The changeset's "files" entry should still list the file.
82 produce a new filelog entry. The changeset's "files" entry should still list the file.
75 $ newrepo
83 $ newrepo
76 $ echo x > x
84 $ echo x > x
77 $ echo x > x2
85 $ echo x > x2
78 $ hg ci -Aqm 'add x and x2 with same content'
86 $ hg ci -Aqm 'add x and x2 with same content'
79 $ hg cp -f x x2
87 $ hg cp -f x x2
80 $ hg ci -m 'copy x onto x2'
88 $ hg ci -m 'copy x onto x2'
81 $ hg l
89 $ hg l
82 @ 1 copy x onto x2
90 @ 1 copy x onto x2
83 | x2
91 | x2
84 o 0 add x and x2 with same content
92 o 0 add x and x2 with same content
85 x x2
93 x x2
86 $ hg debugp1copies -r 1
94 $ hg debugp1copies -r 1
87 x -> x2
95 x -> x2
88 Incorrectly doesn't show the rename
96 Incorrectly doesn't show the rename
89 $ hg debugpathcopies 0 1
97 $ hg debugpathcopies 0 1
90
98
91 Copy a file, then delete destination, then copy again. This does not create a new filelog entry.
99 Copy a file, then delete destination, then copy again. This does not create a new filelog entry.
92 $ newrepo
100 $ newrepo
93 $ echo x > x
101 $ echo x > x
94 $ hg ci -Aqm 'add x'
102 $ hg ci -Aqm 'add x'
95 $ hg cp x y
103 $ hg cp x y
96 $ hg ci -m 'copy x to y'
104 $ hg ci -m 'copy x to y'
97 $ hg rm y
105 $ hg rm y
98 $ hg ci -m 'remove y'
106 $ hg ci -m 'remove y'
99 $ hg cp -f x y
107 $ hg cp -f x y
100 $ hg ci -m 'copy x onto y (again)'
108 $ hg ci -m 'copy x onto y (again)'
101 $ hg l
109 $ hg l
102 @ 3 copy x onto y (again)
110 @ 3 copy x onto y (again)
103 | y
111 | y
104 o 2 remove y
112 o 2 remove y
105 | y
113 | y
106 o 1 copy x to y
114 o 1 copy x to y
107 | y
115 | y
108 o 0 add x
116 o 0 add x
109 x
117 x
110 $ hg debugp1copies -r 3
118 $ hg debugp1copies -r 3
111 x -> y
119 x -> y
112 $ hg debugpathcopies 0 3
120 $ hg debugpathcopies 0 3
113 x -> y
121 x -> y
114
122
115 Rename file in a loop: x->y->z->x
123 Rename file in a loop: x->y->z->x
116 $ newrepo
124 $ newrepo
117 $ echo x > x
125 $ echo x > x
118 $ hg ci -Aqm 'add x'
126 $ hg ci -Aqm 'add x'
119 $ hg mv x y
127 $ hg mv x y
120 $ hg debugp1copies
128 $ hg debugp1copies
121 x -> y
129 x -> y
122 $ hg debugp2copies
130 $ hg debugp2copies
123 $ hg ci -m 'rename x to y'
131 $ hg ci -m 'rename x to y'
124 $ hg mv y z
132 $ hg mv y z
125 $ hg ci -m 'rename y to z'
133 $ hg ci -m 'rename y to z'
126 $ hg mv z x
134 $ hg mv z x
127 $ hg ci -m 'rename z to x'
135 $ hg ci -m 'rename z to x'
128 $ hg l
136 $ hg l
129 @ 3 rename z to x
137 @ 3 rename z to x
130 | x z
138 | x z
131 o 2 rename y to z
139 o 2 rename y to z
132 | y z
140 | y z
133 o 1 rename x to y
141 o 1 rename x to y
134 | x y
142 | x y
135 o 0 add x
143 o 0 add x
136 x
144 x
137 $ hg debugpathcopies 0 3
145 $ hg debugpathcopies 0 3
138
146
139 Copy x to y, then remove y, then add back y. With copy metadata in the changeset, this could easily
147 Copy x to y, then remove y, then add back y. With copy metadata in the changeset, this could easily
140 end up reporting y as copied from x (if we don't unmark it as a copy when it's removed).
148 end up reporting y as copied from x (if we don't unmark it as a copy when it's removed).
141 $ newrepo
149 $ newrepo
142 $ echo x > x
150 $ echo x > x
143 $ hg ci -Aqm 'add x'
151 $ hg ci -Aqm 'add x'
144 $ hg mv x y
152 $ hg mv x y
145 $ hg ci -m 'rename x to y'
153 $ hg ci -m 'rename x to y'
146 $ hg rm y
154 $ hg rm y
147 $ hg ci -qm 'remove y'
155 $ hg ci -qm 'remove y'
148 $ echo x > y
156 $ echo x > y
149 $ hg ci -Aqm 'add back y'
157 $ hg ci -Aqm 'add back y'
150 $ hg l
158 $ hg l
151 @ 3 add back y
159 @ 3 add back y
152 | y
160 | y
153 o 2 remove y
161 o 2 remove y
154 | y
162 | y
155 o 1 rename x to y
163 o 1 rename x to y
156 | x y
164 | x y
157 o 0 add x
165 o 0 add x
158 x
166 x
159 $ hg debugp1copies -r 3
167 $ hg debugp1copies -r 3
160 $ hg debugpathcopies 0 3
168 $ hg debugpathcopies 0 3
161
169
Copy x to z, then remove z, then copy x2 (same content as x) to z. With copy metadata in the
changeset, the two copies here will have the same filelog entry, so ctx['z'].introrev() might point
to the first commit that added the file. We should still report the copy as being from x2.
  $ newrepo
  $ echo x > x
  $ echo x > x2
  $ hg ci -Aqm 'add x and x2 with same content'
  $ hg cp x z
  $ hg ci -qm 'copy x to z'
  $ hg rm z
  $ hg ci -m 'remove z'
  $ hg cp x2 z
  $ hg ci -m 'copy x2 to z'
  $ hg l
  @ 3 copy x2 to z
  | z
  o 2 remove z
  | z
  o 1 copy x to z
  | z
  o 0 add x and x2 with same content
  x x2
  $ hg debugp1copies -r 3
  x2 -> z
  $ hg debugpathcopies 0 3
  x2 -> z
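For readers unfamiliar with the internals, a hedged Python sketch of the
ambiguity described above (filectx calls from Mercurial's internal API; the
repository path is an assumption):

  # Sketch: both copies of z hash to the same filelog node, so introrev()
  # may resolve to the earlier 'copy x to z' commit rather than the tip.
  # Copy tracing should therefore trust the copy metadata of the commit
  # being asked about (x2 -> z), not the filelog's first introduction.
  from mercurial import hg, ui as uimod

  repo = hg.repository(uimod.ui.load(), b'.')  # assumes cwd is the test repo
  fctx = repo[b'tip'][b'z']
  print(fctx.introrev())  # may be rev 1, the first commit that added z
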
Create x and y, then rename them both to the same name, but on different sides of a fork
  $ newrepo
  $ echo x > x
  $ echo y > y
  $ hg ci -Aqm 'add x and y'
  $ hg mv x z
  $ hg ci -qm 'rename x to z'
  $ hg co -q 0
  $ hg mv y z
  $ hg ci -qm 'rename y to z'
  $ hg l
  @ 2 rename y to z
  | y z
  | o 1 rename x to z
  |/ x z
  o 0 add x and y
  x y
  $ hg debugpathcopies 1 2
  z -> x
  y -> z

Fork renames x to y on one side and removes x on the other
  $ newrepo
  $ echo x > x
  $ hg ci -Aqm 'add x'
  $ hg mv x y
  $ hg ci -m 'rename x to y'
  $ hg co -q 0
  $ hg rm x
  $ hg ci -m 'remove x'
  created new head
  $ hg l
  @ 2 remove x
  | x
  | o 1 rename x to y
  |/ x y
  o 0 add x
  x
  $ hg debugpathcopies 1 2
Copies via null revision (there shouldn't be any)
  $ newrepo
  $ echo x > x
  $ hg ci -Aqm 'add x'
  $ hg cp x y
  $ hg ci -m 'copy x to y'
  $ hg co -q null
  $ echo x > x
  $ hg ci -Aqm 'add x (again)'
  $ hg l
  @ 2 add x (again)
  x
  o 1 copy x to y
  | y
  o 0 add x
  x
  $ hg debugpathcopies 1 2
  $ hg debugpathcopies 2 1

Merge rename from other branch
  $ newrepo
  $ echo x > x
  $ hg ci -Aqm 'add x'
  $ hg mv x y
  $ hg ci -m 'rename x to y'
  $ hg co -q 0
  $ echo z > z
  $ hg ci -Aqm 'add z'
  $ hg merge -q 1
  $ hg debugp1copies
  $ hg debugp2copies
  $ hg ci -m 'merge rename from p2'
  $ hg l
  @ 3 merge rename from p2
  |\ x
  | o 2 add z
  | | z
  o | 1 rename x to y
  |/ x y
  o 0 add x
  x
Perhaps we should indicate the rename here, but `hg status` is documented to be weird during
merges, so...
  $ hg debugp1copies -r 3
  $ hg debugp2copies -r 3
  $ hg debugpathcopies 0 3
  x -> y
  $ hg debugpathcopies 1 2
  y -> x
  $ hg debugpathcopies 1 3
  $ hg debugpathcopies 2 3
  x -> y
Copy file from either side in a merge
  $ newrepo
  $ echo x > x
  $ hg ci -Aqm 'add x'
  $ hg co -q null
  $ echo y > y
  $ hg ci -Aqm 'add y'
  $ hg merge -q 0
  $ hg cp y z
  $ hg debugp1copies
  y -> z
  $ hg debugp2copies
  $ hg ci -m 'copy file from p1 in merge'
  $ hg co -q 1
  $ hg merge -q 0
  $ hg cp x z
  $ hg debugp1copies
  $ hg debugp2copies
  x -> z
  $ hg ci -qm 'copy file from p2 in merge'
  $ hg l
  @ 3 copy file from p2 in merge
  |\ z
  +---o 2 copy file from p1 in merge
  | |/ z
  | o 1 add y
  | y
  o 0 add x
  x
  $ hg debugp1copies -r 2
  y -> z
  $ hg debugp2copies -r 2
  $ hg debugpathcopies 1 2
  y -> z
  $ hg debugpathcopies 0 2
  $ hg debugp1copies -r 3
  $ hg debugp2copies -r 3
  x -> z
  $ hg debugpathcopies 1 3
  $ hg debugpathcopies 0 3
  x -> z

Copy file that exists on both sides of the merge, same content on both sides
  $ newrepo
  $ echo x > x
  $ hg ci -Aqm 'add x on branch 1'
  $ hg co -q null
  $ echo x > x
  $ hg ci -Aqm 'add x on branch 2'
  $ hg merge -q 0
  $ hg cp x z
  $ hg debugp1copies
  x -> z
  $ hg debugp2copies
  $ hg ci -qm 'merge'
  $ hg l
  @ 2 merge
  |\ z
  | o 1 add x on branch 2
  | x
  o 0 add x on branch 1
  x
  $ hg debugp1copies -r 2
  x -> z
  $ hg debugp2copies -r 2
It's a little weird that it shows up on both sides
  $ hg debugpathcopies 1 2
  x -> z
  $ hg debugpathcopies 0 2
  x -> z (filelog !)
Copy file that exists on both sides of the merge, different content
  $ newrepo
  $ echo branch1 > x
  $ hg ci -Aqm 'add x on branch 1'
  $ hg co -q null
  $ echo branch2 > x
  $ hg ci -Aqm 'add x on branch 2'
  $ hg merge -q 0
  warning: conflicts while merging x! (edit, then use 'hg resolve --mark')
  [1]
  $ echo resolved > x
  $ hg resolve -m x
  (no more unresolved files)
  $ hg cp x z
  $ hg debugp1copies
  x -> z
  $ hg debugp2copies
  $ hg ci -qm 'merge'
  $ hg l
  @ 2 merge
  |\ x z
  | o 1 add x on branch 2
  | x
  o 0 add x on branch 1
  x
  $ hg debugp1copies -r 2
  x -> z (changeset !)
  $ hg debugp2copies -r 2
  x -> z (no-changeset !)
  $ hg debugpathcopies 1 2
  x -> z (changeset !)
  $ hg debugpathcopies 0 2
  x -> z (no-changeset !)
Copy x->y on one side of merge and copy x->z on the other side. Pathcopies from one parent
of the merge to the merge should include the copy from the other side.
  $ newrepo
  $ echo x > x
  $ hg ci -Aqm 'add x'
  $ hg cp x y
  $ hg ci -qm 'copy x to y'
  $ hg co -q 0
  $ hg cp x z
  $ hg ci -qm 'copy x to z'
  $ hg merge -q 1
  $ hg ci -m 'merge copy x->y and copy x->z'
  $ hg l
  @ 3 merge copy x->y and copy x->z
  |\
  | o 2 copy x to z
  | | z
  o | 1 copy x to y
  |/ y
  o 0 add x
  x
  $ hg debugp1copies -r 3
  $ hg debugp2copies -r 3
  $ hg debugpathcopies 2 3
  x -> y
  $ hg debugpathcopies 1 3
  x -> z

Copy x to y on one side of merge, create y and rename to z on the other side. Pathcopies from the
first side should not include the y->z rename since y didn't exist in the merge base.
  $ newrepo
  $ echo x > x
  $ hg ci -Aqm 'add x'
  $ hg cp x y
  $ hg ci -qm 'copy x to y'
  $ hg co -q 0
  $ echo y > y
  $ hg ci -Aqm 'add y'
  $ hg mv y z
  $ hg ci -m 'rename y to z'
  $ hg merge -q 1
  $ hg ci -m 'merge'
  $ hg l
  @ 4 merge
  |\
  | o 3 rename y to z
  | | y z
  | o 2 add y
  | | y
  o | 1 copy x to y
  |/ y
  o 0 add x
  x
  $ hg debugp1copies -r 3
  y -> z
  $ hg debugp2copies -r 3
  $ hg debugpathcopies 2 3
  y -> z
  $ hg debugpathcopies 1 3
Create x and y, then rename x to z on one side of merge, and rename y to z and modify z on the
other side.
  $ newrepo
  $ echo x > x
  $ echo y > y
  $ hg ci -Aqm 'add x and y'
  $ hg mv x z
  $ hg ci -qm 'rename x to z'
  $ hg co -q 0
  $ hg mv y z
  $ hg ci -qm 'rename y to z'
  $ echo z >> z
  $ hg ci -m 'modify z'
  $ hg merge -q 1
  warning: conflicts while merging z! (edit, then use 'hg resolve --mark')
  [1]
  $ echo z > z
  $ hg resolve -qm z
  $ hg ci -m 'merge 1 into 3'
Try merging the other direction too
  $ hg co -q 1
  $ hg merge -q 3
  warning: conflicts while merging z! (edit, then use 'hg resolve --mark')
  [1]
  $ echo z > z
  $ hg resolve -qm z
  $ hg ci -m 'merge 3 into 1'
  created new head
  $ hg l
  @ 5 merge 3 into 1
  |\ y z
  +---o 4 merge 1 into 3
  | |/ x z
  | o 3 modify z
  | | z
  | o 2 rename y to z
  | | y z
  o | 1 rename x to z
  |/ x z
  o 0 add x and y
  x y
  $ hg debugpathcopies 1 4
  $ hg debugpathcopies 2 4
  $ hg debugpathcopies 0 4
  x -> z (filelog !)
  y -> z (compatibility !)
  $ hg debugpathcopies 1 5
  $ hg debugpathcopies 2 5
  $ hg debugpathcopies 0 5
  x -> z
Test for a case in the fullcopytracing algorithm where both the merging csets are
"dirty"; a dirty cset is one that is not a descendant of the merge base. This
test reflects that for this particular case the algorithm correctly finds the copies:

  $ cat >> $HGRCPATH << EOF
  > [experimental]
  > evolution.createmarkers=True
  > evolution.allowunstable=True
  > EOF

  $ newrepo
  $ echo a > a
  $ hg add a
  $ hg ci -m "added a"
  $ echo b > b
  $ hg add b
  $ hg ci -m "added b"

  $ hg mv b b1
  $ hg ci -m "rename b to b1"

  $ hg up ".^"
  1 files updated, 0 files merged, 1 files removed, 0 files unresolved
  $ echo d > d
  $ hg add d
  $ hg ci -m "added d"
  created new head

  $ echo baba >> b
  $ hg ci --amend -m "added d, modified b"

  $ hg l --hidden
  @ 4 added d, modified b
  | b d
  | x 3 added d
  |/ d
  | o 2 rename b to b1
  |/ b b1
  o 1 added b
  | b
  o 0 added a
  a

Grafting revision 4 on top of revision 2, showing that it respects the rename:

TODO: Make this work with copy info in changesets (probably by writing a
changeset-centric version of copies.mergecopies())
#if no-changeset
  $ hg up 2 -q
  $ hg graft -r 4 --base 3 --hidden
  grafting 4:af28412ec03c "added d, modified b" (tip)
  merging b1 and b to b1

  $ hg l -l1 -p
  @ 5 added d, modified b
  | b1
  ~ diff -r 5a4825cc2926 -r 94a2f1a0e8e2 b1
  --- a/b1 Thu Jan 01 00:00:00 1970 +0000
  +++ b/b1 Thu Jan 01 00:00:00 1970 +0000
  @@ -1,1 +1,2 @@
  b
  +baba

#endif
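For context on the TODO above, a hedged sketch of the call it refers to. The
entry point is copies.mergecopies(); the revision choices below mirror this
test, and the exact return shape is left as an assumption for this era of the
code:

  # Sketch only: the filelog-based copy tracing consulted during a graft.
  # A "changeset-centric" variant would read the copy records stored in
  # the changeset extras instead of per-file revlog metadata.
  from mercurial import copies, hg, ui as uimod

  # assumes cwd is the test repo; unfiltered because revs 3/4 are hidden
  repo = hg.repository(uimod.ui.load(), b'.').unfiltered()
  c1 = repo[b'.']  # working parent (rev 2 after 'hg up 2 -q')
  c2 = repo[4]     # the grafted revision
  base = repo[3]   # --base 3
  result = copies.mergecopies(repo, c1, c2, base)  # copy/rename info
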
Test to make sure that the fullcopytracing algorithm doesn't fail when both the merging csets are
dirty (a dirty cset is one that is not a descendant of the merge base)
-------------------------------------------------------------------------------------------------

  $ newrepo
  $ echo a > a
  $ hg add a
  $ hg ci -m "added a"
  $ echo b > b
  $ hg add b
  $ hg ci -m "added b"

  $ echo foobar > willconflict
  $ hg add willconflict
  $ hg ci -m "added willconflict"
  $ echo c > c
  $ hg add c
  $ hg ci -m "added c"

  $ hg l
  @ 3 added c
  | c
  o 2 added willconflict
  | willconflict
  o 1 added b
  | b
  o 0 added a
  a

  $ hg up ".^^"
  0 files updated, 0 files merged, 2 files removed, 0 files unresolved
  $ echo d > d
  $ hg add d
  $ hg ci -m "added d"
  created new head

  $ echo barfoo > willconflict
  $ hg add willconflict
  $ hg ci --amend -m "added willconflict and d"

  $ hg l
  @ 5 added willconflict and d
  | d willconflict
  | o 3 added c
  | | c
  | o 2 added willconflict
  |/ willconflict
  o 1 added b
  | b
  o 0 added a
  a

  $ hg rebase -r . -d 2 -t :other
  rebasing 5:5018b1509e94 "added willconflict and d" (tip)

  $ hg up 3 -q
  $ hg l --hidden
  o 6 added willconflict and d
  | d willconflict
  | x 5 added willconflict and d
  | | d willconflict
  | | x 4 added d
  | |/ d
  +---@ 3 added c
  | | c
  o | 2 added willconflict
  |/ willconflict
  o 1 added b
  | b
  o 0 added a
  a

Now if we trigger a merge between revisions 3 and 6 using base revision 4, both
merging csets will be dirty, as neither is a descendant of the base revision:

  $ hg graft -r 6 --base 4 --hidden -t :other
  grafting 6:99802e4f1e46 "added willconflict and d" (tip)