##// END OF EJS Templates
changelog: lazy decode description (API)...
Gregory Szorc -
r28306:1778770e default
parent child Browse files
Show More
@@ -1,417 +1,425 b''
1 # changelog.py - changelog class for mercurial
1 # changelog.py - changelog class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 from .i18n import _
10 from .i18n import _
11 from .node import (
11 from .node import (
12 bin,
12 bin,
13 hex,
13 hex,
14 nullid,
14 nullid,
15 )
15 )
16
16
17 from . import (
17 from . import (
18 encoding,
18 encoding,
19 error,
19 error,
20 revlog,
20 revlog,
21 util,
21 util,
22 )
22 )
23
23
24 _defaultextra = {'branch': 'default'}
24 _defaultextra = {'branch': 'default'}
25
25
26 def _string_escape(text):
26 def _string_escape(text):
27 """
27 """
28 >>> d = {'nl': chr(10), 'bs': chr(92), 'cr': chr(13), 'nul': chr(0)}
28 >>> d = {'nl': chr(10), 'bs': chr(92), 'cr': chr(13), 'nul': chr(0)}
29 >>> s = "ab%(nl)scd%(bs)s%(bs)sn%(nul)sab%(cr)scd%(bs)s%(nl)s" % d
29 >>> s = "ab%(nl)scd%(bs)s%(bs)sn%(nul)sab%(cr)scd%(bs)s%(nl)s" % d
30 >>> s
30 >>> s
31 'ab\\ncd\\\\\\\\n\\x00ab\\rcd\\\\\\n'
31 'ab\\ncd\\\\\\\\n\\x00ab\\rcd\\\\\\n'
32 >>> res = _string_escape(s)
32 >>> res = _string_escape(s)
33 >>> s == res.decode('string_escape')
33 >>> s == res.decode('string_escape')
34 True
34 True
35 """
35 """
36 # subset of the string_escape codec
36 # subset of the string_escape codec
37 text = text.replace('\\', '\\\\').replace('\n', '\\n').replace('\r', '\\r')
37 text = text.replace('\\', '\\\\').replace('\n', '\\n').replace('\r', '\\r')
38 return text.replace('\0', '\\0')
38 return text.replace('\0', '\\0')
39
39
def decodeextra(text):
    """
    >>> sorted(decodeextra(encodeextra({'foo': 'bar', 'baz': chr(0) + '2'})
    ...                    ).iteritems())
    [('baz', '\\x002'), ('branch', 'default'), ('foo', 'bar')]
    >>> sorted(decodeextra(encodeextra({'foo': 'bar',
    ...                                 'baz': chr(92) + chr(0) + '2'})
    ...                    ).iteritems())
    [('baz', '\\\\\\x002'), ('branch', 'default'), ('foo', 'bar')]
    """
    # Copy so the shared default dict is never mutated; entries parsed
    # from *text* override the implicit {'branch': 'default'}.
    extra = _defaultextra.copy()
    for l in text.split('\0'):
        if l:
            if '\\0' in l:
                # fix up \0 without getting into trouble with \\0
                # (the \n marker below keeps escaped backslashes from
                # being misread as the start of an escaped NUL)
                l = l.replace('\\\\', '\\\\\n')
                l = l.replace('\\0', '\0')
                l = l.replace('\n', '')
            # Python 2 only: string_escape undoes _string_escape; split
            # on the first ':' to recover the "key:value" pair.
            k, v = l.decode('string_escape').split(':', 1)
            extra[k] = v
    return extra
61
61
def encodeextra(d):
    """Serialize the extra dict *d* as a NUL-separated, escaped string."""
    # Keys must be sorted so the serialized form is deterministic, which
    # keeps changelog entries reproducible.
    parts = []
    for key in sorted(d):
        parts.append(_string_escape('%s:%s' % (key, d[key])))
    return "\0".join(parts)
66
66
def stripdesc(desc):
    """strip trailing whitespace and leading and trailing empty lines"""
    cleaned = []
    for line in desc.splitlines():
        cleaned.append(line.rstrip())
    return '\n'.join(cleaned).strip('\n')
70
70
class appender(object):
    '''the changelog index must be updated last on disk, so we use this class
    to delay writes to it'''
    def __init__(self, vfs, name, mode, buf):
        # buf: list of pending write chunks, shared with the caller so the
        # caller can later flush them to the real file
        self.data = buf
        fp = vfs(name, mode)
        self.fp = fp
        # virtual position, spanning the real file followed by the buffer
        self.offset = fp.tell()
        # size of the real on-disk portion; anything past it lives in buf
        self.size = vfs.fstat(fp).st_size

    def end(self):
        # virtual end of file = on-disk size + total buffered bytes
        return self.size + len("".join(self.data))
    def tell(self):
        return self.offset
    def flush(self):
        # writes are deliberately held in memory; nothing to flush
        pass
    def close(self):
        self.fp.close()

    def seek(self, offset, whence=0):
        '''virtual file offset spans real file and data'''
        if whence == 0:
            self.offset = offset
        elif whence == 1:
            self.offset += offset
        elif whence == 2:
            self.offset = self.end() + offset
        # only move the real file pointer while we are inside the on-disk
        # portion; buffered reads are sliced out of self.data instead
        if self.offset < self.size:
            self.fp.seek(self.offset)

    def read(self, count=-1):
        '''only trick here is reads that span real file and data'''
        ret = ""
        if self.offset < self.size:
            s = self.fp.read(count)
            ret = s
            self.offset += len(s)
            if count > 0:
                count -= len(s)
        if count != 0:
            # the read spills into (or starts in) the buffered data:
            # coalesce all pending chunks into a single string so it can
            # be sliced, keeping the buffer as a one-element list
            doff = self.offset - self.size
            self.data.insert(0, "".join(self.data))
            del self.data[1:]
            s = self.data[0][doff:doff + count]
            self.offset += len(s)
            ret += s
        return ret

    def write(self, s):
        # never touches the real file; chunks accumulate until the caller
        # decides to persist them
        self.data.append(str(s))
        self.offset += len(s)
122
122
123 def _divertopener(opener, target):
123 def _divertopener(opener, target):
124 """build an opener that writes in 'target.a' instead of 'target'"""
124 """build an opener that writes in 'target.a' instead of 'target'"""
125 def _divert(name, mode='r'):
125 def _divert(name, mode='r'):
126 if name != target:
126 if name != target:
127 return opener(name, mode)
127 return opener(name, mode)
128 return opener(name + ".a", mode)
128 return opener(name + ".a", mode)
129 return _divert
129 return _divert
130
130
131 def _delayopener(opener, target, buf):
131 def _delayopener(opener, target, buf):
132 """build an opener that stores chunks in 'buf' instead of 'target'"""
132 """build an opener that stores chunks in 'buf' instead of 'target'"""
133 def _delay(name, mode='r'):
133 def _delay(name, mode='r'):
134 if name != target:
134 if name != target:
135 return opener(name, mode)
135 return opener(name, mode)
136 return appender(opener, name, mode, buf)
136 return appender(opener, name, mode, buf)
137 return _delay
137 return _delay
138
138
class changelog(revlog.revlog):
    """Revlog of changesets, adding two features over a plain revlog:

    - filtering: revisions in ``filteredrevs`` are hidden from the
      accessors below (tip, iteration, rev/node lookup, ...);
    - delayed index writes: during a transaction the index update can be
      diverted or buffered so other readers do not see it until finalize.
    """
    def __init__(self, opener):
        revlog.revlog.__init__(self, opener, "00changelog.i")
        if self._initempty:
            # changelogs don't benefit from generaldelta
            self.version &= ~revlog.REVLOGGENERALDELTA
            self._generaldelta = False
        # the opener actually backing the index; self.opener may be
        # temporarily swapped for a diverting/delaying wrapper below
        self._realopener = opener
        self._delayed = False
        self._delaybuf = None
        self._divert = False
        # revisions hidden from all the filtered accessors below
        self.filteredrevs = frozenset()

    def tip(self):
        """filtered version of revlog.tip"""
        # walk backwards to the highest unfiltered revision
        for i in xrange(len(self) -1, -2, -1):
            if i not in self.filteredrevs:
                return self.node(i)

    def __contains__(self, rev):
        """filtered version of revlog.__contains__"""
        return (0 <= rev < len(self)
                and rev not in self.filteredrevs)

    def __iter__(self):
        """filtered version of revlog.__iter__"""
        if len(self.filteredrevs) == 0:
            return revlog.revlog.__iter__(self)

        def filterediter():
            for i in xrange(len(self)):
                if i not in self.filteredrevs:
                    yield i

        return filterediter()

    def revs(self, start=0, stop=None):
        """filtered version of revlog.revs"""
        for i in super(changelog, self).revs(start, stop):
            if i not in self.filteredrevs:
                yield i

    @util.propertycache
    def nodemap(self):
        # XXX need filtering too
        self.rev(self.node(0))
        return self._nodecache

    def reachableroots(self, minroot, heads, roots, includepath=False):
        return self.index.reachableroots2(minroot, heads, roots, includepath)

    def headrevs(self):
        if self.filteredrevs:
            try:
                return self.index.headrevsfiltered(self.filteredrevs)
            # AttributeError covers non-c-extension environments and
            # old c extensions without filter handling.
            except AttributeError:
                return self._headrevs()

        return super(changelog, self).headrevs()

    def strip(self, *args, **kwargs):
        # XXX make something better than assert
        # We can't expect proper strip behavior if we are filtered.
        assert not self.filteredrevs
        super(changelog, self).strip(*args, **kwargs)

    def rev(self, node):
        """filtered version of revlog.rev"""
        r = super(changelog, self).rev(node)
        if r in self.filteredrevs:
            raise error.FilteredLookupError(hex(node), self.indexfile,
                                            _('filtered node'))
        return r

    def node(self, rev):
        """filtered version of revlog.node"""
        if rev in self.filteredrevs:
            raise error.FilteredIndexError(rev)
        return super(changelog, self).node(rev)

    def linkrev(self, rev):
        """filtered version of revlog.linkrev"""
        if rev in self.filteredrevs:
            raise error.FilteredIndexError(rev)
        return super(changelog, self).linkrev(rev)

    def parentrevs(self, rev):
        """filtered version of revlog.parentrevs"""
        if rev in self.filteredrevs:
            raise error.FilteredIndexError(rev)
        return super(changelog, self).parentrevs(rev)

    def flags(self, rev):
        """filtered version of revlog.flags"""
        if rev in self.filteredrevs:
            raise error.FilteredIndexError(rev)
        return super(changelog, self).flags(rev)

    def delayupdate(self, tr):
        "delay visibility of index updates to other readers"

        if not self._delayed:
            if len(self) == 0:
                # empty changelog: write everything into a '.a' sibling
                # and rename it into place at finalize time
                self._divert = True
                if self._realopener.exists(self.indexfile + '.a'):
                    self._realopener.unlink(self.indexfile + '.a')
                self.opener = _divertopener(self._realopener, self.indexfile)
            else:
                # non-empty changelog: buffer index appends in memory
                self._delaybuf = []
                self.opener = _delayopener(self._realopener, self.indexfile,
                                           self._delaybuf)
        # (re-)register the transaction callbacks on every call
        self._delayed = True
        tr.addpending('cl-%i' % id(self), self._writepending)
        tr.addfinalize('cl-%i' % id(self), self._finalize)

    def _finalize(self, tr):
        "finalize index updates"
        self._delayed = False
        self.opener = self._realopener
        # move redirected index data back into place
        if self._divert:
            assert not self._delaybuf
            tmpname = self.indexfile + ".a"
            nfile = self.opener.open(tmpname)
            nfile.close()
            self.opener.rename(tmpname, self.indexfile)
        elif self._delaybuf:
            # append the buffered chunks to the real index
            fp = self.opener(self.indexfile, 'a')
            fp.write("".join(self._delaybuf))
            fp.close()
        self._delaybuf = None
        self._divert = False
        # split when we're done
        self.checkinlinesize(tr)

    def readpending(self, file):
        """read index data from a "pending" file

        During a transaction, the actual changeset data is already stored in the
        main file, but not yet finalized in the on-disk index. Instead, a
        "pending" index is written by the transaction logic. If this function
        is running, we are likely in a subprocess invoked in a hook. The
        subprocess is informed that it is within a transaction and needs to
        access its content.

        This function will read all the index data out of the pending file and
        overwrite the main index."""

        if not self.opener.exists(file):
            return # no pending data for changelog
        r = revlog.revlog(self.opener, file)
        # adopt the pending revlog's state wholesale
        self.index = r.index
        self.nodemap = r.nodemap
        self._nodecache = r._nodecache
        self._chunkcache = r._chunkcache

    def _writepending(self, tr):
        "create a file containing the unfinalized state for pretxnchangegroup"
        if self._delaybuf:
            # make a temporary copy of the index
            fp1 = self._realopener(self.indexfile)
            pendingfilename = self.indexfile + ".a"
            # register as a temp file to ensure cleanup on failure
            tr.registertmp(pendingfilename)
            # write existing data
            fp2 = self._realopener(pendingfilename, "w")
            fp2.write(fp1.read())
            # add pending data
            fp2.write("".join(self._delaybuf))
            fp2.close()
            # switch modes so finalize can simply rename
            self._delaybuf = None
            self._divert = True
            self.opener = _divertopener(self._realopener, self.indexfile)

        if self._divert:
            return True

        return False

    def checkinlinesize(self, tr, fp=None):
        # while delayed, the index must not be split out of the data file;
        # _finalize() re-runs this check once writes are visible again
        if not self._delayed:
            revlog.revlog.checkinlinesize(self, tr, fp)

    def read(self, node):
        """
        format used:
        nodeid\n        : manifest node in ascii
        user\n          : user, no \n or \r allowed
        time tz extra\n : date (time is int or float, timezone is int)
                        : extra is metadata, encoded and separated by '\0'
                        : older versions ignore it
        files\n\n       : files modified by the cset, no \n or \r allowed
        (.*)            : comment (free text, ideally utf-8)

        changelog v0 doesn't use extra

        Returns a 6-tuple consisting of the following:
          - manifest node (binary)
          - user (encoding.localstr)
          - (time, timezone) 2-tuple of a float and int offset
          - list of files modified by the cset
          - commit message / description (binary)
          - dict of extra entries
        """
        text = self.revision(node)
        if not text:
            return nullid, "", (0, 0), [], "", _defaultextra
        last = text.index("\n\n")
        # the description is returned as raw bytes; it is no longer run
        # through encoding.tolocal() here (lazy decode) -- callers that
        # need the local encoding convert it themselves
        desc = text[last + 2:]
        l = text[:last].split('\n')
        manifest = bin(l[0])
        user = encoding.tolocal(l[1])

        tdata = l[2].split(' ', 2)
        if len(tdata) != 3:
            # no extra metadata present (e.g. changelog v0)
            time = float(tdata[0])
            try:
                # various tools did silly things with the time zone field.
                timezone = int(tdata[1])
            except ValueError:
                timezone = 0
            extra = _defaultextra
        else:
            time, timezone = float(tdata[0]), int(tdata[1])
            extra = decodeextra(tdata[2])

        files = l[3:]
        return manifest, user, (time, timezone), files, desc, extra

    def readfiles(self, node):
        """
        short version of read that only returns the files modified by the cset
        """
        text = self.revision(node)
        if not text:
            return []
        last = text.index("\n\n")
        l = text[:last].split('\n')
        return l[3:]

    def add(self, manifest, files, desc, transaction, p1, p2,
            user, date=None, extra=None):
        # Convert to UTF-8 encoded bytestrings as the very first
        # thing: calling any method on a localstr object will turn it
        # into a str object and the cached UTF-8 string is thus lost.
        user, desc = encoding.fromlocal(user), encoding.fromlocal(desc)

        user = user.strip()
        # An empty username or a username with a "\n" will make the
        # revision text contain two "\n\n" sequences -> corrupt
        # repository since read cannot unpack the revision.
        if not user:
            raise error.RevlogError(_("empty username"))
        if "\n" in user:
            raise error.RevlogError(_("username %s contains a newline")
                                    % repr(user))

        desc = stripdesc(desc)

        if date:
            parseddate = "%d %d" % util.parsedate(date)
        else:
            parseddate = "%d %d" % util.makedate()
        if extra:
            branch = extra.get("branch")
            if branch in ("default", ""):
                # the default branch is implicit (see _defaultextra)
                del extra["branch"]
            elif branch in (".", "null", "tip"):
                raise error.RevlogError(_('the name \'%s\' is reserved')
                                        % branch)
        if extra:
            extra = encodeextra(extra)
            parseddate = "%s %s" % (parseddate, extra)
        l = [hex(manifest), user, parseddate] + sorted(files) + ["", desc]
        text = "\n".join(l)
        return self.addrevision(text, transaction, len(self), p1, p2)

    def branchinfo(self, rev):
        """return the branch name and open/close state of a revision

        This function exists because creating a changectx object
        just to access this is costly."""
        extra = self.read(rev)[5]
        return encoding.tolocal(extra.get("branch")), 'close' in extra
@@ -1,1972 +1,1972 b''
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import os
11 import os
12 import re
12 import re
13 import stat
13 import stat
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 bin,
17 bin,
18 hex,
18 hex,
19 nullid,
19 nullid,
20 nullrev,
20 nullrev,
21 short,
21 short,
22 wdirid,
22 wdirid,
23 )
23 )
24 from . import (
24 from . import (
25 encoding,
25 encoding,
26 error,
26 error,
27 fileset,
27 fileset,
28 match as matchmod,
28 match as matchmod,
29 mdiff,
29 mdiff,
30 obsolete as obsmod,
30 obsolete as obsmod,
31 patch,
31 patch,
32 phases,
32 phases,
33 repoview,
33 repoview,
34 revlog,
34 revlog,
35 scmutil,
35 scmutil,
36 subrepo,
36 subrepo,
37 util,
37 util,
38 )
38 )
39
39
40 propertycache = util.propertycache
40 propertycache = util.propertycache
41
41
42 # Phony node value to stand-in for new files in some uses of
42 # Phony node value to stand-in for new files in some uses of
43 # manifests. Manifests support 21-byte hashes for nodes which are
43 # manifests. Manifests support 21-byte hashes for nodes which are
44 # dirty in the working copy.
44 # dirty in the working copy.
45 _newnode = '!' * 21
45 _newnode = '!' * 21
46
46
47 nonascii = re.compile(r'[^\x21-\x7f]').search
47 nonascii = re.compile(r'[^\x21-\x7f]').search
48
48
49 class basectx(object):
49 class basectx(object):
50 """A basectx object represents the common logic for its children:
50 """A basectx object represents the common logic for its children:
51 changectx: read-only context that is already present in the repo,
51 changectx: read-only context that is already present in the repo,
52 workingctx: a context that represents the working directory and can
52 workingctx: a context that represents the working directory and can
53 be committed,
53 be committed,
54 memctx: a context that represents changes in-memory and can also
54 memctx: a context that represents changes in-memory and can also
55 be committed."""
55 be committed."""
56 def __new__(cls, repo, changeid='', *args, **kwargs):
56 def __new__(cls, repo, changeid='', *args, **kwargs):
57 if isinstance(changeid, basectx):
57 if isinstance(changeid, basectx):
58 return changeid
58 return changeid
59
59
60 o = super(basectx, cls).__new__(cls)
60 o = super(basectx, cls).__new__(cls)
61
61
62 o._repo = repo
62 o._repo = repo
63 o._rev = nullrev
63 o._rev = nullrev
64 o._node = nullid
64 o._node = nullid
65
65
66 return o
66 return o
67
67
68 def __str__(self):
68 def __str__(self):
69 return short(self.node())
69 return short(self.node())
70
70
71 def __int__(self):
71 def __int__(self):
72 return self.rev()
72 return self.rev()
73
73
74 def __repr__(self):
74 def __repr__(self):
75 return "<%s %s>" % (type(self).__name__, str(self))
75 return "<%s %s>" % (type(self).__name__, str(self))
76
76
77 def __eq__(self, other):
77 def __eq__(self, other):
78 try:
78 try:
79 return type(self) == type(other) and self._rev == other._rev
79 return type(self) == type(other) and self._rev == other._rev
80 except AttributeError:
80 except AttributeError:
81 return False
81 return False
82
82
83 def __ne__(self, other):
83 def __ne__(self, other):
84 return not (self == other)
84 return not (self == other)
85
85
86 def __contains__(self, key):
86 def __contains__(self, key):
87 return key in self._manifest
87 return key in self._manifest
88
88
89 def __getitem__(self, key):
89 def __getitem__(self, key):
90 return self.filectx(key)
90 return self.filectx(key)
91
91
92 def __iter__(self):
92 def __iter__(self):
93 return iter(self._manifest)
93 return iter(self._manifest)
94
94
95 def _manifestmatches(self, match, s):
95 def _manifestmatches(self, match, s):
96 """generate a new manifest filtered by the match argument
96 """generate a new manifest filtered by the match argument
97
97
98 This method is for internal use only and mainly exists to provide an
98 This method is for internal use only and mainly exists to provide an
99 object oriented way for other contexts to customize the manifest
99 object oriented way for other contexts to customize the manifest
100 generation.
100 generation.
101 """
101 """
102 return self.manifest().matches(match)
102 return self.manifest().matches(match)
103
103
104 def _matchstatus(self, other, match):
104 def _matchstatus(self, other, match):
105 """return match.always if match is none
105 """return match.always if match is none
106
106
107 This internal method provides a way for child objects to override the
107 This internal method provides a way for child objects to override the
108 match operator.
108 match operator.
109 """
109 """
110 return match or matchmod.always(self._repo.root, self._repo.getcwd())
110 return match or matchmod.always(self._repo.root, self._repo.getcwd())
111
111
    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context

        Compares this context's manifest against *other*'s and classifies
        every differing path into modified/added/removed/clean, merging in
        the deleted/unknown/ignored lists carried by the incoming status *s*.
        Returns a new scmutil.status tuple.
        """
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        if self.rev() is not None and self.rev() < other.rev():
            self.manifest()
        mf1 = other._manifestmatches(match, s)
        mf2 = self._manifestmatches(match, s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        # mf1.diff yields fn -> ((node1, flag1), (node2, flag2)); None value
        # means the file is unchanged (only produced when clean=True)
        d = mf1.diff(mf2, clean=listclean)
        for fn, value in d.iteritems():
            # files already known deleted take precedence over any diff result
            if fn in deletedset:
                continue
            if value is None:
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 != _newnode:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                # node2 is the placeholder for an uncommitted file: fall back
                # to a content comparison
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [fn for fn in unknown if fn not in mf1]
            ignored = [fn for fn in ignored if fn not in mf1]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(modified, added, removed, deleted, unknown,
                              ignored, clean)
165
165
    @propertycache
    def substate(self):
        # cached subrepo state mapping for this context
        return subrepo.state(self, self._repo.ui)

    def subrev(self, subpath):
        """Return the stored revision id for the subrepo at *subpath*."""
        return self.substate[subpath][1]

    def rev(self):
        """Return the numeric revision of this context."""
        return self._rev
    def node(self):
        """Return the binary node id of this context."""
        return self._node
    def hex(self):
        """Return the node id as a hex string."""
        return hex(self.node())
    def manifest(self):
        """Return the (cached) manifest for this context."""
        return self._manifest
    def repo(self):
        """Return the repository this context belongs to."""
        return self._repo
    def phasestr(self):
        """Return this changeset's phase as a human-readable name."""
        return phases.phasenames[self.phase()]
    def mutable(self):
        """True when the changeset is not public (and thus rewritable)."""
        return self.phase() > phases.public

    def getfileset(self, expr):
        """Evaluate fileset expression *expr* against this context."""
        return fileset.getfileset(self, expr)
190
190
191 def obsolete(self):
191 def obsolete(self):
192 """True if the changeset is obsolete"""
192 """True if the changeset is obsolete"""
193 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
193 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
194
194
195 def extinct(self):
195 def extinct(self):
196 """True if the changeset is extinct"""
196 """True if the changeset is extinct"""
197 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
197 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
198
198
199 def unstable(self):
199 def unstable(self):
200 """True if the changeset is not obsolete but it's ancestor are"""
200 """True if the changeset is not obsolete but it's ancestor are"""
201 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
201 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
202
202
203 def bumped(self):
203 def bumped(self):
204 """True if the changeset try to be a successor of a public changeset
204 """True if the changeset try to be a successor of a public changeset
205
205
206 Only non-public and non-obsolete changesets may be bumped.
206 Only non-public and non-obsolete changesets may be bumped.
207 """
207 """
208 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
208 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
209
209
210 def divergent(self):
210 def divergent(self):
211 """Is a successors of a changeset with multiple possible successors set
211 """Is a successors of a changeset with multiple possible successors set
212
212
213 Only non-public and non-obsolete changesets may be divergent.
213 Only non-public and non-obsolete changesets may be divergent.
214 """
214 """
215 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
215 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
216
216
217 def troubled(self):
217 def troubled(self):
218 """True if the changeset is either unstable, bumped or divergent"""
218 """True if the changeset is either unstable, bumped or divergent"""
219 return self.unstable() or self.bumped() or self.divergent()
219 return self.unstable() or self.bumped() or self.divergent()
220
220
221 def troubles(self):
221 def troubles(self):
222 """return the list of troubles affecting this changesets.
222 """return the list of troubles affecting this changesets.
223
223
224 Troubles are returned as strings. possible values are:
224 Troubles are returned as strings. possible values are:
225 - unstable,
225 - unstable,
226 - bumped,
226 - bumped,
227 - divergent.
227 - divergent.
228 """
228 """
229 troubles = []
229 troubles = []
230 if self.unstable():
230 if self.unstable():
231 troubles.append('unstable')
231 troubles.append('unstable')
232 if self.bumped():
232 if self.bumped():
233 troubles.append('bumped')
233 troubles.append('bumped')
234 if self.divergent():
234 if self.divergent():
235 troubles.append('divergent')
235 troubles.append('divergent')
236 return troubles
236 return troubles
237
237
238 def parents(self):
238 def parents(self):
239 """return contexts for each parent changeset"""
239 """return contexts for each parent changeset"""
240 return self._parents
240 return self._parents
241
241
242 def p1(self):
242 def p1(self):
243 return self._parents[0]
243 return self._parents[0]
244
244
245 def p2(self):
245 def p2(self):
246 parents = self._parents
246 parents = self._parents
247 if len(parents) == 2:
247 if len(parents) == 2:
248 return parents[1]
248 return parents[1]
249 return changectx(self._repo, nullrev)
249 return changectx(self._repo, nullrev)
250
250
    def _fileinfo(self, path):
        """Return (filenode, flags) for *path* in this changeset.

        Tries progressively cheaper sources: an already-materialized full
        manifest, then the cached manifest delta, and finally a targeted
        lookup on the manifest. Raises error.ManifestLookupError when the
        path is not present.
        """
        if '_manifest' in self.__dict__:
            # full manifest is already loaded: answer from it directly
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(self._node, path,
                                                _('not found in manifest'))
        if '_manifestdelta' in self.__dict__ or path in self.files():
            # the delta is sufficient when the file was touched by this
            # changeset (avoids reading the whole manifest)
            if path in self._manifestdelta:
                return (self._manifestdelta[path],
                        self._manifestdelta.flags(path))
        # fall back to a targeted find against the stored manifest
        node, flag = self._repo.manifest.find(self._changeset[0], path)
        if not node:
            raise error.ManifestLookupError(self._node, path,
                                            _('not found in manifest'))

        return node, flag
268
268
269 def filenode(self, path):
269 def filenode(self, path):
270 return self._fileinfo(path)[0]
270 return self._fileinfo(path)[0]
271
271
272 def flags(self, path):
272 def flags(self, path):
273 try:
273 try:
274 return self._fileinfo(path)[1]
274 return self._fileinfo(path)[1]
275 except error.LookupError:
275 except error.LookupError:
276 return ''
276 return ''
277
277
    def sub(self, path):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path)

    def nullsub(self, path, pctx):
        # an empty (null) subrepo at *path*, parented to pctx
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        '''return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        '''
        return subrepo.subrepo(self, path, allowwdir=True)
290
290
291 def match(self, pats=[], include=None, exclude=None, default='glob',
291 def match(self, pats=[], include=None, exclude=None, default='glob',
292 listsubrepos=False, badfn=None):
292 listsubrepos=False, badfn=None):
293 r = self._repo
293 r = self._repo
294 return matchmod.match(r.root, r.getcwd(), pats,
294 return matchmod.match(r.root, r.getcwd(), pats,
295 include, exclude, default,
295 include, exclude, default,
296 auditor=r.nofsauditor, ctx=self,
296 auditor=r.nofsauditor, ctx=self,
297 listsubrepos=listsubrepos, badfn=badfn)
297 listsubrepos=listsubrepos, badfn=badfn)
298
298
299 def diff(self, ctx2=None, match=None, **opts):
299 def diff(self, ctx2=None, match=None, **opts):
300 """Returns a diff generator for the given contexts and matcher"""
300 """Returns a diff generator for the given contexts and matcher"""
301 if ctx2 is None:
301 if ctx2 is None:
302 ctx2 = self.p1()
302 ctx2 = self.p1()
303 if ctx2 is not None:
303 if ctx2 is not None:
304 ctx2 = self._repo[ctx2]
304 ctx2 = self._repo[ctx2]
305 diffopts = patch.diffopts(self._repo.ui, opts)
305 diffopts = patch.diffopts(self._repo.ui, opts)
306 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
306 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
307
307
308 def dirs(self):
308 def dirs(self):
309 return self._manifest.dirs()
309 return self._manifest.dirs()
310
310
311 def hasdir(self, dir):
311 def hasdir(self, dir):
312 return self._manifest.hasdir(dir)
312 return self._manifest.hasdir(dir)
313
313
314 def dirty(self, missing=False, merge=True, branch=True):
314 def dirty(self, missing=False, merge=True, branch=True):
315 return False
315 return False
316
316
    def status(self, other=None, match=None, listignored=False,
               listclean=False, listunknown=False, listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        reversed = False
        if (not isinstance(ctx1, changectx)
            and isinstance(ctx2, changectx)):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = ctx2._matchstatus(ctx1, match)
        # start from an all-empty status and let _buildstatus fill it in
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
                              listunknown)

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
                               r.clean)

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(rev2, match=submatch, ignored=listignored,
                               clean=listclean, unknown=listunknown,
                               listsubrepos=True)
                # fold subrepo paths back into the parent status, prefixed
                # with the subrepo path
                for rfiles, sfiles in zip(r, s):
                    rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)

        for l in r:
            l.sort()

        return r
379
379
380
380
def makememctx(repo, parents, text, user, date, branch, files, store,
               editor=None, extra=None):
    """Build an in-memory changeset context whose file data comes from
    *store* (an object providing getfile(path))."""
    def getfilectx(repo, memctx, path):
        # getfile returns (data, (islink, isexec), copied); data of None
        # means the file is absent
        data, mode, copied = store.getfile(path)
        if data is None:
            return None
        islink, isexec = mode
        return memfilectx(repo, path, data, islink=islink, isexec=isexec,
                          copied=copied, memctx=memctx)

    extra = {} if extra is None else extra
    if branch:
        extra['branch'] = encoding.fromlocal(branch)
    return memctx(repo, parents, text, files, getfilectx, user,
                  date, extra, editor)
397
397
class changectx(basectx):
    """A changecontext object makes access to data related to a particular
    changeset convenient. It represents a read-only context already present in
    the repo."""
    def __init__(self, repo, changeid=''):
        """changeid is a revision number, node, or tag

        Resolution is attempted in order: another context (copied by
        __new__), int revision, the symbolic names 'null'/'tip'/'.',
        a 20-byte binary node, an integer-looking string, a 40-char hex
        node, the names interface (bookmarks/tags/branches), and finally a
        partial node match. Raises error.RepoLookupError (or a Filtered*
        variant) when nothing matches.
        """

        # since basectx.__new__ already took care of copying the object, we
        # don't need to do anything in __init__, so we just exit here
        if isinstance(changeid, basectx):
            return

        if changeid == '':
            changeid = '.'
        self._repo = repo

        try:
            if isinstance(changeid, int):
                self._node = repo.changelog.node(changeid)
                self._rev = changeid
                return
            if isinstance(changeid, long):
                # normalize py2 longs to their string form and fall through
                changeid = str(changeid)
            if changeid == 'null':
                self._node = nullid
                self._rev = nullrev
                return
            if changeid == 'tip':
                self._node = repo.changelog.tip()
                self._rev = repo.changelog.rev(self._node)
                return
            if changeid == '.' or changeid == repo.dirstate.p1():
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                self._node = repo.dirstate.p1()
                self._rev = repo.unfiltered().changelog.rev(self._node)
                return
            if len(changeid) == 20:
                # looks like a binary node id
                try:
                    self._node = changeid
                    self._rev = repo.changelog.rev(changeid)
                    return
                except error.FilteredRepoLookupError:
                    raise
                except LookupError:
                    pass

            try:
                # integer revision given as a string
                r = int(changeid)
                if str(r) != changeid:
                    raise ValueError
                l = len(repo.changelog)
                if r < 0:
                    # negative revisions count from the end
                    r += l
                if r < 0 or r >= l:
                    raise ValueError
                self._rev = r
                self._node = repo.changelog.node(r)
                return
            except error.FilteredIndexError:
                raise
            except (ValueError, OverflowError, IndexError):
                pass

            if len(changeid) == 40:
                # looks like a full hex node id
                try:
                    self._node = bin(changeid)
                    self._rev = repo.changelog.rev(self._node)
                    return
                except error.FilteredLookupError:
                    raise
                except (TypeError, LookupError):
                    pass

            # lookup bookmarks through the name interface
            try:
                self._node = repo.names.singlenode(repo, changeid)
                self._rev = repo.changelog.rev(self._node)
                return
            except KeyError:
                pass
            except error.FilteredRepoLookupError:
                raise
            except error.RepoLookupError:
                pass

            # last resort: prefix match against node hex strings
            self._node = repo.unfiltered().changelog._partialmatch(changeid)
            if self._node is not None:
                self._rev = repo.changelog.rev(self._node)
                return

            # lookup failed
            # check if it might have come from damaged dirstate
            #
            # XXX we could avoid the unfiltered if we had a recognizable
            # exception for filtered changeset access
            if changeid in repo.unfiltered().dirstate.parents():
                msg = _("working directory has unknown parent '%s'!")
                raise error.Abort(msg % short(changeid))
            try:
                # make the error message printable for binary node ids
                if len(changeid) == 20 and nonascii(changeid):
                    changeid = hex(changeid)
            except TypeError:
                pass
        except (error.FilteredIndexError, error.FilteredLookupError,
                error.FilteredRepoLookupError):
            # the revision exists but is filtered out of this view; give a
            # view-specific error with a hint where possible
            if repo.filtername.startswith('visible'):
                msg = _("hidden revision '%s'") % changeid
                hint = _('use --hidden to access hidden revisions')
                raise error.FilteredRepoLookupError(msg, hint=hint)
            msg = _("filtered revision '%s' (not in '%s' subset)")
            msg %= (changeid, repo.filtername)
            raise error.FilteredRepoLookupError(msg)
        except IndexError:
            pass
        raise error.RepoLookupError(
            _("unknown revision '%s'") % changeid)

    def __hash__(self):
        # fall back to object identity before _rev has been resolved
        try:
            return hash(self._rev)
        except AttributeError:
            return id(self)

    def __nonzero__(self):
        # only the null revision is falsy
        return self._rev != nullrev

    @propertycache
    def _changeset(self):
        # raw changelog entry tuple for this revision
        return self._repo.changelog.read(self.rev())

    @propertycache
    def _manifest(self):
        return self._repo.manifest.read(self._changeset[0])

    @propertycache
    def _manifestdelta(self):
        # manifest delta; cheaper than the full manifest for touched files
        return self._repo.manifest.readdelta(self._changeset[0])

    @propertycache
    def _parents(self):
        # one-element list for a single parent, two for a merge
        repo = self._repo
        p1, p2 = repo.changelog.parentrevs(self._rev)
        if p2 == nullrev:
            return [changectx(repo, p1)]
        return [changectx(repo, p1), changectx(repo, p2)]

    def changeset(self):
        """Return the raw changelog entry tuple."""
        return self._changeset
    def manifestnode(self):
        """Return the manifest node id recorded in the changeset."""
        return self._changeset[0]

    def user(self):
        return self._changeset[1]
    def date(self):
        return self._changeset[2]
    def files(self):
        return self._changeset[3]
    def description(self):
        # the changelog stores the description undecoded; convert to the
        # local encoding lazily, on access
        return encoding.tolocal(self._changeset[4])
    def branch(self):
        return encoding.tolocal(self._changeset[5].get("branch"))
    def closesbranch(self):
        return 'close' in self._changeset[5]
    def extra(self):
        return self._changeset[5]
    def tags(self):
        return self._repo.nodetags(self._node)
    def bookmarks(self):
        return self._repo.nodebookmarks(self._node)
    def phase(self):
        return self._repo._phasecache.phase(self._repo, self._rev)
    def hidden(self):
        return self._rev in repoview.filterrevs(self._repo, 'visible')

    def children(self):
        """return contexts for each child changeset"""
        c = self._repo.changelog.children(self._node)
        return [changectx(self._repo, x) for x in c]

    def ancestors(self):
        """Yield a context for each ancestor revision."""
        for a in self._repo.changelog.ancestors([self._rev]):
            yield changectx(self._repo, a)

    def descendants(self):
        """Yield a context for each descendant revision."""
        for d in self._repo.changelog.descendants([self._rev]):
            yield changectx(self._repo, d)

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(self._repo, path, fileid=fileid,
                       changectx=self, filelog=filelog)

    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            anc = nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            # experimental config: merge.preferancestor
            for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
                try:
                    ctx = changectx(self._repo, r)
                except error.RepoLookupError:
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                # no preferred candidate matched: use the revlog's pick
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (_("note: using %s as ancestor of %s and %s\n") %
                     (short(anc), short(self._node), short(n2))) +
                    ''.join(_(" alternatively, use --config "
                              "merge.preferancestor=%s\n") %
                            short(n) for n in sorted(cahs) if n != anc))
        return changectx(self._repo, anc)

    def descendant(self, other):
        """True if other is descendant of this changeset"""
        return self._repo.changelog.descendant(self._rev, other._rev)

    def walk(self, match):
        '''Generates matching file names.'''

        # Wrap match.bad method to have message with nodeid
        def bad(fn, msg):
            # The manifest doesn't know about subrepos, so don't complain about
            # paths into valid subrepos.
            if any(fn == s or fn.startswith(s + '/')
                   for s in self.substate):
                return
            match.bad(fn, _('no such file in rev %s') % self)

        m = matchmod.badmatch(match, bad)
        return self._manifest.walk(m)

    def matches(self, match):
        # matching against a stored changeset is just a manifest walk
        return self.walk(match)
650
651 class basefilectx(object):
651 class basefilectx(object):
652 """A filecontext object represents the common logic for its children:
652 """A filecontext object represents the common logic for its children:
653 filectx: read-only access to a filerevision that is already present
653 filectx: read-only access to a filerevision that is already present
654 in the repo,
654 in the repo,
655 workingfilectx: a filecontext that represents files from the working
655 workingfilectx: a filecontext that represents files from the working
656 directory,
656 directory,
657 memfilectx: a filecontext that represents files in-memory."""
657 memfilectx: a filecontext that represents files in-memory."""
658 def __new__(cls, repo, path, *args, **kwargs):
658 def __new__(cls, repo, path, *args, **kwargs):
659 return super(basefilectx, cls).__new__(cls)
659 return super(basefilectx, cls).__new__(cls)
660
660
661 @propertycache
661 @propertycache
662 def _filelog(self):
662 def _filelog(self):
663 return self._repo.file(self._path)
663 return self._repo.file(self._path)
664
664
665 @propertycache
665 @propertycache
666 def _changeid(self):
666 def _changeid(self):
667 if '_changeid' in self.__dict__:
667 if '_changeid' in self.__dict__:
668 return self._changeid
668 return self._changeid
669 elif '_changectx' in self.__dict__:
669 elif '_changectx' in self.__dict__:
670 return self._changectx.rev()
670 return self._changectx.rev()
671 elif '_descendantrev' in self.__dict__:
671 elif '_descendantrev' in self.__dict__:
672 # this file context was created from a revision with a known
672 # this file context was created from a revision with a known
673 # descendant, we can (lazily) correct for linkrev aliases
673 # descendant, we can (lazily) correct for linkrev aliases
674 return self._adjustlinkrev(self._path, self._filelog,
674 return self._adjustlinkrev(self._path, self._filelog,
675 self._filenode, self._descendantrev)
675 self._filenode, self._descendantrev)
676 else:
676 else:
677 return self._filelog.linkrev(self._filerev)
677 return self._filelog.linkrev(self._filerev)
678
678
679 @propertycache
679 @propertycache
680 def _filenode(self):
680 def _filenode(self):
681 if '_fileid' in self.__dict__:
681 if '_fileid' in self.__dict__:
682 return self._filelog.lookup(self._fileid)
682 return self._filelog.lookup(self._fileid)
683 else:
683 else:
684 return self._changectx.filenode(self._path)
684 return self._changectx.filenode(self._path)
685
685
686 @propertycache
686 @propertycache
687 def _filerev(self):
687 def _filerev(self):
688 return self._filelog.rev(self._filenode)
688 return self._filelog.rev(self._filenode)
689
689
690 @propertycache
690 @propertycache
691 def _repopath(self):
691 def _repopath(self):
692 return self._path
692 return self._path
693
693
694 def __nonzero__(self):
694 def __nonzero__(self):
695 try:
695 try:
696 self._filenode
696 self._filenode
697 return True
697 return True
698 except error.LookupError:
698 except error.LookupError:
699 # file is missing
699 # file is missing
700 return False
700 return False
701
701
702 def __str__(self):
702 def __str__(self):
703 return "%s@%s" % (self.path(), self._changectx)
703 return "%s@%s" % (self.path(), self._changectx)
704
704
705 def __repr__(self):
705 def __repr__(self):
706 return "<%s %s>" % (type(self).__name__, str(self))
706 return "<%s %s>" % (type(self).__name__, str(self))
707
707
708 def __hash__(self):
708 def __hash__(self):
709 try:
709 try:
710 return hash((self._path, self._filenode))
710 return hash((self._path, self._filenode))
711 except AttributeError:
711 except AttributeError:
712 return id(self)
712 return id(self)
713
713
714 def __eq__(self, other):
714 def __eq__(self, other):
715 try:
715 try:
716 return (type(self) == type(other) and self._path == other._path
716 return (type(self) == type(other) and self._path == other._path
717 and self._filenode == other._filenode)
717 and self._filenode == other._filenode)
718 except AttributeError:
718 except AttributeError:
719 return False
719 return False
720
720
721 def __ne__(self, other):
721 def __ne__(self, other):
722 return not (self == other)
722 return not (self == other)
723
723
724 def filerev(self):
724 def filerev(self):
725 return self._filerev
725 return self._filerev
726 def filenode(self):
726 def filenode(self):
727 return self._filenode
727 return self._filenode
728 def flags(self):
728 def flags(self):
729 return self._changectx.flags(self._path)
729 return self._changectx.flags(self._path)
730 def filelog(self):
730 def filelog(self):
731 return self._filelog
731 return self._filelog
732 def rev(self):
732 def rev(self):
733 return self._changeid
733 return self._changeid
734 def linkrev(self):
734 def linkrev(self):
735 return self._filelog.linkrev(self._filerev)
735 return self._filelog.linkrev(self._filerev)
736 def node(self):
736 def node(self):
737 return self._changectx.node()
737 return self._changectx.node()
738 def hex(self):
738 def hex(self):
739 return self._changectx.hex()
739 return self._changectx.hex()
740 def user(self):
740 def user(self):
741 return self._changectx.user()
741 return self._changectx.user()
742 def date(self):
742 def date(self):
743 return self._changectx.date()
743 return self._changectx.date()
744 def files(self):
744 def files(self):
745 return self._changectx.files()
745 return self._changectx.files()
746 def description(self):
746 def description(self):
747 return self._changectx.description()
747 return self._changectx.description()
748 def branch(self):
748 def branch(self):
749 return self._changectx.branch()
749 return self._changectx.branch()
750 def extra(self):
750 def extra(self):
751 return self._changectx.extra()
751 return self._changectx.extra()
752 def phase(self):
752 def phase(self):
753 return self._changectx.phase()
753 return self._changectx.phase()
754 def phasestr(self):
754 def phasestr(self):
755 return self._changectx.phasestr()
755 return self._changectx.phasestr()
756 def manifest(self):
756 def manifest(self):
757 return self._changectx.manifest()
757 return self._changectx.manifest()
758 def changectx(self):
758 def changectx(self):
759 return self._changectx
759 return self._changectx
760 def repo(self):
760 def repo(self):
761 return self._repo
761 return self._repo
762
762
763 def path(self):
763 def path(self):
764 return self._path
764 return self._path
765
765
766 def isbinary(self):
766 def isbinary(self):
767 try:
767 try:
768 return util.binary(self.data())
768 return util.binary(self.data())
769 except IOError:
769 except IOError:
770 return False
770 return False
771 def isexec(self):
771 def isexec(self):
772 return 'x' in self.flags()
772 return 'x' in self.flags()
773 def islink(self):
773 def islink(self):
774 return 'l' in self.flags()
774 return 'l' in self.flags()
775
775
776 def isabsent(self):
776 def isabsent(self):
777 """whether this filectx represents a file not in self._changectx
777 """whether this filectx represents a file not in self._changectx
778
778
779 This is mainly for merge code to detect change/delete conflicts. This is
779 This is mainly for merge code to detect change/delete conflicts. This is
780 expected to be True for all subclasses of basectx."""
780 expected to be True for all subclasses of basectx."""
781 return False
781 return False
782
782
783 _customcmp = False
783 _customcmp = False
784 def cmp(self, fctx):
784 def cmp(self, fctx):
785 """compare with other file context
785 """compare with other file context
786
786
787 returns True if different than fctx.
787 returns True if different than fctx.
788 """
788 """
789 if fctx._customcmp:
789 if fctx._customcmp:
790 return fctx.cmp(self)
790 return fctx.cmp(self)
791
791
792 if (fctx._filenode is None
792 if (fctx._filenode is None
793 and (self._repo._encodefilterpats
793 and (self._repo._encodefilterpats
794 # if file data starts with '\1\n', empty metadata block is
794 # if file data starts with '\1\n', empty metadata block is
795 # prepended, which adds 4 bytes to filelog.size().
795 # prepended, which adds 4 bytes to filelog.size().
796 or self.size() - 4 == fctx.size())
796 or self.size() - 4 == fctx.size())
797 or self.size() == fctx.size()):
797 or self.size() == fctx.size()):
798 return self._filelog.cmp(self._filenode, fctx.data())
798 return self._filelog.cmp(self._filenode, fctx.data())
799
799
800 return True
800 return True
801
801
802 def _adjustlinkrev(self, path, filelog, fnode, srcrev, inclusive=False):
802 def _adjustlinkrev(self, path, filelog, fnode, srcrev, inclusive=False):
803 """return the first ancestor of <srcrev> introducing <fnode>
803 """return the first ancestor of <srcrev> introducing <fnode>
804
804
805 If the linkrev of the file revision does not point to an ancestor of
805 If the linkrev of the file revision does not point to an ancestor of
806 srcrev, we'll walk down the ancestors until we find one introducing
806 srcrev, we'll walk down the ancestors until we find one introducing
807 this file revision.
807 this file revision.
808
808
809 :repo: a localrepository object (used to access changelog and manifest)
809 :repo: a localrepository object (used to access changelog and manifest)
810 :path: the file path
810 :path: the file path
811 :fnode: the nodeid of the file revision
811 :fnode: the nodeid of the file revision
812 :filelog: the filelog of this path
812 :filelog: the filelog of this path
813 :srcrev: the changeset revision we search ancestors from
813 :srcrev: the changeset revision we search ancestors from
814 :inclusive: if true, the src revision will also be checked
814 :inclusive: if true, the src revision will also be checked
815 """
815 """
816 repo = self._repo
816 repo = self._repo
817 cl = repo.unfiltered().changelog
817 cl = repo.unfiltered().changelog
818 ma = repo.manifest
818 ma = repo.manifest
819 # fetch the linkrev
819 # fetch the linkrev
820 fr = filelog.rev(fnode)
820 fr = filelog.rev(fnode)
821 lkr = filelog.linkrev(fr)
821 lkr = filelog.linkrev(fr)
822 # hack to reuse ancestor computation when searching for renames
822 # hack to reuse ancestor computation when searching for renames
823 memberanc = getattr(self, '_ancestrycontext', None)
823 memberanc = getattr(self, '_ancestrycontext', None)
824 iteranc = None
824 iteranc = None
825 if srcrev is None:
825 if srcrev is None:
826 # wctx case, used by workingfilectx during mergecopy
826 # wctx case, used by workingfilectx during mergecopy
827 revs = [p.rev() for p in self._repo[None].parents()]
827 revs = [p.rev() for p in self._repo[None].parents()]
828 inclusive = True # we skipped the real (revless) source
828 inclusive = True # we skipped the real (revless) source
829 else:
829 else:
830 revs = [srcrev]
830 revs = [srcrev]
831 if memberanc is None:
831 if memberanc is None:
832 memberanc = iteranc = cl.ancestors(revs, lkr,
832 memberanc = iteranc = cl.ancestors(revs, lkr,
833 inclusive=inclusive)
833 inclusive=inclusive)
834 # check if this linkrev is an ancestor of srcrev
834 # check if this linkrev is an ancestor of srcrev
835 if lkr not in memberanc:
835 if lkr not in memberanc:
836 if iteranc is None:
836 if iteranc is None:
837 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
837 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
838 for a in iteranc:
838 for a in iteranc:
839 ac = cl.read(a) # get changeset data (we avoid object creation)
839 ac = cl.read(a) # get changeset data (we avoid object creation)
840 if path in ac[3]: # checking the 'files' field.
840 if path in ac[3]: # checking the 'files' field.
841 # The file has been touched, check if the content is
841 # The file has been touched, check if the content is
842 # similar to the one we search for.
842 # similar to the one we search for.
843 if fnode == ma.readfast(ac[0]).get(path):
843 if fnode == ma.readfast(ac[0]).get(path):
844 return a
844 return a
845 # In theory, we should never get out of that loop without a result.
845 # In theory, we should never get out of that loop without a result.
846 # But if manifest uses a buggy file revision (not children of the
846 # But if manifest uses a buggy file revision (not children of the
847 # one it replaces) we could. Such a buggy situation will likely
847 # one it replaces) we could. Such a buggy situation will likely
848 # result is crash somewhere else at to some point.
848 # result is crash somewhere else at to some point.
849 return lkr
849 return lkr
850
850
851 def introrev(self):
851 def introrev(self):
852 """return the rev of the changeset which introduced this file revision
852 """return the rev of the changeset which introduced this file revision
853
853
854 This method is different from linkrev because it take into account the
854 This method is different from linkrev because it take into account the
855 changeset the filectx was created from. It ensures the returned
855 changeset the filectx was created from. It ensures the returned
856 revision is one of its ancestors. This prevents bugs from
856 revision is one of its ancestors. This prevents bugs from
857 'linkrev-shadowing' when a file revision is used by multiple
857 'linkrev-shadowing' when a file revision is used by multiple
858 changesets.
858 changesets.
859 """
859 """
860 lkr = self.linkrev()
860 lkr = self.linkrev()
861 attrs = vars(self)
861 attrs = vars(self)
862 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
862 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
863 if noctx or self.rev() == lkr:
863 if noctx or self.rev() == lkr:
864 return self.linkrev()
864 return self.linkrev()
865 return self._adjustlinkrev(self._path, self._filelog, self._filenode,
865 return self._adjustlinkrev(self._path, self._filelog, self._filenode,
866 self.rev(), inclusive=True)
866 self.rev(), inclusive=True)
867
867
868 def _parentfilectx(self, path, fileid, filelog):
868 def _parentfilectx(self, path, fileid, filelog):
869 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
869 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
870 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
870 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
871 if '_changeid' in vars(self) or '_changectx' in vars(self):
871 if '_changeid' in vars(self) or '_changectx' in vars(self):
872 # If self is associated with a changeset (probably explicitly
872 # If self is associated with a changeset (probably explicitly
873 # fed), ensure the created filectx is associated with a
873 # fed), ensure the created filectx is associated with a
874 # changeset that is an ancestor of self.changectx.
874 # changeset that is an ancestor of self.changectx.
875 # This lets us later use _adjustlinkrev to get a correct link.
875 # This lets us later use _adjustlinkrev to get a correct link.
876 fctx._descendantrev = self.rev()
876 fctx._descendantrev = self.rev()
877 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
877 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
878 elif '_descendantrev' in vars(self):
878 elif '_descendantrev' in vars(self):
879 # Otherwise propagate _descendantrev if we have one associated.
879 # Otherwise propagate _descendantrev if we have one associated.
880 fctx._descendantrev = self._descendantrev
880 fctx._descendantrev = self._descendantrev
881 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
881 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
882 return fctx
882 return fctx
883
883
884 def parents(self):
884 def parents(self):
885 _path = self._path
885 _path = self._path
886 fl = self._filelog
886 fl = self._filelog
887 parents = self._filelog.parents(self._filenode)
887 parents = self._filelog.parents(self._filenode)
888 pl = [(_path, node, fl) for node in parents if node != nullid]
888 pl = [(_path, node, fl) for node in parents if node != nullid]
889
889
890 r = fl.renamed(self._filenode)
890 r = fl.renamed(self._filenode)
891 if r:
891 if r:
892 # - In the simple rename case, both parent are nullid, pl is empty.
892 # - In the simple rename case, both parent are nullid, pl is empty.
893 # - In case of merge, only one of the parent is null id and should
893 # - In case of merge, only one of the parent is null id and should
894 # be replaced with the rename information. This parent is -always-
894 # be replaced with the rename information. This parent is -always-
895 # the first one.
895 # the first one.
896 #
896 #
897 # As null id have always been filtered out in the previous list
897 # As null id have always been filtered out in the previous list
898 # comprehension, inserting to 0 will always result in "replacing
898 # comprehension, inserting to 0 will always result in "replacing
899 # first nullid parent with rename information.
899 # first nullid parent with rename information.
900 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
900 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
901
901
902 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
902 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
903
903
904 def p1(self):
904 def p1(self):
905 return self.parents()[0]
905 return self.parents()[0]
906
906
907 def p2(self):
907 def p2(self):
908 p = self.parents()
908 p = self.parents()
909 if len(p) == 2:
909 if len(p) == 2:
910 return p[1]
910 return p[1]
911 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
911 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
912
912
913 def annotate(self, follow=False, linenumber=None, diffopts=None):
913 def annotate(self, follow=False, linenumber=None, diffopts=None):
914 '''returns a list of tuples of (ctx, line) for each line
914 '''returns a list of tuples of (ctx, line) for each line
915 in the file, where ctx is the filectx of the node where
915 in the file, where ctx is the filectx of the node where
916 that line was last changed.
916 that line was last changed.
917 This returns tuples of ((ctx, linenumber), line) for each line,
917 This returns tuples of ((ctx, linenumber), line) for each line,
918 if "linenumber" parameter is NOT "None".
918 if "linenumber" parameter is NOT "None".
919 In such tuples, linenumber means one at the first appearance
919 In such tuples, linenumber means one at the first appearance
920 in the managed file.
920 in the managed file.
921 To reduce annotation cost,
921 To reduce annotation cost,
922 this returns fixed value(False is used) as linenumber,
922 this returns fixed value(False is used) as linenumber,
923 if "linenumber" parameter is "False".'''
923 if "linenumber" parameter is "False".'''
924
924
925 if linenumber is None:
925 if linenumber is None:
926 def decorate(text, rev):
926 def decorate(text, rev):
927 return ([rev] * len(text.splitlines()), text)
927 return ([rev] * len(text.splitlines()), text)
928 elif linenumber:
928 elif linenumber:
929 def decorate(text, rev):
929 def decorate(text, rev):
930 size = len(text.splitlines())
930 size = len(text.splitlines())
931 return ([(rev, i) for i in xrange(1, size + 1)], text)
931 return ([(rev, i) for i in xrange(1, size + 1)], text)
932 else:
932 else:
933 def decorate(text, rev):
933 def decorate(text, rev):
934 return ([(rev, False)] * len(text.splitlines()), text)
934 return ([(rev, False)] * len(text.splitlines()), text)
935
935
936 def pair(parent, child):
936 def pair(parent, child):
937 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts,
937 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts,
938 refine=True)
938 refine=True)
939 for (a1, a2, b1, b2), t in blocks:
939 for (a1, a2, b1, b2), t in blocks:
940 # Changed blocks ('!') or blocks made only of blank lines ('~')
940 # Changed blocks ('!') or blocks made only of blank lines ('~')
941 # belong to the child.
941 # belong to the child.
942 if t == '=':
942 if t == '=':
943 child[0][b1:b2] = parent[0][a1:a2]
943 child[0][b1:b2] = parent[0][a1:a2]
944 return child
944 return child
945
945
946 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
946 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
947
947
948 def parents(f):
948 def parents(f):
949 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
949 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
950 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
950 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
951 # from the topmost introrev (= srcrev) down to p.linkrev() if it
951 # from the topmost introrev (= srcrev) down to p.linkrev() if it
952 # isn't an ancestor of the srcrev.
952 # isn't an ancestor of the srcrev.
953 f._changeid
953 f._changeid
954 pl = f.parents()
954 pl = f.parents()
955
955
956 # Don't return renamed parents if we aren't following.
956 # Don't return renamed parents if we aren't following.
957 if not follow:
957 if not follow:
958 pl = [p for p in pl if p.path() == f.path()]
958 pl = [p for p in pl if p.path() == f.path()]
959
959
960 # renamed filectx won't have a filelog yet, so set it
960 # renamed filectx won't have a filelog yet, so set it
961 # from the cache to save time
961 # from the cache to save time
962 for p in pl:
962 for p in pl:
963 if not '_filelog' in p.__dict__:
963 if not '_filelog' in p.__dict__:
964 p._filelog = getlog(p.path())
964 p._filelog = getlog(p.path())
965
965
966 return pl
966 return pl
967
967
968 # use linkrev to find the first changeset where self appeared
968 # use linkrev to find the first changeset where self appeared
969 base = self
969 base = self
970 introrev = self.introrev()
970 introrev = self.introrev()
971 if self.rev() != introrev:
971 if self.rev() != introrev:
972 base = self.filectx(self.filenode(), changeid=introrev)
972 base = self.filectx(self.filenode(), changeid=introrev)
973 if getattr(base, '_ancestrycontext', None) is None:
973 if getattr(base, '_ancestrycontext', None) is None:
974 cl = self._repo.changelog
974 cl = self._repo.changelog
975 if introrev is None:
975 if introrev is None:
976 # wctx is not inclusive, but works because _ancestrycontext
976 # wctx is not inclusive, but works because _ancestrycontext
977 # is used to test filelog revisions
977 # is used to test filelog revisions
978 ac = cl.ancestors([p.rev() for p in base.parents()],
978 ac = cl.ancestors([p.rev() for p in base.parents()],
979 inclusive=True)
979 inclusive=True)
980 else:
980 else:
981 ac = cl.ancestors([introrev], inclusive=True)
981 ac = cl.ancestors([introrev], inclusive=True)
982 base._ancestrycontext = ac
982 base._ancestrycontext = ac
983
983
984 # This algorithm would prefer to be recursive, but Python is a
984 # This algorithm would prefer to be recursive, but Python is a
985 # bit recursion-hostile. Instead we do an iterative
985 # bit recursion-hostile. Instead we do an iterative
986 # depth-first search.
986 # depth-first search.
987
987
988 visit = [base]
988 visit = [base]
989 hist = {}
989 hist = {}
990 pcache = {}
990 pcache = {}
991 needed = {base: 1}
991 needed = {base: 1}
992 while visit:
992 while visit:
993 f = visit[-1]
993 f = visit[-1]
994 pcached = f in pcache
994 pcached = f in pcache
995 if not pcached:
995 if not pcached:
996 pcache[f] = parents(f)
996 pcache[f] = parents(f)
997
997
998 ready = True
998 ready = True
999 pl = pcache[f]
999 pl = pcache[f]
1000 for p in pl:
1000 for p in pl:
1001 if p not in hist:
1001 if p not in hist:
1002 ready = False
1002 ready = False
1003 visit.append(p)
1003 visit.append(p)
1004 if not pcached:
1004 if not pcached:
1005 needed[p] = needed.get(p, 0) + 1
1005 needed[p] = needed.get(p, 0) + 1
1006 if ready:
1006 if ready:
1007 visit.pop()
1007 visit.pop()
1008 reusable = f in hist
1008 reusable = f in hist
1009 if reusable:
1009 if reusable:
1010 curr = hist[f]
1010 curr = hist[f]
1011 else:
1011 else:
1012 curr = decorate(f.data(), f)
1012 curr = decorate(f.data(), f)
1013 for p in pl:
1013 for p in pl:
1014 if not reusable:
1014 if not reusable:
1015 curr = pair(hist[p], curr)
1015 curr = pair(hist[p], curr)
1016 if needed[p] == 1:
1016 if needed[p] == 1:
1017 del hist[p]
1017 del hist[p]
1018 del needed[p]
1018 del needed[p]
1019 else:
1019 else:
1020 needed[p] -= 1
1020 needed[p] -= 1
1021
1021
1022 hist[f] = curr
1022 hist[f] = curr
1023 pcache[f] = []
1023 pcache[f] = []
1024
1024
1025 return zip(hist[base][0], hist[base][1].splitlines(True))
1025 return zip(hist[base][0], hist[base][1].splitlines(True))
1026
1026
1027 def ancestors(self, followfirst=False):
1027 def ancestors(self, followfirst=False):
1028 visit = {}
1028 visit = {}
1029 c = self
1029 c = self
1030 if followfirst:
1030 if followfirst:
1031 cut = 1
1031 cut = 1
1032 else:
1032 else:
1033 cut = None
1033 cut = None
1034
1034
1035 while True:
1035 while True:
1036 for parent in c.parents()[:cut]:
1036 for parent in c.parents()[:cut]:
1037 visit[(parent.linkrev(), parent.filenode())] = parent
1037 visit[(parent.linkrev(), parent.filenode())] = parent
1038 if not visit:
1038 if not visit:
1039 break
1039 break
1040 c = visit.pop(max(visit))
1040 c = visit.pop(max(visit))
1041 yield c
1041 yield c
1042
1042
1043 class filectx(basefilectx):
1043 class filectx(basefilectx):
1044 """A filecontext object makes access to data related to a particular
1044 """A filecontext object makes access to data related to a particular
1045 filerevision convenient."""
1045 filerevision convenient."""
1046 def __init__(self, repo, path, changeid=None, fileid=None,
1046 def __init__(self, repo, path, changeid=None, fileid=None,
1047 filelog=None, changectx=None):
1047 filelog=None, changectx=None):
1048 """changeid can be a changeset revision, node, or tag.
1048 """changeid can be a changeset revision, node, or tag.
1049 fileid can be a file revision or node."""
1049 fileid can be a file revision or node."""
1050 self._repo = repo
1050 self._repo = repo
1051 self._path = path
1051 self._path = path
1052
1052
1053 assert (changeid is not None
1053 assert (changeid is not None
1054 or fileid is not None
1054 or fileid is not None
1055 or changectx is not None), \
1055 or changectx is not None), \
1056 ("bad args: changeid=%r, fileid=%r, changectx=%r"
1056 ("bad args: changeid=%r, fileid=%r, changectx=%r"
1057 % (changeid, fileid, changectx))
1057 % (changeid, fileid, changectx))
1058
1058
1059 if filelog is not None:
1059 if filelog is not None:
1060 self._filelog = filelog
1060 self._filelog = filelog
1061
1061
1062 if changeid is not None:
1062 if changeid is not None:
1063 self._changeid = changeid
1063 self._changeid = changeid
1064 if changectx is not None:
1064 if changectx is not None:
1065 self._changectx = changectx
1065 self._changectx = changectx
1066 if fileid is not None:
1066 if fileid is not None:
1067 self._fileid = fileid
1067 self._fileid = fileid
1068
1068
1069 @propertycache
1069 @propertycache
1070 def _changectx(self):
1070 def _changectx(self):
1071 try:
1071 try:
1072 return changectx(self._repo, self._changeid)
1072 return changectx(self._repo, self._changeid)
1073 except error.FilteredRepoLookupError:
1073 except error.FilteredRepoLookupError:
1074 # Linkrev may point to any revision in the repository. When the
1074 # Linkrev may point to any revision in the repository. When the
1075 # repository is filtered this may lead to `filectx` trying to build
1075 # repository is filtered this may lead to `filectx` trying to build
1076 # `changectx` for filtered revision. In such case we fallback to
1076 # `changectx` for filtered revision. In such case we fallback to
1077 # creating `changectx` on the unfiltered version of the reposition.
1077 # creating `changectx` on the unfiltered version of the reposition.
1078 # This fallback should not be an issue because `changectx` from
1078 # This fallback should not be an issue because `changectx` from
1079 # `filectx` are not used in complex operations that care about
1079 # `filectx` are not used in complex operations that care about
1080 # filtering.
1080 # filtering.
1081 #
1081 #
1082 # This fallback is a cheap and dirty fix that prevent several
1082 # This fallback is a cheap and dirty fix that prevent several
1083 # crashes. It does not ensure the behavior is correct. However the
1083 # crashes. It does not ensure the behavior is correct. However the
1084 # behavior was not correct before filtering either and "incorrect
1084 # behavior was not correct before filtering either and "incorrect
1085 # behavior" is seen as better as "crash"
1085 # behavior" is seen as better as "crash"
1086 #
1086 #
1087 # Linkrevs have several serious troubles with filtering that are
1087 # Linkrevs have several serious troubles with filtering that are
1088 # complicated to solve. Proper handling of the issue here should be
1088 # complicated to solve. Proper handling of the issue here should be
1089 # considered when solving linkrev issue are on the table.
1089 # considered when solving linkrev issue are on the table.
1090 return changectx(self._repo.unfiltered(), self._changeid)
1090 return changectx(self._repo.unfiltered(), self._changeid)
1091
1091
def filectx(self, fileid, changeid=None):
    """Open an arbitrary revision of this file.

    Reuses the already-loaded filelog instead of opening a new one.
    """
    return filectx(self._repo, self._path, fileid=fileid,
                   filelog=self._filelog, changeid=changeid)

def data(self):
    """Return the contents of this file revision.

    For censored nodes, returns '' when censor.policy is 'ignore',
    otherwise aborts with a hint.
    """
    try:
        return self._filelog.read(self._filenode)
    except error.CensoredNodeError:
        if self._repo.ui.config("censor", "policy", "abort") == "ignore":
            return ""
        raise error.Abort(_("censored node: %s") % short(self._filenode),
                          hint=_("set censor.policy to ignore errors"))

def size(self):
    """Return the stored size of this file revision."""
    return self._filelog.size(self._filerev)

def renamed(self):
    """check if file was actually renamed in this changeset revision

    If rename logged in file revision, we report copy for changeset only
    if file revisions linkrev points back to the changeset in question
    or both changeset parents contain different file revisions.
    """
    renamed = self._filelog.renamed(self._filenode)
    if not renamed:
        return renamed

    # linkrev points back here: the rename belongs to this changeset
    if self.rev() == self.linkrev():
        return renamed

    path = self.path()
    filenode = self._filenode
    for parent in self._changectx.parents():
        try:
            # a parent already has this exact file revision: not a rename
            # introduced by this changeset
            if filenode == parent.filenode(path):
                return None
        except error.LookupError:
            pass
    return renamed

def children(self):
    """Return filectx objects for the child revisions of this file."""
    # hard for renames
    kids = self._filelog.children(self._filenode)
    return [filectx(self._repo, self._path, fileid=n,
                    filelog=self._filelog) for n in kids]
1140
1140
class committablectx(basectx):
    """A committablectx object provides common functionality for a context
    that wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        self._repo = repo
        self._rev = None
        self._node = None
        self._text = text
        # only override the lazily-computed defaults when a value was given
        if date:
            self._date = util.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = extra.copy() if extra else {}
        if 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise error.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
        # normalize the empty branch name to 'default'
        if self._extra['branch'] == '':
            self._extra['branch'] = 'default'
1168
1168
def __str__(self):
    # render as "<first parent>+" to mark an uncommitted state
    return str(self._parents[0]) + "+"

def __nonzero__(self):
    return True

def _buildflagfunc(self):
    """Create a fallback function for getting file flags when the
    filesystem doesn't support them."""
    getcopy = self._repo.dirstate.copies().get
    parents = self.parents()
    if len(parents) < 2:
        # when we have one parent, it's easy: copy from parent
        pman = parents[0].manifest()

        def func(f):
            return pman.flags(getcopy(f, f))
    else:
        # merges are tricky: we try to reconstruct the unstored
        # result from the merge (issue1802)
        p1, p2 = parents
        pa = p1.ancestor(p2)
        m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

        def func(f):
            f = getcopy(f, f)  # may be wrong for merges with copies
            fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
            if fl1 == fl2:
                return fl1
            if fl1 == fla:
                return fl2
            if fl2 == fla:
                return fl1
            return ''  # punt for conflicts

    return func

@propertycache
def _flagfunc(self):
    return self._repo.dirstate.flagfunc(self._buildflagfunc)
1210
1210
@propertycache
def _manifest(self):
    """generate a manifest corresponding to the values in self._status

    This reuse the file nodeid from parent, but we append an extra letter
    when modified. Modified files get an extra 'm' while added files get
    an extra 'a'. This is used by manifests merge to see that files
    are different and by update logic to avoid deleting newly added files.
    """
    parents = self.parents()

    man1 = parents[0].manifest()
    man = man1.copy()
    if len(parents) > 1:
        man2 = self.p2().manifest()

        def getman(f):
            # prefer the first parent's node when the file exists there
            return man1 if f in man1 else man2
    else:
        getman = lambda f: man1

    copied = self._repo.dirstate.copies()
    ff = self._flagfunc
    for suffix, paths in (("a", self._status.added),
                          ("m", self._status.modified)):
        for f in paths:
            orig = copied.get(f, f)
            man[f] = getman(orig).get(orig, nullid) + suffix
            try:
                man.setflag(f, ff(f))
            except OSError:
                pass

    # drop files that no longer exist in the working state
    for f in self._status.deleted + self._status.removed:
        if f in man:
            del man[f]

    return man
1249
1249
@propertycache
def _status(self):
    # fall back to a full repository status when none was supplied
    return self._repo.status()

@propertycache
def _user(self):
    # default to the configured username when none was supplied
    return self._repo.ui.username()

@propertycache
def _date(self):
    # default to "now" when no date was supplied
    return util.makedate()
1261
1261
def subrev(self, subpath):
    # an uncommitted context has no recorded subrepo revision
    return None

def manifestnode(self):
    # no manifest has been written yet
    return None

def user(self):
    return self._user or self._repo.ui.username()

def date(self):
    return self._date

def description(self):
    return self._text

def files(self):
    # everything that differs from the parent, in sorted order
    changed = (self._status.modified + self._status.added +
               self._status.removed)
    return sorted(changed)

def modified(self):
    return self._status.modified

def added(self):
    return self._status.added

def removed(self):
    return self._status.removed

def deleted(self):
    return self._status.deleted

def branch(self):
    return encoding.tolocal(self._extra['branch'])

def closesbranch(self):
    return 'close' in self._extra

def extra(self):
    return self._extra

def tags(self):
    # uncommitted changes carry no tags
    return []

def bookmarks(self):
    # union of the parents' bookmarks
    marks = []
    for p in self.parents():
        marks.extend(p.bookmarks())
    return marks
1300
1300
def phase(self):
    # at least draft; a secret parent pushes the phase higher
    phase = phases.draft  # default phase to draft
    for p in self.parents():
        phase = max(phase, p.phase())
    return phase

def hidden(self):
    return False

def children(self):
    # nothing can descend from an uncommitted context
    return []

def flags(self, path):
    """Return the recorded flags ('l', 'x' or '') for path."""
    if '_manifest' in self.__dict__:
        # the manifest was already built: trust its flags
        try:
            return self._manifest.flags(path)
        except KeyError:
            return ''

    try:
        return self._flagfunc(path)
    except OSError:
        return ''

def ancestor(self, c2):
    """return the "best" ancestor context of self and c2"""
    return self._parents[0].ancestor(c2)  # punt on two parents for now

def walk(self, match):
    '''Generates matching file names.'''
    return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
                                           True, False))

def matches(self, match):
    return sorted(self._repo.dirstate.matches(match))

def ancestors(self):
    # parents first, then everything reachable from them
    for p in self._parents:
        yield p
    for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]):
        yield changectx(self._repo, a)
1343
1343
def markcommitted(self, node):
    """Perform post-commit cleanup necessary after committing this ctx

    Specifically, this updates backing stores this working context
    wraps to reflect the fact that the changes reflected by this
    workingctx have been committed. For example, it marks
    modified and added files as normal in the dirstate.
    """
    self._repo.dirstate.beginparentchange()
    for f in self.modified() + self.added():
        self._repo.dirstate.normal(f)
    for f in self.removed():
        self._repo.dirstate.drop(f)
    self._repo.dirstate.setparents(node)
    self._repo.dirstate.endparentchange()

    # write changes out explicitly, because nesting wlock at
    # runtime may prevent 'wlock.release()' in 'repo.commit()'
    # from immediately doing so for subsequent changing files
    self._repo.dirstate.write(self._repo.currenttransaction())
1366
1366
class workingctx(committablectx):
    """A workingctx object makes access to data related to
    the current working directory convenient.
    date - any valid date string or (unixtime, offset), or None.
    user - username string, or None.
    extra - a dictionary of extra values, or None.
    changes - a list of file lists as returned by localrepo.status()
        or None to use the repository status.
    """
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        super(workingctx, self).__init__(repo, text, user, date, extra,
                                         changes)

    def __iter__(self):
        # every tracked file that is not marked removed
        ds = self._repo.dirstate
        for f in ds:
            if ds[f] != 'r':
                yield f

    def __contains__(self, key):
        # tracked means any state other than unknown ('?') or removed ('r')
        return self._repo.dirstate[key] not in "?r"

    def hex(self):
        return hex(wdirid)

    @propertycache
    def _parents(self):
        p = self._repo.dirstate.parents()
        # drop a null second parent so single-parent states have one entry
        if p[1] == nullid:
            p = p[:-1]
        return [changectx(self._repo, x) for x in p]

    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        return workingfilectx(self._repo, path, workingctx=self,
                              filelog=filelog)
1403
1403
def dirty(self, missing=False, merge=True, branch=True):
    "check whether a working directory is modified"
    # check subrepos first
    for s in sorted(self.substate):
        if self.sub(s).dirty():
            return True
    # check current working dir
    return ((merge and self.p2()) or
            (branch and self.branch() != self.p1().branch()) or
            self.modified() or self.added() or self.removed() or
            (missing and self.deleted()))

def add(self, list, prefix=""):
    def join(f):
        return os.path.join(prefix, f)

    with self._repo.wlock():
        ui, ds = self._repo.ui, self._repo.dirstate
        rejected = []
        lstat = self._repo.wvfs.lstat
        for f in list:
            scmutil.checkportable(ui, join(f))
            try:
                st = lstat(f)
            except OSError:
                ui.warn(_("%s does not exist!\n") % join(f))
                rejected.append(f)
                continue
            if st.st_size > 10000000:
                # warn about very large files, but still add them
                ui.warn(_("%s: up to %d MB of RAM may be required "
                          "to manage this file\n"
                          "(use 'hg revert %s' to cancel the "
                          "pending addition)\n")
                        % (f, 3 * st.st_size // 1000000, join(f)))
            if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                ui.warn(_("%s not added: only files and symlinks "
                          "supported currently\n") % join(f))
                rejected.append(f)
            elif ds[f] in 'amn':
                ui.warn(_("%s already tracked!\n") % join(f))
            elif ds[f] == 'r':
                # re-adding a removed file: restore it to tracked state
                ds.normallookup(f)
            else:
                ds.add(f)
        return rejected
1447
1447
def forget(self, files, prefix=""):
    def join(f):
        return os.path.join(prefix, f)

    with self._repo.wlock():
        rejected = []
        for f in files:
            if f not in self._repo.dirstate:
                self._repo.ui.warn(_("%s not tracked!\n") % join(f))
                rejected.append(f)
            elif self._repo.dirstate[f] != 'a':
                # previously committed file: mark it removed
                self._repo.dirstate.remove(f)
            else:
                # file was only added: simply stop tracking it
                self._repo.dirstate.drop(f)
        return rejected

def undelete(self, list):
    pctxs = self.parents()
    with self._repo.wlock():
        for f in list:
            if self._repo.dirstate[f] != 'r':
                self._repo.ui.warn(_("%s not removed!\n") % f)
            else:
                # restore the file content from whichever parent has it
                fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
                t = fctx.data()
                self._repo.wwrite(f, t, fctx.flags())
                self._repo.dirstate.normal(f)
1473
1473
def copy(self, source, dest):
    """Record that dest is a copy of source in the dirstate.

    Warns (instead of raising) when dest is missing or is not a regular
    file or symlink.
    """
    try:
        st = self._repo.wvfs.lstat(dest)
    except OSError as err:
        # only a missing destination is tolerated; re-raise anything else
        if err.errno != errno.ENOENT:
            raise
        self._repo.ui.warn(_("%s does not exist!\n") % dest)
        return
    if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
        self._repo.ui.warn(_("copy failed: %s is not a file or a "
                             "symbolic link\n") % dest)
    else:
        with self._repo.wlock():
            if self._repo.dirstate[dest] in '?':
                self._repo.dirstate.add(dest)
            elif self._repo.dirstate[dest] in 'r':
                self._repo.dirstate.normallookup(dest)
            self._repo.dirstate.copy(source, dest)

def match(self, pats=None, include=None, exclude=None, default='glob',
          listsubrepos=False, badfn=None):
    """Return a matcher for the given patterns against the working dir.

    NOTE: the original signature used the mutable default ``pats=[]``;
    a None sentinel is used instead to avoid sharing one list across
    calls. Passing no patterns behaves exactly as before.
    """
    if pats is None:
        pats = []
    r = self._repo

    # Only a case insensitive filesystem needs magic to translate user input
    # to actual case in the filesystem.
    if not util.checkcase(r.root):
        return matchmod.icasefsmatcher(r.root, r.getcwd(), pats, include,
                                       exclude, default, r.auditor, self,
                                       listsubrepos=listsubrepos,
                                       badfn=badfn)
    return matchmod.match(r.root, r.getcwd(), pats,
                          include, exclude, default,
                          auditor=r.auditor, ctx=self,
                          listsubrepos=listsubrepos, badfn=badfn)
1508
1508
def _filtersuspectsymlink(self, files):
    # nothing to do when there are no files or the OS supports symlinks
    if not files or self._repo.dirstate._checklink:
        return files

    # Symlink placeholders may get non-symlink-like contents
    # via user error or dereferencing by NFS or Samba servers,
    # so we filter out any placeholders that don't look like a
    # symlink
    sane = []
    for f in files:
        if self.flags(f) == 'l':
            d = self[f].data()
            if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
                self._repo.ui.debug('ignoring suspect symlink placeholder'
                                    ' "%s"\n' % f)
                continue
        sane.append(f)
    return sane

def _checklookup(self, files):
    """Split possibly-clean files into (modified, fixup) lists."""
    # check for any possibly clean files
    if not files:
        return [], []

    modified = []
    fixup = []
    pctx = self._parents[0]
    # do a full compare of any files that might have changed
    for f in sorted(files):
        if (f not in pctx or self.flags(f) != pctx.flags(f)
            or pctx[f].cmp(self[f])):
            modified.append(f)
        else:
            fixup.append(f)

    # update dirstate for files that are actually clean
    if fixup:
        try:
            # updating the dirstate is optional
            # so we don't wait on the lock
            # wlock can invalidate the dirstate, so cache normal _after_
            # taking the lock
            with self._repo.wlock(False):
                normal = self._repo.dirstate.normal
                for f in fixup:
                    normal(f)
                # write changes out explicitly, because nesting
                # wlock at runtime may prevent 'wlock.release()'
                # after this block from doing so for subsequent
                # changing files
                self._repo.dirstate.write(self._repo.currenttransaction())
        except error.LockError:
            pass
    return modified, fixup
1563
1563
def _manifestmatches(self, match, s):
    """Slow path for workingctx

    The fast path is when we compare the working directory to its parent
    which means this function is comparing with a non-parent; therefore we
    need to build a manifest and return what matches.
    """
    mf = self._repo['.']._manifestmatches(match, s)
    for f in s.modified + s.added:
        # mark dirty files with the sentinel "new" node
        mf[f] = _newnode
        mf.setflag(f, self.flags(f))
    for f in s.removed:
        if f in mf:
            del mf[f]
    return mf

def _dirstatestatus(self, match=None, ignored=False, clean=False,
                    unknown=False):
    '''Gets the status from the dirstate -- internal use only.'''
    listignored, listclean, listunknown = ignored, clean, unknown
    match = match or matchmod.always(self._repo.root, self._repo.getcwd())
    subrepos = []
    if '.hgsub' in self:
        subrepos = sorted(self.substate)
    cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
                                        listclean, listunknown)

    # check for any possibly clean files
    if cmp:
        modified2, fixup = self._checklookup(cmp)
        s.modified.extend(modified2)

        # update dirstate for files that are actually clean
        if fixup and listclean:
            s.clean.extend(fixup)

    if match.always():
        # cache for performance
        if s.unknown or s.ignored or s.clean:
            # "_status" is cached with list*=False in the normal route
            self._status = scmutil.status(s.modified, s.added, s.removed,
                                          s.deleted, [], [], [])
        else:
            self._status = s

    return s
1610
1610
def _buildstatus(self, other, s, match, listignored, listclean,
                 listunknown):
    """build a status with respect to another context

    This includes logic for maintaining the fast path of status when
    comparing the working directory against its parent, which is to skip
    building a new manifest if self (working directory) is not comparing
    against its parent (repo['.']).
    """
    s = self._dirstatestatus(match, listignored, listclean, listunknown)
    # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
    # might have accidentally ended up with the entire contents of the file
    # they are supposed to be linking to.
    s.modified[:] = self._filtersuspectsymlink(s.modified)
    if other != self._repo['.']:
        # slow path: comparing against a non-parent
        s = super(workingctx, self)._buildstatus(other, s, match,
                                                 listignored, listclean,
                                                 listunknown)
    return s

def _matchstatus(self, other, match):
    """override the match method with a filter for directory patterns

    We use inheritance to customize the match.bad method only in cases of
    workingctx since it belongs only to the working directory when
    comparing against the parent changeset.

    If we aren't comparing against the working directory's parent, then we
    just use the default match object sent to us.
    """
    match = super(workingctx, self)._matchstatus(other, match)
    if other != self._repo['.']:
        def bad(f, msg):
            # 'f' may be a directory pattern from 'match.files()',
            # so 'f not in ctx1' is not enough
            if f not in other and not other.hasdir(f):
                self._repo.ui.warn('%s: %s\n' %
                                   (self._repo.dirstate.pathto(f), msg))
        match.bad = bad
    return match
1652
1652
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""

    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None
        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        # an uncommitted file context always "exists"
        return True

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def nodefor(ctx, name):
            return ctx._manifest.get(name, nullid)

        name = self._path
        flog = self._filelog
        parentctxs = self._changectx._parents
        source = self.renamed()

        if source:
            candidates = [source + (None,)]
        else:
            candidates = [(name, nodefor(parentctxs[0], name), flog)]
        candidates.extend((name, nodefor(pctx, name), flog)
                          for pctx in parentctxs[1:])

        return [self._parentfilectx(p, fileid=n, filelog=l)
                for p, n, l in candidates if n != nullid]

    def children(self):
        # uncommitted revisions can have no children yet
        return []
1697
1697
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""

    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        return workingctx(self._repo)

    def data(self):
        # read the file content straight from the working directory
        return self._repo.wread(self._path)

    def renamed(self):
        copysource = self._repo.dirstate.copied(self._path)
        if not copysource:
            return None
        p1manifest = self._changectx._parents[0]._manifest
        return copysource, p1manifest.get(copysource, nullid)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size

    def date(self):
        t, tz = self._changectx.date()
        try:
            mtime = self._repo.wvfs.lstat(self._path).st_mtime
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            # file is gone; fall back to the changeset date
            return (t, tz)
        return (mtime, tz)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)

    def write(self, data, flags):
        """wraps repo.wwrite"""
        self._repo.wwrite(self._path, data, flags)
1743
1743
class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """
    def __init__(self, repo, changes,
                 text="", user=None, date=None, extra=None):
        # deliberately skip workingctx.__init__ and initialize as a
        # committablectx with an explicit set of changes
        super(workingctx, self).__init__(repo, text, user, date, extra,
                                         changes)

    def _dirstatestatus(self, match=None, ignored=False, clean=False,
                        unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        if not match:
            match = matchmod.always(self._repo.root, self._repo.getcwd())
        if clean:
            cleanfiles = [f for f in self._manifest
                          if f not in self._changedset]
        else:
            cleanfiles = []
        st = self._status
        return scmutil.status([f for f in st.modified if match(f)],
                              [f for f in st.added if match(f)],
                              [f for f in st.removed if match(f)],
                              [], [], [], cleanfiles)

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        files = set(self._status.modified)
        files.update(self._status.added, self._status.removed)
        return files
1781
1781
def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    memo = {}

    def getfilectx(repo, memctx, path):
        # EAFP: a hit avoids the double lookup of "path not in memo"
        try:
            return memo[path]
        except KeyError:
            result = memo[path] = func(repo, memctx, path)
            return result

    return getfilectx
1797
1797
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time while
    related files data and is made available through a callback
    mechanism. 'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn raises an
    IOError. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by util.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        # normalize missing parents to nullid; exactly two are expected
        p1, p2 = [(p or nullid) for p in parents]
        self._parents = [changectx(self._repo, p) for p in (p1, p2)]
        self._files = sorted(set(files))
        self.substate = {}

        if callable(filectxfn):
            # memoizing increases performance for e.g. vcs convert scenarios.
            self._filectxfn = makecachingfilectxfn(filectxfn)
        else:
            # filectxfn is a path -> filectx mapping; wrap it in a function
            def getfilectx(repo, memctx, path):
                fctx = filectxfn[path]
                # this is weird but apparently we only keep track of one parent
                # (why not only store that instead of a tuple?)
                copied = fctx.renamed()
                if copied:
                    copied = copied[0]
                return memfilectx(repo, path, fctx.data(),
                                  islink=fctx.islink(),
                                  isexec=fctx.isexec(),
                                  copied=copied, memctx=memctx)
            self._filectxfn = getfilectx

        self._extra = extra.copy() if extra else {}
        if self._extra.get('branch', '') == '':
            self._extra['branch'] = 'default'

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for path in self._status.modified:
            p1node = p2node = nullid
            fparents = pctx[path].parents() # if file isn't in pctx, check p2?
            if fparents:
                p1node = fparents[0].filenode()
                if len(fparents) > 1:
                    p2node = fparents[1].filenode()
            man[path] = revlog.hash(self[path].data(), p1node, p2node)

        for path in self._status.added:
            man[path] = revlog.hash(self[path].data(), nullid, nullid)

        for path in self._status.removed:
            if path in man:
                del man[path]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            def managing(f):
                return f in man1 or f in man2
        else:
            def managing(f):
                return f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
1934
1934
class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, path, data, islink=False,
                 isexec=False, copied=None, memctx=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, memctx)
        self._data = data
        # manifest flag string: 'l' for symlink, 'x' for executable
        flags = ''
        if islink:
            flags += 'l'
        if isexec:
            flags += 'x'
        self._flags = flags
        if copied:
            self._copied = (copied, nullid)
        else:
            self._copied = None

    def data(self):
        return self._data

    def size(self):
        return len(self.data())

    def flags(self):
        return self._flags

    def renamed(self):
        return self._copied

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags):
        """wraps repo.wwrite"""
        self._data = data
General Comments 0
You need to be logged in to leave comments. Login now