##// END OF EJS Templates
context: safeguard against 'lx' being passed as file flag in manifest...
Pulkit Goyal -
r38784:d558e53c stable
parent child Browse files
Show More
@@ -1,2552 +1,2557 b''
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import filecmp
11 import filecmp
12 import os
12 import os
13 import stat
13 import stat
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 addednodeid,
17 addednodeid,
18 bin,
18 bin,
19 hex,
19 hex,
20 modifiednodeid,
20 modifiednodeid,
21 nullid,
21 nullid,
22 nullrev,
22 nullrev,
23 short,
23 short,
24 wdirfilenodeids,
24 wdirfilenodeids,
25 wdirid,
25 wdirid,
26 )
26 )
27 from . import (
27 from . import (
28 dagop,
28 dagop,
29 encoding,
29 encoding,
30 error,
30 error,
31 fileset,
31 fileset,
32 match as matchmod,
32 match as matchmod,
33 obsolete as obsmod,
33 obsolete as obsmod,
34 patch,
34 patch,
35 pathutil,
35 pathutil,
36 phases,
36 phases,
37 pycompat,
37 pycompat,
38 repoview,
38 repoview,
39 revlog,
39 revlog,
40 scmutil,
40 scmutil,
41 sparse,
41 sparse,
42 subrepo,
42 subrepo,
43 subrepoutil,
43 subrepoutil,
44 util,
44 util,
45 )
45 )
46 from .utils import (
46 from .utils import (
47 dateutil,
47 dateutil,
48 stringutil,
48 stringutil,
49 )
49 )
50
50
51 propertycache = util.propertycache
51 propertycache = util.propertycache
52
52
53 class basectx(object):
53 class basectx(object):
54 """A basectx object represents the common logic for its children:
54 """A basectx object represents the common logic for its children:
55 changectx: read-only context that is already present in the repo,
55 changectx: read-only context that is already present in the repo,
56 workingctx: a context that represents the working directory and can
56 workingctx: a context that represents the working directory and can
57 be committed,
57 be committed,
58 memctx: a context that represents changes in-memory and can also
58 memctx: a context that represents changes in-memory and can also
59 be committed."""
59 be committed."""
60
60
61 def __init__(self, repo):
61 def __init__(self, repo):
62 self._repo = repo
62 self._repo = repo
63
63
64 def __bytes__(self):
64 def __bytes__(self):
65 return short(self.node())
65 return short(self.node())
66
66
67 __str__ = encoding.strmethod(__bytes__)
67 __str__ = encoding.strmethod(__bytes__)
68
68
69 def __repr__(self):
69 def __repr__(self):
70 return r"<%s %s>" % (type(self).__name__, str(self))
70 return r"<%s %s>" % (type(self).__name__, str(self))
71
71
72 def __eq__(self, other):
72 def __eq__(self, other):
73 try:
73 try:
74 return type(self) == type(other) and self._rev == other._rev
74 return type(self) == type(other) and self._rev == other._rev
75 except AttributeError:
75 except AttributeError:
76 return False
76 return False
77
77
78 def __ne__(self, other):
78 def __ne__(self, other):
79 return not (self == other)
79 return not (self == other)
80
80
81 def __contains__(self, key):
81 def __contains__(self, key):
82 return key in self._manifest
82 return key in self._manifest
83
83
84 def __getitem__(self, key):
84 def __getitem__(self, key):
85 return self.filectx(key)
85 return self.filectx(key)
86
86
87 def __iter__(self):
87 def __iter__(self):
88 return iter(self._manifest)
88 return iter(self._manifest)
89
89
90 def _buildstatusmanifest(self, status):
90 def _buildstatusmanifest(self, status):
91 """Builds a manifest that includes the given status results, if this is
91 """Builds a manifest that includes the given status results, if this is
92 a working copy context. For non-working copy contexts, it just returns
92 a working copy context. For non-working copy contexts, it just returns
93 the normal manifest."""
93 the normal manifest."""
94 return self.manifest()
94 return self.manifest()
95
95
96 def _matchstatus(self, other, match):
96 def _matchstatus(self, other, match):
97 """This internal method provides a way for child objects to override the
97 """This internal method provides a way for child objects to override the
98 match operator.
98 match operator.
99 """
99 """
100 return match
100 return match
101
101
102 def _buildstatus(self, other, s, match, listignored, listclean,
102 def _buildstatus(self, other, s, match, listignored, listclean,
103 listunknown):
103 listunknown):
104 """build a status with respect to another context"""
104 """build a status with respect to another context"""
105 # Load earliest manifest first for caching reasons. More specifically,
105 # Load earliest manifest first for caching reasons. More specifically,
106 # if you have revisions 1000 and 1001, 1001 is probably stored as a
106 # if you have revisions 1000 and 1001, 1001 is probably stored as a
107 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
107 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
108 # 1000 and cache it so that when you read 1001, we just need to apply a
108 # 1000 and cache it so that when you read 1001, we just need to apply a
109 # delta to what's in the cache. So that's one full reconstruction + one
109 # delta to what's in the cache. So that's one full reconstruction + one
110 # delta application.
110 # delta application.
111 mf2 = None
111 mf2 = None
112 if self.rev() is not None and self.rev() < other.rev():
112 if self.rev() is not None and self.rev() < other.rev():
113 mf2 = self._buildstatusmanifest(s)
113 mf2 = self._buildstatusmanifest(s)
114 mf1 = other._buildstatusmanifest(s)
114 mf1 = other._buildstatusmanifest(s)
115 if mf2 is None:
115 if mf2 is None:
116 mf2 = self._buildstatusmanifest(s)
116 mf2 = self._buildstatusmanifest(s)
117
117
118 modified, added = [], []
118 modified, added = [], []
119 removed = []
119 removed = []
120 clean = []
120 clean = []
121 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
121 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
122 deletedset = set(deleted)
122 deletedset = set(deleted)
123 d = mf1.diff(mf2, match=match, clean=listclean)
123 d = mf1.diff(mf2, match=match, clean=listclean)
124 for fn, value in d.iteritems():
124 for fn, value in d.iteritems():
125 if fn in deletedset:
125 if fn in deletedset:
126 continue
126 continue
127 if value is None:
127 if value is None:
128 clean.append(fn)
128 clean.append(fn)
129 continue
129 continue
130 (node1, flag1), (node2, flag2) = value
130 (node1, flag1), (node2, flag2) = value
131 if node1 is None:
131 if node1 is None:
132 added.append(fn)
132 added.append(fn)
133 elif node2 is None:
133 elif node2 is None:
134 removed.append(fn)
134 removed.append(fn)
135 elif flag1 != flag2:
135 elif flag1 != flag2:
136 modified.append(fn)
136 modified.append(fn)
137 elif node2 not in wdirfilenodeids:
137 elif node2 not in wdirfilenodeids:
138 # When comparing files between two commits, we save time by
138 # When comparing files between two commits, we save time by
139 # not comparing the file contents when the nodeids differ.
139 # not comparing the file contents when the nodeids differ.
140 # Note that this means we incorrectly report a reverted change
140 # Note that this means we incorrectly report a reverted change
141 # to a file as a modification.
141 # to a file as a modification.
142 modified.append(fn)
142 modified.append(fn)
143 elif self[fn].cmp(other[fn]):
143 elif self[fn].cmp(other[fn]):
144 modified.append(fn)
144 modified.append(fn)
145 else:
145 else:
146 clean.append(fn)
146 clean.append(fn)
147
147
148 if removed:
148 if removed:
149 # need to filter files if they are already reported as removed
149 # need to filter files if they are already reported as removed
150 unknown = [fn for fn in unknown if fn not in mf1 and
150 unknown = [fn for fn in unknown if fn not in mf1 and
151 (not match or match(fn))]
151 (not match or match(fn))]
152 ignored = [fn for fn in ignored if fn not in mf1 and
152 ignored = [fn for fn in ignored if fn not in mf1 and
153 (not match or match(fn))]
153 (not match or match(fn))]
154 # if they're deleted, don't report them as removed
154 # if they're deleted, don't report them as removed
155 removed = [fn for fn in removed if fn not in deletedset]
155 removed = [fn for fn in removed if fn not in deletedset]
156
156
157 return scmutil.status(modified, added, removed, deleted, unknown,
157 return scmutil.status(modified, added, removed, deleted, unknown,
158 ignored, clean)
158 ignored, clean)
159
159
160 @propertycache
160 @propertycache
161 def substate(self):
161 def substate(self):
162 return subrepoutil.state(self, self._repo.ui)
162 return subrepoutil.state(self, self._repo.ui)
163
163
164 def subrev(self, subpath):
164 def subrev(self, subpath):
165 return self.substate[subpath][1]
165 return self.substate[subpath][1]
166
166
167 def rev(self):
167 def rev(self):
168 return self._rev
168 return self._rev
169 def node(self):
169 def node(self):
170 return self._node
170 return self._node
171 def hex(self):
171 def hex(self):
172 return hex(self.node())
172 return hex(self.node())
173 def manifest(self):
173 def manifest(self):
174 return self._manifest
174 return self._manifest
175 def manifestctx(self):
175 def manifestctx(self):
176 return self._manifestctx
176 return self._manifestctx
177 def repo(self):
177 def repo(self):
178 return self._repo
178 return self._repo
179 def phasestr(self):
179 def phasestr(self):
180 return phases.phasenames[self.phase()]
180 return phases.phasenames[self.phase()]
181 def mutable(self):
181 def mutable(self):
182 return self.phase() > phases.public
182 return self.phase() > phases.public
183
183
184 def matchfileset(self, expr, badfn=None):
184 def matchfileset(self, expr, badfn=None):
185 return fileset.match(self, expr, badfn=badfn)
185 return fileset.match(self, expr, badfn=badfn)
186
186
187 def obsolete(self):
187 def obsolete(self):
188 """True if the changeset is obsolete"""
188 """True if the changeset is obsolete"""
189 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
189 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
190
190
191 def extinct(self):
191 def extinct(self):
192 """True if the changeset is extinct"""
192 """True if the changeset is extinct"""
193 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
193 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
194
194
195 def orphan(self):
195 def orphan(self):
196 """True if the changeset is not obsolete but it's ancestor are"""
196 """True if the changeset is not obsolete but it's ancestor are"""
197 return self.rev() in obsmod.getrevs(self._repo, 'orphan')
197 return self.rev() in obsmod.getrevs(self._repo, 'orphan')
198
198
199 def phasedivergent(self):
199 def phasedivergent(self):
200 """True if the changeset try to be a successor of a public changeset
200 """True if the changeset try to be a successor of a public changeset
201
201
202 Only non-public and non-obsolete changesets may be bumped.
202 Only non-public and non-obsolete changesets may be bumped.
203 """
203 """
204 return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')
204 return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')
205
205
206 def contentdivergent(self):
206 def contentdivergent(self):
207 """Is a successors of a changeset with multiple possible successors set
207 """Is a successors of a changeset with multiple possible successors set
208
208
209 Only non-public and non-obsolete changesets may be divergent.
209 Only non-public and non-obsolete changesets may be divergent.
210 """
210 """
211 return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')
211 return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')
212
212
213 def isunstable(self):
213 def isunstable(self):
214 """True if the changeset is either unstable, bumped or divergent"""
214 """True if the changeset is either unstable, bumped or divergent"""
215 return self.orphan() or self.phasedivergent() or self.contentdivergent()
215 return self.orphan() or self.phasedivergent() or self.contentdivergent()
216
216
217 def instabilities(self):
217 def instabilities(self):
218 """return the list of instabilities affecting this changeset.
218 """return the list of instabilities affecting this changeset.
219
219
220 Instabilities are returned as strings. possible values are:
220 Instabilities are returned as strings. possible values are:
221 - orphan,
221 - orphan,
222 - phase-divergent,
222 - phase-divergent,
223 - content-divergent.
223 - content-divergent.
224 """
224 """
225 instabilities = []
225 instabilities = []
226 if self.orphan():
226 if self.orphan():
227 instabilities.append('orphan')
227 instabilities.append('orphan')
228 if self.phasedivergent():
228 if self.phasedivergent():
229 instabilities.append('phase-divergent')
229 instabilities.append('phase-divergent')
230 if self.contentdivergent():
230 if self.contentdivergent():
231 instabilities.append('content-divergent')
231 instabilities.append('content-divergent')
232 return instabilities
232 return instabilities
233
233
234 def parents(self):
234 def parents(self):
235 """return contexts for each parent changeset"""
235 """return contexts for each parent changeset"""
236 return self._parents
236 return self._parents
237
237
238 def p1(self):
238 def p1(self):
239 return self._parents[0]
239 return self._parents[0]
240
240
241 def p2(self):
241 def p2(self):
242 parents = self._parents
242 parents = self._parents
243 if len(parents) == 2:
243 if len(parents) == 2:
244 return parents[1]
244 return parents[1]
245 return changectx(self._repo, nullrev)
245 return changectx(self._repo, nullrev)
246
246
247 def _fileinfo(self, path):
247 def _fileinfo(self, path):
248 if r'_manifest' in self.__dict__:
248 if r'_manifest' in self.__dict__:
249 try:
249 try:
250 return self._manifest[path], self._manifest.flags(path)
250 return self._manifest[path], self._manifest.flags(path)
251 except KeyError:
251 except KeyError:
252 raise error.ManifestLookupError(self._node, path,
252 raise error.ManifestLookupError(self._node, path,
253 _('not found in manifest'))
253 _('not found in manifest'))
254 if r'_manifestdelta' in self.__dict__ or path in self.files():
254 if r'_manifestdelta' in self.__dict__ or path in self.files():
255 if path in self._manifestdelta:
255 if path in self._manifestdelta:
256 return (self._manifestdelta[path],
256 return (self._manifestdelta[path],
257 self._manifestdelta.flags(path))
257 self._manifestdelta.flags(path))
258 mfl = self._repo.manifestlog
258 mfl = self._repo.manifestlog
259 try:
259 try:
260 node, flag = mfl[self._changeset.manifest].find(path)
260 node, flag = mfl[self._changeset.manifest].find(path)
261 except KeyError:
261 except KeyError:
262 raise error.ManifestLookupError(self._node, path,
262 raise error.ManifestLookupError(self._node, path,
263 _('not found in manifest'))
263 _('not found in manifest'))
264
264
265 return node, flag
265 return node, flag
266
266
267 def filenode(self, path):
267 def filenode(self, path):
268 return self._fileinfo(path)[0]
268 return self._fileinfo(path)[0]
269
269
270 def flags(self, path):
270 def flags(self, path):
271 try:
271 try:
272 return self._fileinfo(path)[1]
272 return self._fileinfo(path)[1]
273 except error.LookupError:
273 except error.LookupError:
274 return ''
274 return ''
275
275
276 def sub(self, path, allowcreate=True):
276 def sub(self, path, allowcreate=True):
277 '''return a subrepo for the stored revision of path, never wdir()'''
277 '''return a subrepo for the stored revision of path, never wdir()'''
278 return subrepo.subrepo(self, path, allowcreate=allowcreate)
278 return subrepo.subrepo(self, path, allowcreate=allowcreate)
279
279
280 def nullsub(self, path, pctx):
280 def nullsub(self, path, pctx):
281 return subrepo.nullsubrepo(self, path, pctx)
281 return subrepo.nullsubrepo(self, path, pctx)
282
282
283 def workingsub(self, path):
283 def workingsub(self, path):
284 '''return a subrepo for the stored revision, or wdir if this is a wdir
284 '''return a subrepo for the stored revision, or wdir if this is a wdir
285 context.
285 context.
286 '''
286 '''
287 return subrepo.subrepo(self, path, allowwdir=True)
287 return subrepo.subrepo(self, path, allowwdir=True)
288
288
289 def match(self, pats=None, include=None, exclude=None, default='glob',
289 def match(self, pats=None, include=None, exclude=None, default='glob',
290 listsubrepos=False, badfn=None):
290 listsubrepos=False, badfn=None):
291 r = self._repo
291 r = self._repo
292 return matchmod.match(r.root, r.getcwd(), pats,
292 return matchmod.match(r.root, r.getcwd(), pats,
293 include, exclude, default,
293 include, exclude, default,
294 auditor=r.nofsauditor, ctx=self,
294 auditor=r.nofsauditor, ctx=self,
295 listsubrepos=listsubrepos, badfn=badfn)
295 listsubrepos=listsubrepos, badfn=badfn)
296
296
297 def diff(self, ctx2=None, match=None, changes=None, opts=None,
297 def diff(self, ctx2=None, match=None, changes=None, opts=None,
298 losedatafn=None, prefix='', relroot='', copy=None,
298 losedatafn=None, prefix='', relroot='', copy=None,
299 hunksfilterfn=None):
299 hunksfilterfn=None):
300 """Returns a diff generator for the given contexts and matcher"""
300 """Returns a diff generator for the given contexts and matcher"""
301 if ctx2 is None:
301 if ctx2 is None:
302 ctx2 = self.p1()
302 ctx2 = self.p1()
303 if ctx2 is not None:
303 if ctx2 is not None:
304 ctx2 = self._repo[ctx2]
304 ctx2 = self._repo[ctx2]
305 return patch.diff(self._repo, ctx2, self, match=match, changes=changes,
305 return patch.diff(self._repo, ctx2, self, match=match, changes=changes,
306 opts=opts, losedatafn=losedatafn, prefix=prefix,
306 opts=opts, losedatafn=losedatafn, prefix=prefix,
307 relroot=relroot, copy=copy,
307 relroot=relroot, copy=copy,
308 hunksfilterfn=hunksfilterfn)
308 hunksfilterfn=hunksfilterfn)
309
309
310 def dirs(self):
310 def dirs(self):
311 return self._manifest.dirs()
311 return self._manifest.dirs()
312
312
313 def hasdir(self, dir):
313 def hasdir(self, dir):
314 return self._manifest.hasdir(dir)
314 return self._manifest.hasdir(dir)
315
315
316 def status(self, other=None, match=None, listignored=False,
316 def status(self, other=None, match=None, listignored=False,
317 listclean=False, listunknown=False, listsubrepos=False):
317 listclean=False, listunknown=False, listsubrepos=False):
318 """return status of files between two nodes or node and working
318 """return status of files between two nodes or node and working
319 directory.
319 directory.
320
320
321 If other is None, compare this node with working directory.
321 If other is None, compare this node with working directory.
322
322
323 returns (modified, added, removed, deleted, unknown, ignored, clean)
323 returns (modified, added, removed, deleted, unknown, ignored, clean)
324 """
324 """
325
325
326 ctx1 = self
326 ctx1 = self
327 ctx2 = self._repo[other]
327 ctx2 = self._repo[other]
328
328
329 # This next code block is, admittedly, fragile logic that tests for
329 # This next code block is, admittedly, fragile logic that tests for
330 # reversing the contexts and wouldn't need to exist if it weren't for
330 # reversing the contexts and wouldn't need to exist if it weren't for
331 # the fast (and common) code path of comparing the working directory
331 # the fast (and common) code path of comparing the working directory
332 # with its first parent.
332 # with its first parent.
333 #
333 #
334 # What we're aiming for here is the ability to call:
334 # What we're aiming for here is the ability to call:
335 #
335 #
336 # workingctx.status(parentctx)
336 # workingctx.status(parentctx)
337 #
337 #
338 # If we always built the manifest for each context and compared those,
338 # If we always built the manifest for each context and compared those,
339 # then we'd be done. But the special case of the above call means we
339 # then we'd be done. But the special case of the above call means we
340 # just copy the manifest of the parent.
340 # just copy the manifest of the parent.
341 reversed = False
341 reversed = False
342 if (not isinstance(ctx1, changectx)
342 if (not isinstance(ctx1, changectx)
343 and isinstance(ctx2, changectx)):
343 and isinstance(ctx2, changectx)):
344 reversed = True
344 reversed = True
345 ctx1, ctx2 = ctx2, ctx1
345 ctx1, ctx2 = ctx2, ctx1
346
346
347 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
347 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
348 match = ctx2._matchstatus(ctx1, match)
348 match = ctx2._matchstatus(ctx1, match)
349 r = scmutil.status([], [], [], [], [], [], [])
349 r = scmutil.status([], [], [], [], [], [], [])
350 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
350 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
351 listunknown)
351 listunknown)
352
352
353 if reversed:
353 if reversed:
354 # Reverse added and removed. Clear deleted, unknown and ignored as
354 # Reverse added and removed. Clear deleted, unknown and ignored as
355 # these make no sense to reverse.
355 # these make no sense to reverse.
356 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
356 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
357 r.clean)
357 r.clean)
358
358
359 if listsubrepos:
359 if listsubrepos:
360 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
360 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
361 try:
361 try:
362 rev2 = ctx2.subrev(subpath)
362 rev2 = ctx2.subrev(subpath)
363 except KeyError:
363 except KeyError:
364 # A subrepo that existed in node1 was deleted between
364 # A subrepo that existed in node1 was deleted between
365 # node1 and node2 (inclusive). Thus, ctx2's substate
365 # node1 and node2 (inclusive). Thus, ctx2's substate
366 # won't contain that subpath. The best we can do ignore it.
366 # won't contain that subpath. The best we can do ignore it.
367 rev2 = None
367 rev2 = None
368 submatch = matchmod.subdirmatcher(subpath, match)
368 submatch = matchmod.subdirmatcher(subpath, match)
369 s = sub.status(rev2, match=submatch, ignored=listignored,
369 s = sub.status(rev2, match=submatch, ignored=listignored,
370 clean=listclean, unknown=listunknown,
370 clean=listclean, unknown=listunknown,
371 listsubrepos=True)
371 listsubrepos=True)
372 for rfiles, sfiles in zip(r, s):
372 for rfiles, sfiles in zip(r, s):
373 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
373 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
374
374
375 for l in r:
375 for l in r:
376 l.sort()
376 l.sort()
377
377
378 return r
378 return r
379
379
380 class changectx(basectx):
380 class changectx(basectx):
381 """A changecontext object makes access to data related to a particular
381 """A changecontext object makes access to data related to a particular
382 changeset convenient. It represents a read-only context already present in
382 changeset convenient. It represents a read-only context already present in
383 the repo."""
383 the repo."""
384 def __init__(self, repo, changeid='.'):
384 def __init__(self, repo, changeid='.'):
385 """changeid is a revision number, node, or tag"""
385 """changeid is a revision number, node, or tag"""
386 super(changectx, self).__init__(repo)
386 super(changectx, self).__init__(repo)
387
387
388 try:
388 try:
389 if isinstance(changeid, int):
389 if isinstance(changeid, int):
390 self._node = repo.changelog.node(changeid)
390 self._node = repo.changelog.node(changeid)
391 self._rev = changeid
391 self._rev = changeid
392 return
392 return
393 elif changeid == 'null':
393 elif changeid == 'null':
394 self._node = nullid
394 self._node = nullid
395 self._rev = nullrev
395 self._rev = nullrev
396 return
396 return
397 elif changeid == 'tip':
397 elif changeid == 'tip':
398 self._node = repo.changelog.tip()
398 self._node = repo.changelog.tip()
399 self._rev = repo.changelog.rev(self._node)
399 self._rev = repo.changelog.rev(self._node)
400 return
400 return
401 elif (changeid == '.'
401 elif (changeid == '.'
402 or repo.local() and changeid == repo.dirstate.p1()):
402 or repo.local() and changeid == repo.dirstate.p1()):
403 # this is a hack to delay/avoid loading obsmarkers
403 # this is a hack to delay/avoid loading obsmarkers
404 # when we know that '.' won't be hidden
404 # when we know that '.' won't be hidden
405 self._node = repo.dirstate.p1()
405 self._node = repo.dirstate.p1()
406 self._rev = repo.unfiltered().changelog.rev(self._node)
406 self._rev = repo.unfiltered().changelog.rev(self._node)
407 return
407 return
408 elif len(changeid) == 20:
408 elif len(changeid) == 20:
409 try:
409 try:
410 self._node = changeid
410 self._node = changeid
411 self._rev = repo.changelog.rev(changeid)
411 self._rev = repo.changelog.rev(changeid)
412 return
412 return
413 except error.FilteredLookupError:
413 except error.FilteredLookupError:
414 changeid = hex(changeid) # for the error message
414 changeid = hex(changeid) # for the error message
415 raise
415 raise
416 except LookupError:
416 except LookupError:
417 # check if it might have come from damaged dirstate
417 # check if it might have come from damaged dirstate
418 #
418 #
419 # XXX we could avoid the unfiltered if we had a recognizable
419 # XXX we could avoid the unfiltered if we had a recognizable
420 # exception for filtered changeset access
420 # exception for filtered changeset access
421 if (repo.local()
421 if (repo.local()
422 and changeid in repo.unfiltered().dirstate.parents()):
422 and changeid in repo.unfiltered().dirstate.parents()):
423 msg = _("working directory has unknown parent '%s'!")
423 msg = _("working directory has unknown parent '%s'!")
424 raise error.Abort(msg % short(changeid))
424 raise error.Abort(msg % short(changeid))
425 changeid = hex(changeid) # for the error message
425 changeid = hex(changeid) # for the error message
426
426
427 elif len(changeid) == 40:
427 elif len(changeid) == 40:
428 try:
428 try:
429 self._node = bin(changeid)
429 self._node = bin(changeid)
430 self._rev = repo.changelog.rev(self._node)
430 self._rev = repo.changelog.rev(self._node)
431 return
431 return
432 except error.FilteredLookupError:
432 except error.FilteredLookupError:
433 raise
433 raise
434 except (TypeError, LookupError):
434 except (TypeError, LookupError):
435 pass
435 pass
436 else:
436 else:
437 raise error.ProgrammingError(
437 raise error.ProgrammingError(
438 "unsupported changeid '%s' of type %s" %
438 "unsupported changeid '%s' of type %s" %
439 (changeid, type(changeid)))
439 (changeid, type(changeid)))
440
440
441 # lookup failed
441 # lookup failed
442 except (error.FilteredIndexError, error.FilteredLookupError):
442 except (error.FilteredIndexError, error.FilteredLookupError):
443 raise error.FilteredRepoLookupError(_("filtered revision '%s'")
443 raise error.FilteredRepoLookupError(_("filtered revision '%s'")
444 % pycompat.bytestr(changeid))
444 % pycompat.bytestr(changeid))
445 except error.FilteredRepoLookupError:
445 except error.FilteredRepoLookupError:
446 raise
446 raise
447 except IndexError:
447 except IndexError:
448 pass
448 pass
449 raise error.RepoLookupError(
449 raise error.RepoLookupError(
450 _("unknown revision '%s'") % changeid)
450 _("unknown revision '%s'") % changeid)
451
451
452 def __hash__(self):
452 def __hash__(self):
453 try:
453 try:
454 return hash(self._rev)
454 return hash(self._rev)
455 except AttributeError:
455 except AttributeError:
456 return id(self)
456 return id(self)
457
457
458 def __nonzero__(self):
458 def __nonzero__(self):
459 return self._rev != nullrev
459 return self._rev != nullrev
460
460
461 __bool__ = __nonzero__
461 __bool__ = __nonzero__
462
462
463 @propertycache
463 @propertycache
464 def _changeset(self):
464 def _changeset(self):
465 return self._repo.changelog.changelogrevision(self.rev())
465 return self._repo.changelog.changelogrevision(self.rev())
466
466
467 @propertycache
467 @propertycache
468 def _manifest(self):
468 def _manifest(self):
469 return self._manifestctx.read()
469 return self._manifestctx.read()
470
470
471 @property
471 @property
472 def _manifestctx(self):
472 def _manifestctx(self):
473 return self._repo.manifestlog[self._changeset.manifest]
473 return self._repo.manifestlog[self._changeset.manifest]
474
474
475 @propertycache
475 @propertycache
476 def _manifestdelta(self):
476 def _manifestdelta(self):
477 return self._manifestctx.readdelta()
477 return self._manifestctx.readdelta()
478
478
479 @propertycache
479 @propertycache
480 def _parents(self):
480 def _parents(self):
481 repo = self._repo
481 repo = self._repo
482 p1, p2 = repo.changelog.parentrevs(self._rev)
482 p1, p2 = repo.changelog.parentrevs(self._rev)
483 if p2 == nullrev:
483 if p2 == nullrev:
484 return [changectx(repo, p1)]
484 return [changectx(repo, p1)]
485 return [changectx(repo, p1), changectx(repo, p2)]
485 return [changectx(repo, p1), changectx(repo, p2)]
486
486
487 def changeset(self):
487 def changeset(self):
488 c = self._changeset
488 c = self._changeset
489 return (
489 return (
490 c.manifest,
490 c.manifest,
491 c.user,
491 c.user,
492 c.date,
492 c.date,
493 c.files,
493 c.files,
494 c.description,
494 c.description,
495 c.extra,
495 c.extra,
496 )
496 )
497 def manifestnode(self):
497 def manifestnode(self):
498 return self._changeset.manifest
498 return self._changeset.manifest
499
499
500 def user(self):
500 def user(self):
501 return self._changeset.user
501 return self._changeset.user
502 def date(self):
502 def date(self):
503 return self._changeset.date
503 return self._changeset.date
504 def files(self):
504 def files(self):
505 return self._changeset.files
505 return self._changeset.files
506 def description(self):
506 def description(self):
507 return self._changeset.description
507 return self._changeset.description
508 def branch(self):
508 def branch(self):
509 return encoding.tolocal(self._changeset.extra.get("branch"))
509 return encoding.tolocal(self._changeset.extra.get("branch"))
510 def closesbranch(self):
510 def closesbranch(self):
511 return 'close' in self._changeset.extra
511 return 'close' in self._changeset.extra
512 def extra(self):
512 def extra(self):
513 """Return a dict of extra information."""
513 """Return a dict of extra information."""
514 return self._changeset.extra
514 return self._changeset.extra
515 def tags(self):
515 def tags(self):
516 """Return a list of byte tag names"""
516 """Return a list of byte tag names"""
517 return self._repo.nodetags(self._node)
517 return self._repo.nodetags(self._node)
518 def bookmarks(self):
518 def bookmarks(self):
519 """Return a list of byte bookmark names."""
519 """Return a list of byte bookmark names."""
520 return self._repo.nodebookmarks(self._node)
520 return self._repo.nodebookmarks(self._node)
521 def phase(self):
521 def phase(self):
522 return self._repo._phasecache.phase(self._repo, self._rev)
522 return self._repo._phasecache.phase(self._repo, self._rev)
523 def hidden(self):
523 def hidden(self):
524 return self._rev in repoview.filterrevs(self._repo, 'visible')
524 return self._rev in repoview.filterrevs(self._repo, 'visible')
525
525
526 def isinmemory(self):
526 def isinmemory(self):
527 return False
527 return False
528
528
def children(self):
    """return list of changectx contexts for each child changeset.

    This returns only the immediate child changesets. Use descendants() to
    recursively walk children.
    """
    return [changectx(self._repo, node)
            for node in self._repo.changelog.children(self._node)]
537
537
def ancestors(self):
    # lazily yield a changectx for every ancestor revision of this one
    for rev in self._repo.changelog.ancestors([self._rev]):
        yield changectx(self._repo, rev)
541
541
def descendants(self):
    """Recursively yield all children of the changeset.

    For just the immediate children, use children()
    """
    for rev in self._repo.changelog.descendants([self._rev]):
        yield changectx(self._repo, rev)
549
549
def filectx(self, path, fileid=None, filelog=None):
    """get a file context from this changeset"""
    # resolve the file node from the manifest unless the caller supplied one
    if fileid is None:
        fileid = self.filenode(path)
    return filectx(self._repo, path, fileid=fileid,
                   changectx=self, filelog=filelog)
556
556
def ancestor(self, c2, warn=False):
    """return the "best" ancestor context of self and c2

    If there are multiple candidates, it will show a message and check
    merge.preferancestor configuration before falling back to the
    revlog ancestor."""
    # a workingctx has no node of its own; fall back to its first parent
    othernode = c2._node
    if othernode is None:
        othernode = c2._parents[0]._node
    heads = self._repo.changelog.commonancestorsheads(self._node, othernode)
    if not heads:
        best = nullid
    elif len(heads) == 1:
        best = heads[0]
    else:
        # several candidates: let configuration pick one
        # experimental config: merge.preferancestor
        for rev in self._repo.ui.configlist('merge', 'preferancestor'):
            try:
                ctx = scmutil.revsymbol(self._repo, rev)
            except error.RepoLookupError:
                continue
            best = ctx.node()
            if best in heads:
                break
        else:
            # no configured preference matched; use the revlog's answer
            best = self._repo.changelog.ancestor(self._node, othernode)
        if warn:
            self._repo.ui.status(
                (_("note: using %s as ancestor of %s and %s\n") %
                 (short(best), short(self._node), short(othernode))) +
                ''.join(_(" alternatively, use --config "
                          "merge.preferancestor=%s\n") %
                        short(n) for n in sorted(heads) if n != best))
    return changectx(self._repo, best)
592
592
def descendant(self, other):
    # deprecated alias kept for backward compatibility; warn and delegate
    self._repo.ui.deprecwarn(b'ctx.descendant(other) is deprecated, '
                             b'use ctx.isancestorof(other)', b'4.7')
    return self.isancestorof(other)
598
598
def isancestorof(self, other):
    """True if this changeset is an ancestor of other"""
    # delegate the DAG query to the changelog, working on revision numbers
    return self._repo.changelog.isancestorrev(self._rev, other._rev)
602
602
def walk(self, match):
    '''Generates matching file names.'''

    def bad(fn, msg):
        # The manifest doesn't know about subrepos: a path that points
        # into a valid subrepo is fine, so stay silent for those.
        if any(fn == s or fn.startswith(s + '/')
               for s in self.substate):
            return
        # otherwise report the miss, including the revision in the message
        match.bad(fn, _('no such file in rev %s') % self)

    # wrap the matcher so misses produce the message above
    return self._manifest.walk(matchmod.badmatch(match, bad))
617
617
def matches(self, match):
    # plain delegation: matching files are exactly what walk() yields
    return self.walk(match)
620
620
class basefilectx(object):
    """A filecontext object represents the common logic for its children:
    filectx: read-only access to a filerevision that is already present
             in the repo,
    workingfilectx: a filecontext that represents files from the working
                    directory,
    memfilectx: a filecontext that represents files in-memory,
    overlayfilectx: duplicate another filecontext with some fields overridden.
    """
    @propertycache
    def _filelog(self):
        return self._repo.file(self._path)

    @propertycache
    def _changeid(self):
        if r'_changeid' in self.__dict__:
            return self._changeid
        elif r'_changectx' in self.__dict__:
            return self._changectx.rev()
        elif r'_descendantrev' in self.__dict__:
            # this file context was created from a revision with a known
            # descendant, we can (lazily) correct for linkrev aliases
            return self._adjustlinkrev(self._descendantrev)
        else:
            return self._filelog.linkrev(self._filerev)

    @propertycache
    def _filenode(self):
        if r'_fileid' in self.__dict__:
            return self._filelog.lookup(self._fileid)
        else:
            return self._changectx.filenode(self._path)

    @propertycache
    def _filerev(self):
        return self._filelog.rev(self._filenode)

    @propertycache
    def _repopath(self):
        return self._path

    def __nonzero__(self):
        try:
            self._filenode
            return True
        except error.LookupError:
            # file is missing
            return False

    __bool__ = __nonzero__

    def __bytes__(self):
        try:
            return "%s@%s" % (self.path(), self._changectx)
        except error.LookupError:
            return "%s@???" % self.path()

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return r"<%s %s>" % (type(self).__name__, str(self))

    def __hash__(self):
        try:
            return hash((self._path, self._filenode))
        except AttributeError:
            # fall back to identity when the file node cannot be resolved
            return id(self)

    def __eq__(self, other):
        try:
            return (type(self) == type(other) and self._path == other._path
                    and self._filenode == other._filenode)
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    # thin accessors, mostly delegating to the associated changectx
    def filerev(self):
        return self._filerev
    def filenode(self):
        return self._filenode
    @propertycache
    def _flags(self):
        return self._changectx.flags(self._path)
    def flags(self):
        return self._flags
    def filelog(self):
        return self._filelog
    def rev(self):
        return self._changeid
    def linkrev(self):
        return self._filelog.linkrev(self._filerev)
    def node(self):
        return self._changectx.node()
    def hex(self):
        return self._changectx.hex()
    def user(self):
        return self._changectx.user()
    def date(self):
        return self._changectx.date()
    def files(self):
        return self._changectx.files()
    def description(self):
        return self._changectx.description()
    def branch(self):
        return self._changectx.branch()
    def extra(self):
        return self._changectx.extra()
    def phase(self):
        return self._changectx.phase()
    def phasestr(self):
        return self._changectx.phasestr()
    def obsolete(self):
        return self._changectx.obsolete()
    def instabilities(self):
        return self._changectx.instabilities()
    def manifest(self):
        return self._changectx.manifest()
    def changectx(self):
        return self._changectx
    def renamed(self):
        return self._copied
    def repo(self):
        return self._repo
    def size(self):
        return len(self.data())

    def path(self):
        return self._path

    def isbinary(self):
        try:
            return stringutil.binary(self.data())
        except IOError:
            return False
    def isexec(self):
        return 'x' in self.flags()
    def islink(self):
        return 'l' in self.flags()

    def isabsent(self):
        """whether this filectx represents a file not in self._changectx

        This is mainly for merge code to detect change/delete conflicts. This is
        expected to be True for all subclasses of basectx."""
        return False

    _customcmp = False
    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # let the other side drive the comparison when it has custom logic
        if fctx._customcmp:
            return fctx.cmp(self)

        if (fctx._filenode is None
            and (self._repo._encodefilterpats
                 # if file data starts with '\1\n', empty metadata block is
                 # prepended, which adds 4 bytes to filelog.size().
                 or self.size() - 4 == fctx.size())
            or self.size() == fctx.size()):
            return self._filelog.cmp(self._filenode, fctx.data())

        # sizes differ, so the contents must differ
        return True

    def _adjustlinkrev(self, srcrev, inclusive=False):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked
        """
        repo = self._repo
        cl = repo.unfiltered().changelog
        mfl = repo.manifestlog
        # fetch the linkrev
        lkr = self.linkrev()
        # hack to reuse ancestor computation when searching for renames
        memberanc = getattr(self, '_ancestrycontext', None)
        iteranc = None
        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True # we skipped the real (revless) source
        else:
            revs = [srcrev]
        if memberanc is None:
            memberanc = iteranc = cl.ancestors(revs, lkr,
                                               inclusive=inclusive)
        # check if this linkrev is an ancestor of srcrev
        if lkr not in memberanc:
            if iteranc is None:
                iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
            fnode = self._filenode
            path = self._path
            for a in iteranc:
                ac = cl.read(a) # get changeset data (we avoid object creation)
                if path in ac[3]: # checking the 'files' field.
                    # The file has been touched, check if the content is
                    # similar to the one we search for.
                    if fnode == mfl[ac[0]].readfast().get(path):
                        return a
            # In theory, we should never get out of that loop without a result.
            # But if manifest uses a buggy file revision (not children of the
            # one it replaces) we could. Such a buggy situation will likely
            # result is crash somewhere else at to some point.
        return lkr

    def introrev(self):
        """return the rev of the changeset which introduced this file revision

        This method is different from linkrev because it take into account the
        changeset the filectx was created from. It ensures the returned
        revision is one of its ancestors. This prevents bugs from
        'linkrev-shadowing' when a file revision is used by multiple
        changesets.
        """
        lkr = self.linkrev()
        attrs = vars(self)
        noctx = not (r'_changeid' in attrs or r'_changectx' in attrs)
        if noctx or self.rev() == lkr:
            # no associated changeset, or linkrev already correct
            return self.linkrev()
        return self._adjustlinkrev(self.rev(), inclusive=True)

    def introfilectx(self):
        """Return filectx having identical contents, but pointing to the
        changeset revision where this filectx was introduced"""
        introrev = self.introrev()
        if self.rev() == introrev:
            return self
        return self.filectx(self.filenode(), changeid=introrev)

    def _parentfilectx(self, path, fileid, filelog):
        """create parent filectx keeping ancestry info for _adjustlinkrev()"""
        fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
        if r'_changeid' in vars(self) or r'_changectx' in vars(self):
            # If self is associated with a changeset (probably explicitly
            # fed), ensure the created filectx is associated with a
            # changeset that is an ancestor of self.changectx.
            # This lets us later use _adjustlinkrev to get a correct link.
            fctx._descendantrev = self.rev()
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        elif r'_descendantrev' in vars(self):
            # Otherwise propagate _descendantrev if we have one associated.
            fctx._descendantrev = self._descendantrev
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        return fctx

    def parents(self):
        path = self._path
        fl = self._filelog
        parents = self._filelog.parents(self._filenode)
        pl = [(path, node, fl) for node in parents if node != nullid]

        r = fl.renamed(self._filenode)
        if r:
            # - In the simple rename case, both parent are nullid, pl is empty.
            # - In case of merge, only one of the parent is null id and should
            # be replaced with the rename information. This parent is -always-
            # the first one.
            #
            # As null id have always been filtered out in the previous list
            # comprehension, inserting to 0 will always result in "replacing
            # first nullid parent with rename information.
            pl.insert(0, (r[0], r[1], self._repo.file(r[0])))

        return [self._parentfilectx(p, fnode, l) for p, fnode, l in pl]

    def p1(self):
        return self.parents()[0]

    def p2(self):
        p = self.parents()
        if len(p) == 2:
            return p[1]
        # synthesize a null second parent
        return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)

    def annotate(self, follow=False, skiprevs=None, diffopts=None):
        """Returns a list of annotateline objects for each line in the file

        - line.fctx is the filectx of the node where that line was last changed
        - line.lineno is the line number at the first appearance in the managed
          file
        - line.text is the data on that line (including newline character)
        """
        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            # Cut _descendantrev here to mitigate the penalty of lazy linkrev
            # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
            # from the topmost introrev (= srcrev) down to p.linkrev() if it
            # isn't an ancestor of the srcrev.
            f._changeid
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
            # from the cache to save time
            for p in pl:
                if not r'_filelog' in p.__dict__:
                    p._filelog = getlog(p.path())

            return pl

        # use linkrev to find the first changeset where self appeared
        base = self.introfilectx()
        if getattr(base, '_ancestrycontext', None) is None:
            cl = self._repo.changelog
            if base.rev() is None:
                # wctx is not inclusive, but works because _ancestrycontext
                # is used to test filelog revisions
                ac = cl.ancestors([p.rev() for p in base.parents()],
                                  inclusive=True)
            else:
                ac = cl.ancestors([base.rev()], inclusive=True)
            base._ancestrycontext = ac

        return dagop.annotate(base, parents, skiprevs=skiprevs,
                              diffopts=diffopts)

    def ancestors(self, followfirst=False):
        # walk ancestors in (linkrev, filenode) order, largest first
        visit = {}
        c = self
        if followfirst:
            cut = 1
        else:
            cut = None

        while True:
            for parent in c.parents()[:cut]:
                visit[(parent.linkrev(), parent.filenode())] = parent
            if not visit:
                break
            c = visit.pop(max(visit))
            yield c

    def decodeddata(self):
        """Returns `data()` after running repository decoding filters.

        This is often equivalent to how the data would be expressed on disk.
        """
        return self._repo.wwritedata(self.path(), self.data())
971
971
972 class filectx(basefilectx):
972 class filectx(basefilectx):
973 """A filecontext object makes access to data related to a particular
973 """A filecontext object makes access to data related to a particular
974 filerevision convenient."""
974 filerevision convenient."""
def __init__(self, repo, path, changeid=None, fileid=None,
             filelog=None, changectx=None):
    """changeid can be a changeset revision, node, or tag.
    fileid can be a file revision or node."""
    self._repo = repo
    self._path = path

    # at least one way to locate the revision must be supplied
    assert (changeid is not None
            or fileid is not None
            or changectx is not None), \
            ("bad args: changeid=%r, fileid=%r, changectx=%r"
             % (changeid, fileid, changectx))

    # only seed the lazily-computed attributes the caller provided;
    # the rest are derived on demand by the propertycaches
    if filelog is not None:
        self._filelog = filelog
    if changeid is not None:
        self._changeid = changeid
    if changectx is not None:
        self._changectx = changectx
    if fileid is not None:
        self._fileid = fileid
997
997
@propertycache
def _changectx(self):
    try:
        return changectx(self._repo, self._changeid)
    except error.FilteredRepoLookupError:
        # Linkrev may point to any revision in the repository. When the
        # repository is filtered this may lead to `filectx` trying to build
        # `changectx` for filtered revision. In such case we fallback to
        # creating `changectx` on the unfiltered version of the reposition.
        # This fallback should not be an issue because `changectx` from
        # `filectx` are not used in complex operations that care about
        # filtering.
        #
        # This fallback is a cheap and dirty fix that prevent several
        # crashes. It does not ensure the behavior is correct. However the
        # behavior was not correct before filtering either and "incorrect
        # behavior" is seen as better as "crash"
        #
        # Linkrevs have several serious troubles with filtering that are
        # complicated to solve. Proper handling of the issue here should be
        # considered when solving linkrev issue are on the table.
        return changectx(self._repo.unfiltered(), self._changeid)
1020
1020
1021 def filectx(self, fileid, changeid=None):
1021 def filectx(self, fileid, changeid=None):
1022 '''opens an arbitrary revision of the file without
1022 '''opens an arbitrary revision of the file without
1023 opening a new filelog'''
1023 opening a new filelog'''
1024 return filectx(self._repo, self._path, fileid=fileid,
1024 return filectx(self._repo, self._path, fileid=fileid,
1025 filelog=self._filelog, changeid=changeid)
1025 filelog=self._filelog, changeid=changeid)
1026
1026
    def rawdata(self):
        """Return the raw (undecoded) revlog revision for this file node."""
        return self._filelog.revision(self._filenode, raw=True)
1029
1029
    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)
1033
1033
    def data(self):
        """Return the file content for this revision.

        If the node is censored and ``censor.policy`` is ``ignore``, an
        empty string is returned instead; otherwise an Abort is raised
        pointing the user at that config knob.
        """
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config("censor", "policy") == "ignore":
                return ""
            raise error.Abort(_("censored node: %s") % short(self._filenode),
                              hint=_("set censor.policy to ignore errors"))
1042
1042
    def size(self):
        """Return the size of this file revision as reported by the filelog."""
        return self._filelog.size(self._filerev)
1045
1045
    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            # no rename recorded at all -- nothing to report
            return renamed

        if self.rev() == self.linkrev():
            # this changeset introduced the file revision, so the recorded
            # rename belongs to it
            return renamed

        # The linkrev points elsewhere: only report the copy if neither
        # parent already contains this exact file node (otherwise the
        # rename happened in an ancestor, not here).
        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                # file absent from this parent -- keep looking
                pass
        return renamed
1071
1071
1072 def children(self):
1072 def children(self):
1073 # hard for renames
1073 # hard for renames
1074 c = self._filelog.children(self._filenode)
1074 c = self._filelog.children(self._filenode)
1075 return [filectx(self._repo, self._path, fileid=x,
1075 return [filectx(self._repo, self._path, fileid=x,
1076 filelog=self._filelog) for x in c]
1076 filelog=self._filelog) for x in c]
1077
1077
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        # text/user/date/extra/changes mirror what will end up in the
        # commit; unspecified values fall back to the propertycaches below.
        super(committablectx, self).__init__(repo)
        self._rev = None
        self._node = None
        self._text = text
        if date:
            self._date = dateutil.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise error.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
        if self._extra['branch'] == '':
            # an empty branch name means the default branch
            self._extra['branch'] = 'default'

    def __bytes__(self):
        # e.g. "abcdef012345+" -- first parent plus a "dirty" marker
        return bytes(self._parents[0]) + "+"

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func

    @propertycache
    def _flagfunc(self):
        # dirstate.flagfunc falls back to _buildflagfunc when the
        # filesystem cannot report exec/symlink bits itself
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    @propertycache
    def _status(self):
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        # devel.default-date allows tests to pin the commit date
        ui = self._repo.ui
        date = ui.configdate('devel', 'default-date')
        if date is None:
            date = dateutil.makedate()
        return date

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        return None
    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        # all files touched by this (pending) commit
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)

    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def isinmemory(self):
        return False

    def tags(self):
        return []

    def bookmarks(self):
        # an uncommitted context carries its parents' bookmarks
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        phase = phases.draft # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        """Return the flags ('', 'l', 'x', ...) for path, or '' if unknown."""
        if r'_manifest' in self.__dict__:
            # a manifest has already been materialized; trust it
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def walk(self, match):
        '''Generates matching file names.'''
        return sorted(self._repo.dirstate.walk(match,
                                               subrepos=sorted(self.substate),
                                               unknown=True, ignored=False))

    def matches(self, match):
        # 'r' is the dirstate state for removed files; exclude them
        ds = self._repo.dirstate
        return sorted(f for f in ds.matches(match) if ds[f] != 'r')

    def ancestors(self):
        # parents first, then everything reachable from them
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
                [p.rev() for p in self._parents]):
            yield changectx(self._repo, a)

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.normal(f)
            for f in self.removed():
                self._repo.dirstate.drop(f)
            self._repo.dirstate.setparents(node)

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())

    def dirty(self, missing=False, merge=True, branch=True):
        return False
1279
1279
class workingctx(committablectx):
    """A workingctx object makes access to data related to
    the current working directory convenient.
    date - any valid date string or (unixtime, offset), or None.
    user - username string, or None.
    extra - a dictionary of extra values, or None.
    changes - a list of file lists as returned by localrepo.status()
    or None to use the repository status.
    """
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        # all real work happens in committablectx.__init__
        super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1292
1292
1293 def __iter__(self):
1293 def __iter__(self):
1294 d = self._repo.dirstate
1294 d = self._repo.dirstate
1295 for f in d:
1295 for f in d:
1296 if d[f] != 'r':
1296 if d[f] != 'r':
1297 yield f
1297 yield f
1298
1298
1299 def __contains__(self, key):
1299 def __contains__(self, key):
1300 return self._repo.dirstate[key] not in "?r"
1300 return self._repo.dirstate[key] not in "?r"
1301
1301
    def hex(self):
        """Return the hex form of the sentinel working-directory node id."""
        return hex(wdirid)
1304
1304
    @propertycache
    def _parents(self):
        """Parent changectxs of the working directory (one or two).

        A null second parent means there is no merge in progress, so it is
        dropped from the list.
        """
        p = self._repo.dirstate.parents()
        if p[1] == nullid:
            p = p[:-1]
        return [changectx(self._repo, x) for x in p]
1311
1311
    def _fileinfo(self, path):
        # populate __dict__['_manifest'] as workingctx has no _manifestdelta
        self._manifest
        return super(workingctx, self)._fileinfo(path)
1316
1316
    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        return workingfilectx(self._repo, path, workingctx=self,
                              filelog=filelog)
1321
1321
    def dirty(self, missing=False, merge=True, branch=True):
        "check whether a working directory is modified"
        # NOTE: this deliberately returns a truthy *object* (e.g. the p2
        # context or a file list), not a strict bool -- callers only test
        # truthiness.
        # check subrepos first
        for s in sorted(self.substate):
            if self.sub(s).dirty(missing=missing):
                return True
        # check current working dir
        return ((merge and self.p2()) or
                (branch and self.branch() != self.p1().branch()) or
                self.modified() or self.added() or self.removed() or
                (missing and self.deleted()))
1333
1333
    def add(self, list, prefix=""):
        """Schedule the given files for addition, returning the rejected ones.

        Files that do not exist, or are neither regular files nor symlinks,
        are warned about and collected in the returned list.
        """
        with self._repo.wlock():
            ui, ds = self._repo.ui, self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                # ds.pathto() returns an absolute file when this is invoked from
                # the keyword extension. That gets flagged as non-portable on
                # Windows, since it contains the drive letter and colon.
                scmutil.checkportable(ui, os.path.join(prefix, f))
                try:
                    st = lstat(f)
                except OSError:
                    ui.warn(_("%s does not exist!\n") % uipath(f))
                    rejected.append(f)
                    continue
                # warn (but still add) when the file is unusually large
                limit = ui.configbytes('ui', 'large-file-limit')
                if limit != 0 and st.st_size > limit:
                    ui.warn(_("%s: up to %d MB of RAM may be required "
                              "to manage this file\n"
                              "(use 'hg revert %s' to cancel the "
                              "pending addition)\n")
                            % (f, 3 * st.st_size // 1000000, uipath(f)))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(_("%s not added: only files and symlinks "
                              "supported currently\n") % uipath(f))
                    rejected.append(f)
                elif ds[f] in 'amn':
                    # already added, merged or normal -- nothing to do
                    ui.warn(_("%s already tracked!\n") % uipath(f))
                elif ds[f] == 'r':
                    # was marked removed: re-adding restores normal tracking
                    ds.normallookup(f)
                else:
                    ds.add(f)
            return rejected
1369
1369
    def forget(self, files, prefix=""):
        """Stop tracking the given files, returning those that were rejected.

        Files in state 'a' (freshly added) are simply dropped; any other
        tracked state is marked removed ('r').
        """
        with self._repo.wlock():
            ds = self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            for f in files:
                if f not in self._repo.dirstate:
                    self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
                    rejected.append(f)
                elif self._repo.dirstate[f] != 'a':
                    # normal/merged/removed file: mark it removed
                    self._repo.dirstate.remove(f)
                else:
                    # never committed: forget it entirely
                    self._repo.dirstate.drop(f)
            return rejected
1384
1384
    def undelete(self, list):
        """Restore files marked removed ('r') from a parent revision.

        The content is taken from the first parent that has the file and
        written back to the working directory; the dirstate entry is reset
        to normal.
        """
        pctxs = self.parents()
        with self._repo.wlock():
            ds = self._repo.dirstate
            for f in list:
                if self._repo.dirstate[f] != 'r':
                    self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
                else:
                    # and/or idiom: use p1's copy when present, else p2's
                    # (relies on file contexts always being truthy)
                    fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
                    t = fctx.data()
                    self._repo.wwrite(f, t, fctx.flags())
                    self._repo.dirstate.normal(f)
1397
1397
    def copy(self, source, dest):
        """Record that dest is a copy of source in the dirstate.

        Warns and does nothing when dest is missing or is not a regular
        file/symlink; otherwise dest is (re)tracked and the copy recorded.
        """
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            self._repo.ui.warn(_("%s does not exist!\n")
                               % self._repo.dirstate.pathto(dest))
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(_("copy failed: %s is not a file or a "
                                 "symbolic link\n")
                               % self._repo.dirstate.pathto(dest))
        else:
            with self._repo.wlock():
                if self._repo.dirstate[dest] in '?':
                    # untracked destination must first be added
                    self._repo.dirstate.add(dest)
                elif self._repo.dirstate[dest] in 'r':
                    # previously removed: resurrect the entry
                    self._repo.dirstate.normallookup(dest)
                self._repo.dirstate.copy(source, dest)
1418
1418
    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        """Build a matcher over the working directory for the given patterns."""
        r = self._repo

        # Only a case insensitive filesystem needs magic to translate user input
        # to actual case in the filesystem.
        icasefs = not util.fscasesensitive(r.root)
        return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
                              default, auditor=r.auditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn,
                              icasefs=icasefs)
1430
1430
1431 def _filtersuspectsymlink(self, files):
1431 def _filtersuspectsymlink(self, files):
1432 if not files or self._repo.dirstate._checklink:
1432 if not files or self._repo.dirstate._checklink:
1433 return files
1433 return files
1434
1434
1435 # Symlink placeholders may get non-symlink-like contents
1435 # Symlink placeholders may get non-symlink-like contents
1436 # via user error or dereferencing by NFS or Samba servers,
1436 # via user error or dereferencing by NFS or Samba servers,
1437 # so we filter out any placeholders that don't look like a
1437 # so we filter out any placeholders that don't look like a
1438 # symlink
1438 # symlink
1439 sane = []
1439 sane = []
1440 for f in files:
1440 for f in files:
1441 if self.flags(f) == 'l':
1441 if self.flags(f) == 'l':
1442 d = self[f].data()
1442 d = self[f].data()
1443 if (d == '' or len(d) >= 1024 or '\n' in d
1443 if (d == '' or len(d) >= 1024 or '\n' in d
1444 or stringutil.binary(d)):
1444 or stringutil.binary(d)):
1445 self._repo.ui.debug('ignoring suspect symlink placeholder'
1445 self._repo.ui.debug('ignoring suspect symlink placeholder'
1446 ' "%s"\n' % f)
1446 ' "%s"\n' % f)
1447 continue
1447 continue
1448 sane.append(f)
1448 sane.append(f)
1449 return sane
1449 return sane
1450
1450
    def _checklookup(self, files):
        """Re-check files the dirstate could not classify by stat alone.

        Returns a (modified, deleted, fixup) triple: fixup holds files that
        turned out to be clean and whose dirstate entry may be refreshed.
        """
        # check for any possibly clean files
        if not files:
            return [], [], []

        modified = []
        deleted = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            try:
                # This will return True for a file that got replaced by a
                # directory in the interim, but fixing that is pretty hard.
                if (f not in pctx or self.flags(f) != pctx.flags(f)
                    or pctx[f].cmp(self[f])):
                    modified.append(f)
                else:
                    fixup.append(f)
            except (IOError, OSError):
                # A file become inaccessible in between? Mark it as deleted,
                # matching dirstate behavior (issue5584).
                # The dirstate has more complex behavior around whether a
                # missing file matches a directory, etc, but we don't need to
                # bother with that: if f has made it to this point, we're sure
                # it's in the dirstate.
                deleted.append(f)

        return modified, deleted, fixup
1480
1480
    def _poststatusfixup(self, status, fixup):
        """update dirstate for files that are actually clean"""
        poststatus = self._repo.postdsstatus()
        if fixup or poststatus:
            try:
                # identity is captured *before* taking the lock so we can
                # detect a concurrent dirstate rewrite afterwards
                oldid = self._repo.dirstate.identity()

                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                with self._repo.wlock(False):
                    if self._repo.dirstate.identity() == oldid:
                        if fixup:
                            normal = self._repo.dirstate.normal
                            for f in fixup:
                                normal(f)
                            # write changes out explicitly, because nesting
                            # wlock at runtime may prevent 'wlock.release()'
                            # after this block from doing so for subsequent
                            # changing files
                            tr = self._repo.currenttransaction()
                            self._repo.dirstate.write(tr)

                        if poststatus:
                            for ps in poststatus:
                                ps(self, status)
                    else:
                        # in this case, writing changes out breaks
                        # consistency, because .hg/dirstate was
                        # already changed simultaneously after last
                        # caching (see also issue5584 for detail)
                        self._repo.ui.debug('skip updating dirstate: '
                                            'identity mismatch\n')
            except error.LockError:
                # the fixup is best-effort; losing the race is fine
                pass
            finally:
                # Even if the wlock couldn't be grabbed, clear out the list.
                self._repo.clearpostdsstatus()
1520
1520
    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        '''Gets the status from the dirstate -- internal use only.'''
        subrepos = []
        if '.hgsub' in self:
            subrepos = sorted(self.substate)
        # cmp holds files the dirstate could not decide on by stat alone
        cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
                                            clean=clean, unknown=unknown)

        # check for any possibly clean files
        fixup = []
        if cmp:
            modified2, deleted2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)
            s.deleted.extend(deleted2)

        if fixup and clean:
            s.clean.extend(fixup)

        self._poststatusfixup(s, fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(s.modified, s.added, s.removed,
                                              s.deleted, [], [], [])
            else:
                self._status = s

        return s
1551
1551
    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status

        This reuses the file nodeids from the parent, but we use special node
        identifiers for added and modified files. This is used by manifests
        merge to see that files are different and by update logic to avoid
        deleting newly added files.
        """
        # computed lazily and cached by @propertycache on first access
        return self._buildstatusmanifest(self._status)
1562
1562
    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results.

        Starts from a copy of p1's manifest; added and modified files get the
        ``addednodeid``/``modifiednodeid`` sentinel nodeids so that manifest
        comparisons see them as different, and deleted/removed files are
        dropped from the result.
        """
        parents = self.parents()

        man = parents[0].manifest().copy()

        ff = self._flagfunc
        for i, l in ((addednodeid, status.added),
                     (modifiednodeid, status.modified)):
            for f in l:
                man[f] = i
                try:
                    man.setflag(f, ff(f))
                except OSError:
                    # presumably the file vanished from the working directory
                    # between status and the flag lookup; keep the inherited
                    # flags in that case -- TODO confirm
                    pass

        for f in status.deleted + status.removed:
            if f in man:
                del man[f]

        return man
1584
1584
    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        # the passed-in 's' is discarded: status is recomputed from the
        # dirstate, which always compares the working directory against '.'
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo['.']:
            # not comparing against our parent: fall back to the generic
            # implementation in the base class, seeded with the dirstate
            # status computed above
            s = super(workingctx, self)._buildstatus(other, s, match,
                                                     listignored, listclean,
                                                     listunknown)
        return s
1604
1604
1605 def _matchstatus(self, other, match):
1605 def _matchstatus(self, other, match):
1606 """override the match method with a filter for directory patterns
1606 """override the match method with a filter for directory patterns
1607
1607
1608 We use inheritance to customize the match.bad method only in cases of
1608 We use inheritance to customize the match.bad method only in cases of
1609 workingctx since it belongs only to the working directory when
1609 workingctx since it belongs only to the working directory when
1610 comparing against the parent changeset.
1610 comparing against the parent changeset.
1611
1611
1612 If we aren't comparing against the working directory's parent, then we
1612 If we aren't comparing against the working directory's parent, then we
1613 just use the default match object sent to us.
1613 just use the default match object sent to us.
1614 """
1614 """
1615 if other != self._repo['.']:
1615 if other != self._repo['.']:
1616 def bad(f, msg):
1616 def bad(f, msg):
1617 # 'f' may be a directory pattern from 'match.files()',
1617 # 'f' may be a directory pattern from 'match.files()',
1618 # so 'f not in ctx1' is not enough
1618 # so 'f not in ctx1' is not enough
1619 if f not in other and not other.hasdir(f):
1619 if f not in other and not other.hasdir(f):
1620 self._repo.ui.warn('%s: %s\n' %
1620 self._repo.ui.warn('%s: %s\n' %
1621 (self._repo.dirstate.pathto(f), msg))
1621 (self._repo.dirstate.pathto(f), msg))
1622 match.bad = bad
1622 match.bad = bad
1623 return match
1623 return match
1624
1624
    def markcommitted(self, node):
        # let the base class perform the generic post-commit bookkeeping
        # first, then notify the sparse support so it can update the working
        # copy for the newly created commit
        super(workingctx, self).markcommitted(node)

        sparse.aftercommit(self._repo, node)
1629
1629
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""

    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None
        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def nodefor(ctx, name):
            return ctx._manifest.get(name, nullid)

        name = self._path
        flog = self._filelog
        parentctxs = self._changectx._parents
        renamed = self.renamed()

        # the first candidate is either the rename source or this path in
        # the first parent; remaining parents contribute this path directly
        if renamed:
            candidates = [renamed + (None,)]
        else:
            candidates = [(name, nodefor(parentctxs[0], name), flog)]
        candidates.extend((name, nodefor(pctx, name), flog)
                          for pctx in parentctxs[1:])

        # drop parents in which the file does not exist at all
        return [self._parentfilectx(p, fileid=n, filelog=l)
                for p, n, l in candidates if n != nullid]

    def children(self):
        return []
1676
1676
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""
    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        # lazily bind to a fresh working directory context
        return workingctx(self._repo)

    def data(self):
        return self._repo.wread(self._path)
    def renamed(self):
        # Returns (source path, nodeid of source in p1's manifest) or None.
        # A copy recorded in the dirstate whose source is absent from p1's
        # manifest yields nullid as the node.
        rp = self._repo.dirstate.copied(self._path)
        if not rp:
            return None
        return rp, self._changectx._parents[0]._manifest.get(rp, nullid)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size
    def date(self):
        # mtime of the on-disk file paired with the changectx's timezone;
        # if the file is gone (ENOENT), fall back to the changectx date
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def exists(self):
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        # like exists() but does not follow symlinks
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        rmdir = self._repo.ui.configbool('experimental', 'removeemptydirs')
        self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing,
                                   rmdir=rmdir)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        """wraps repo.wwrite"""
        self._repo.wwrite(self._path, data, flags,
                          backgroundclose=backgroundclose,
                          **kwargs)

    def markcopied(self, src):
        """marks this file a copy of `src`"""
        # only record the copy for files the dirstate currently tracks
        # (normal / merged / added states)
        if self._repo.dirstate[self._path] in "nma":
            self._repo.dirstate.copy(src, self._path)

    def clearunknown(self):
        """Removes conflicting items in the working directory so that
        ``write()`` can be called successfully.
        """
        wvfs = self._repo.wvfs
        f = self._path
        wvfs.audit(f)
        if wvfs.isdir(f) and not wvfs.islink(f):
            # a real directory is in the way of this path; remove it so a
            # file can be written here
            wvfs.rmtree(f, forcibly=True)
        if self._repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
            # a file at any ancestor directory of f also blocks creation;
            # unlink the deepest such file
            for p in reversed(list(util.finddirs(f))):
                if wvfs.isfileorlink(p):
                    wvfs.unlink(p)
                    break

    def setflags(self, l, x):
        self._repo.wvfs.setflags(self._path, l, x)
1758
1758
1759 class overlayworkingctx(committablectx):
1759 class overlayworkingctx(committablectx):
1760 """Wraps another mutable context with a write-back cache that can be
1760 """Wraps another mutable context with a write-back cache that can be
1761 converted into a commit context.
1761 converted into a commit context.
1762
1762
1763 self._cache[path] maps to a dict with keys: {
1763 self._cache[path] maps to a dict with keys: {
1764 'exists': bool?
1764 'exists': bool?
1765 'date': date?
1765 'date': date?
1766 'data': str?
1766 'data': str?
1767 'flags': str?
1767 'flags': str?
1768 'copied': str? (path or None)
1768 'copied': str? (path or None)
1769 }
1769 }
1770 If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
1770 If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
1771 is `False`, the file was deleted.
1771 is `False`, the file was deleted.
1772 """
1772 """
1773
1773
1774 def __init__(self, repo):
1774 def __init__(self, repo):
1775 super(overlayworkingctx, self).__init__(repo)
1775 super(overlayworkingctx, self).__init__(repo)
1776 self.clean()
1776 self.clean()
1777
1777
1778 def setbase(self, wrappedctx):
1778 def setbase(self, wrappedctx):
1779 self._wrappedctx = wrappedctx
1779 self._wrappedctx = wrappedctx
1780 self._parents = [wrappedctx]
1780 self._parents = [wrappedctx]
1781 # Drop old manifest cache as it is now out of date.
1781 # Drop old manifest cache as it is now out of date.
1782 # This is necessary when, e.g., rebasing several nodes with one
1782 # This is necessary when, e.g., rebasing several nodes with one
1783 # ``overlayworkingctx`` (e.g. with --collapse).
1783 # ``overlayworkingctx`` (e.g. with --collapse).
1784 util.clearcachedproperty(self, '_manifest')
1784 util.clearcachedproperty(self, '_manifest')
1785
1785
1786 def data(self, path):
1786 def data(self, path):
1787 if self.isdirty(path):
1787 if self.isdirty(path):
1788 if self._cache[path]['exists']:
1788 if self._cache[path]['exists']:
1789 if self._cache[path]['data']:
1789 if self._cache[path]['data']:
1790 return self._cache[path]['data']
1790 return self._cache[path]['data']
1791 else:
1791 else:
1792 # Must fallback here, too, because we only set flags.
1792 # Must fallback here, too, because we only set flags.
1793 return self._wrappedctx[path].data()
1793 return self._wrappedctx[path].data()
1794 else:
1794 else:
1795 raise error.ProgrammingError("No such file or directory: %s" %
1795 raise error.ProgrammingError("No such file or directory: %s" %
1796 path)
1796 path)
1797 else:
1797 else:
1798 return self._wrappedctx[path].data()
1798 return self._wrappedctx[path].data()
1799
1799
1800 @propertycache
1800 @propertycache
1801 def _manifest(self):
1801 def _manifest(self):
1802 parents = self.parents()
1802 parents = self.parents()
1803 man = parents[0].manifest().copy()
1803 man = parents[0].manifest().copy()
1804
1804
1805 flag = self._flagfunc
1805 flag = self._flagfunc
1806 for path in self.added():
1806 for path in self.added():
1807 man[path] = addednodeid
1807 man[path] = addednodeid
1808 man.setflag(path, flag(path))
1808 man.setflag(path, flag(path))
1809 for path in self.modified():
1809 for path in self.modified():
1810 man[path] = modifiednodeid
1810 man[path] = modifiednodeid
1811 man.setflag(path, flag(path))
1811 man.setflag(path, flag(path))
1812 for path in self.removed():
1812 for path in self.removed():
1813 del man[path]
1813 del man[path]
1814 return man
1814 return man
1815
1815
1816 @propertycache
1816 @propertycache
1817 def _flagfunc(self):
1817 def _flagfunc(self):
1818 def f(path):
1818 def f(path):
1819 return self._cache[path]['flags']
1819 return self._cache[path]['flags']
1820 return f
1820 return f
1821
1821
1822 def files(self):
1822 def files(self):
1823 return sorted(self.added() + self.modified() + self.removed())
1823 return sorted(self.added() + self.modified() + self.removed())
1824
1824
1825 def modified(self):
1825 def modified(self):
1826 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1826 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1827 self._existsinparent(f)]
1827 self._existsinparent(f)]
1828
1828
1829 def added(self):
1829 def added(self):
1830 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1830 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1831 not self._existsinparent(f)]
1831 not self._existsinparent(f)]
1832
1832
1833 def removed(self):
1833 def removed(self):
1834 return [f for f in self._cache.keys() if
1834 return [f for f in self._cache.keys() if
1835 not self._cache[f]['exists'] and self._existsinparent(f)]
1835 not self._cache[f]['exists'] and self._existsinparent(f)]
1836
1836
1837 def isinmemory(self):
1837 def isinmemory(self):
1838 return True
1838 return True
1839
1839
1840 def filedate(self, path):
1840 def filedate(self, path):
1841 if self.isdirty(path):
1841 if self.isdirty(path):
1842 return self._cache[path]['date']
1842 return self._cache[path]['date']
1843 else:
1843 else:
1844 return self._wrappedctx[path].date()
1844 return self._wrappedctx[path].date()
1845
1845
1846 def markcopied(self, path, origin):
1846 def markcopied(self, path, origin):
1847 if self.isdirty(path):
1847 if self.isdirty(path):
1848 self._cache[path]['copied'] = origin
1848 self._cache[path]['copied'] = origin
1849 else:
1849 else:
1850 raise error.ProgrammingError('markcopied() called on clean context')
1850 raise error.ProgrammingError('markcopied() called on clean context')
1851
1851
1852 def copydata(self, path):
1852 def copydata(self, path):
1853 if self.isdirty(path):
1853 if self.isdirty(path):
1854 return self._cache[path]['copied']
1854 return self._cache[path]['copied']
1855 else:
1855 else:
1856 raise error.ProgrammingError('copydata() called on clean context')
1856 raise error.ProgrammingError('copydata() called on clean context')
1857
1857
1858 def flags(self, path):
1858 def flags(self, path):
1859 if self.isdirty(path):
1859 if self.isdirty(path):
1860 if self._cache[path]['exists']:
1860 if self._cache[path]['exists']:
1861 return self._cache[path]['flags']
1861 return self._cache[path]['flags']
1862 else:
1862 else:
1863 raise error.ProgrammingError("No such file or directory: %s" %
1863 raise error.ProgrammingError("No such file or directory: %s" %
1864 self._path)
1864 self._path)
1865 else:
1865 else:
1866 return self._wrappedctx[path].flags()
1866 return self._wrappedctx[path].flags()
1867
1867
1868 def _existsinparent(self, path):
1868 def _existsinparent(self, path):
1869 try:
1869 try:
1870 # ``commitctx` raises a ``ManifestLookupError`` if a path does not
1870 # ``commitctx` raises a ``ManifestLookupError`` if a path does not
1871 # exist, unlike ``workingctx``, which returns a ``workingfilectx``
1871 # exist, unlike ``workingctx``, which returns a ``workingfilectx``
1872 # with an ``exists()`` function.
1872 # with an ``exists()`` function.
1873 self._wrappedctx[path]
1873 self._wrappedctx[path]
1874 return True
1874 return True
1875 except error.ManifestLookupError:
1875 except error.ManifestLookupError:
1876 return False
1876 return False
1877
1877
1878 def _auditconflicts(self, path):
1878 def _auditconflicts(self, path):
1879 """Replicates conflict checks done by wvfs.write().
1879 """Replicates conflict checks done by wvfs.write().
1880
1880
1881 Since we never write to the filesystem and never call `applyupdates` in
1881 Since we never write to the filesystem and never call `applyupdates` in
1882 IMM, we'll never check that a path is actually writable -- e.g., because
1882 IMM, we'll never check that a path is actually writable -- e.g., because
1883 it adds `a/foo`, but `a` is actually a file in the other commit.
1883 it adds `a/foo`, but `a` is actually a file in the other commit.
1884 """
1884 """
1885 def fail(path, component):
1885 def fail(path, component):
1886 # p1() is the base and we're receiving "writes" for p2()'s
1886 # p1() is the base and we're receiving "writes" for p2()'s
1887 # files.
1887 # files.
1888 if 'l' in self.p1()[component].flags():
1888 if 'l' in self.p1()[component].flags():
1889 raise error.Abort("error: %s conflicts with symlink %s "
1889 raise error.Abort("error: %s conflicts with symlink %s "
1890 "in %s." % (path, component,
1890 "in %s." % (path, component,
1891 self.p1().rev()))
1891 self.p1().rev()))
1892 else:
1892 else:
1893 raise error.Abort("error: '%s' conflicts with file '%s' in "
1893 raise error.Abort("error: '%s' conflicts with file '%s' in "
1894 "%s." % (path, component,
1894 "%s." % (path, component,
1895 self.p1().rev()))
1895 self.p1().rev()))
1896
1896
1897 # Test that each new directory to be created to write this path from p2
1897 # Test that each new directory to be created to write this path from p2
1898 # is not a file in p1.
1898 # is not a file in p1.
1899 components = path.split('/')
1899 components = path.split('/')
1900 for i in xrange(len(components)):
1900 for i in xrange(len(components)):
1901 component = "/".join(components[0:i])
1901 component = "/".join(components[0:i])
1902 if component in self.p1():
1902 if component in self.p1():
1903 fail(path, component)
1903 fail(path, component)
1904
1904
1905 # Test the other direction -- that this path from p2 isn't a directory
1905 # Test the other direction -- that this path from p2 isn't a directory
1906 # in p1 (test that p1 doesn't any paths matching `path/*`).
1906 # in p1 (test that p1 doesn't any paths matching `path/*`).
1907 match = matchmod.match('/', '', [path + '/'], default=b'relpath')
1907 match = matchmod.match('/', '', [path + '/'], default=b'relpath')
1908 matches = self.p1().manifest().matches(match)
1908 matches = self.p1().manifest().matches(match)
1909 if len(matches) > 0:
1909 if len(matches) > 0:
1910 if len(matches) == 1 and matches.keys()[0] == path:
1910 if len(matches) == 1 and matches.keys()[0] == path:
1911 return
1911 return
1912 raise error.Abort("error: file '%s' cannot be written because "
1912 raise error.Abort("error: file '%s' cannot be written because "
1913 " '%s/' is a folder in %s (containing %d "
1913 " '%s/' is a folder in %s (containing %d "
1914 "entries: %s)"
1914 "entries: %s)"
1915 % (path, path, self.p1(), len(matches),
1915 % (path, path, self.p1(), len(matches),
1916 ', '.join(matches.keys())))
1916 ', '.join(matches.keys())))
1917
1917
1918 def write(self, path, data, flags='', **kwargs):
1918 def write(self, path, data, flags='', **kwargs):
1919 if data is None:
1919 if data is None:
1920 raise error.ProgrammingError("data must be non-None")
1920 raise error.ProgrammingError("data must be non-None")
1921 self._auditconflicts(path)
1921 self._auditconflicts(path)
1922 self._markdirty(path, exists=True, data=data, date=dateutil.makedate(),
1922 self._markdirty(path, exists=True, data=data, date=dateutil.makedate(),
1923 flags=flags)
1923 flags=flags)
1924
1924
1925 def setflags(self, path, l, x):
1925 def setflags(self, path, l, x):
1926 self._markdirty(path, exists=True, date=dateutil.makedate(),
1926 self._markdirty(path, exists=True, date=dateutil.makedate(),
1927 flags=(l and 'l' or '') + (x and 'x' or ''))
1927 flags=(l and 'l' or '') + (x and 'x' or ''))
1928
1928
1929 def remove(self, path):
1929 def remove(self, path):
1930 self._markdirty(path, exists=False)
1930 self._markdirty(path, exists=False)
1931
1931
1932 def exists(self, path):
1932 def exists(self, path):
1933 """exists behaves like `lexists`, but needs to follow symlinks and
1933 """exists behaves like `lexists`, but needs to follow symlinks and
1934 return False if they are broken.
1934 return False if they are broken.
1935 """
1935 """
1936 if self.isdirty(path):
1936 if self.isdirty(path):
1937 # If this path exists and is a symlink, "follow" it by calling
1937 # If this path exists and is a symlink, "follow" it by calling
1938 # exists on the destination path.
1938 # exists on the destination path.
1939 if (self._cache[path]['exists'] and
1939 if (self._cache[path]['exists'] and
1940 'l' in self._cache[path]['flags']):
1940 'l' in self._cache[path]['flags']):
1941 return self.exists(self._cache[path]['data'].strip())
1941 return self.exists(self._cache[path]['data'].strip())
1942 else:
1942 else:
1943 return self._cache[path]['exists']
1943 return self._cache[path]['exists']
1944
1944
1945 return self._existsinparent(path)
1945 return self._existsinparent(path)
1946
1946
1947 def lexists(self, path):
1947 def lexists(self, path):
1948 """lexists returns True if the path exists"""
1948 """lexists returns True if the path exists"""
1949 if self.isdirty(path):
1949 if self.isdirty(path):
1950 return self._cache[path]['exists']
1950 return self._cache[path]['exists']
1951
1951
1952 return self._existsinparent(path)
1952 return self._existsinparent(path)
1953
1953
1954 def size(self, path):
1954 def size(self, path):
1955 if self.isdirty(path):
1955 if self.isdirty(path):
1956 if self._cache[path]['exists']:
1956 if self._cache[path]['exists']:
1957 return len(self._cache[path]['data'])
1957 return len(self._cache[path]['data'])
1958 else:
1958 else:
1959 raise error.ProgrammingError("No such file or directory: %s" %
1959 raise error.ProgrammingError("No such file or directory: %s" %
1960 self._path)
1960 self._path)
1961 return self._wrappedctx[path].size()
1961 return self._wrappedctx[path].size()
1962
1962
1963 def tomemctx(self, text, branch=None, extra=None, date=None, parents=None,
1963 def tomemctx(self, text, branch=None, extra=None, date=None, parents=None,
1964 user=None, editor=None):
1964 user=None, editor=None):
1965 """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
1965 """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
1966 committed.
1966 committed.
1967
1967
1968 ``text`` is the commit message.
1968 ``text`` is the commit message.
1969 ``parents`` (optional) are rev numbers.
1969 ``parents`` (optional) are rev numbers.
1970 """
1970 """
1971 # Default parents to the wrapped contexts' if not passed.
1971 # Default parents to the wrapped contexts' if not passed.
1972 if parents is None:
1972 if parents is None:
1973 parents = self._wrappedctx.parents()
1973 parents = self._wrappedctx.parents()
1974 if len(parents) == 1:
1974 if len(parents) == 1:
1975 parents = (parents[0], None)
1975 parents = (parents[0], None)
1976
1976
1977 # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
1977 # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
1978 if parents[1] is None:
1978 if parents[1] is None:
1979 parents = (self._repo[parents[0]], None)
1979 parents = (self._repo[parents[0]], None)
1980 else:
1980 else:
1981 parents = (self._repo[parents[0]], self._repo[parents[1]])
1981 parents = (self._repo[parents[0]], self._repo[parents[1]])
1982
1982
1983 files = self._cache.keys()
1983 files = self._cache.keys()
1984 def getfile(repo, memctx, path):
1984 def getfile(repo, memctx, path):
1985 if self._cache[path]['exists']:
1985 if self._cache[path]['exists']:
1986 return memfilectx(repo, memctx, path,
1986 return memfilectx(repo, memctx, path,
1987 self._cache[path]['data'],
1987 self._cache[path]['data'],
1988 'l' in self._cache[path]['flags'],
1988 'l' in self._cache[path]['flags'],
1989 'x' in self._cache[path]['flags'],
1989 'x' in self._cache[path]['flags'],
1990 self._cache[path]['copied'])
1990 self._cache[path]['copied'])
1991 else:
1991 else:
1992 # Returning None, but including the path in `files`, is
1992 # Returning None, but including the path in `files`, is
1993 # necessary for memctx to register a deletion.
1993 # necessary for memctx to register a deletion.
1994 return None
1994 return None
1995 return memctx(self._repo, parents, text, files, getfile, date=date,
1995 return memctx(self._repo, parents, text, files, getfile, date=date,
1996 extra=extra, user=user, branch=branch, editor=editor)
1996 extra=extra, user=user, branch=branch, editor=editor)
1997
1997
1998 def isdirty(self, path):
1998 def isdirty(self, path):
1999 return path in self._cache
1999 return path in self._cache
2000
2000
2001 def isempty(self):
2001 def isempty(self):
2002 # We need to discard any keys that are actually clean before the empty
2002 # We need to discard any keys that are actually clean before the empty
2003 # commit check.
2003 # commit check.
2004 self._compact()
2004 self._compact()
2005 return len(self._cache) == 0
2005 return len(self._cache) == 0
2006
2006
2007 def clean(self):
2007 def clean(self):
2008 self._cache = {}
2008 self._cache = {}
2009
2009
def _compact(self):
    """Removes keys from the cache that are actually clean, by comparing
    them with the underlying context.

    This can occur during the merge process, e.g. by passing --tool :local
    to resolve a conflict.
    """
    unchanged = []
    for path, entry in list(self._cache.items()):
        try:
            base = self._wrappedctx[path]
            # identical data and flags mean this entry is a no-op
            if (base.data() == entry['data'] and
                base.flags() == entry['flags']):
                unchanged.append(path)
        except error.ManifestLookupError:
            # Path not in the underlying manifest (created).
            continue

    for path in unchanged:
        del self._cache[path]
    return unchanged
2032
2032
def _markdirty(self, path, exists, data=None, date=None, flags=''):
    """Record a dirty-cache entry for ``path``.

    ``exists`` is False for a deletion; ``data``, ``date`` and ``flags``
    describe the new file state.  The copy source always starts out unset.
    """
    self._cache[path] = dict(exists=exists,
                             data=data,
                             date=date,
                             flags=flags,
                             copied=None)
2041
2041
def filectx(self, path, filelog=None):
    """Return an ``overlayworkingfilectx`` for ``path`` backed by this ctx."""
    return overlayworkingfilectx(self._repo, path, filelog=filelog,
                                 parent=self)
2045
2045
class overlayworkingfilectx(committablefilectx):
    """Wrap a ``workingfilectx`` but intercept all writes into an in-memory
    cache, which can be flushed through later by calling ``flush()``.

    Almost every operation simply delegates to the parent (overlay) context
    keyed by this file's path.
    """

    def __init__(self, repo, path, filelog=None, parent=None):
        super(overlayworkingfilectx, self).__init__(repo, path, filelog,
                                                    parent)
        self._repo = repo
        self._parent = parent
        self._path = path

    def cmp(self, fctx):
        """True if this file's content differs from ``fctx``'s."""
        return self.data() != fctx.data()

    def changectx(self):
        return self._parent

    def data(self):
        return self._parent.data(self._path)

    def date(self):
        return self._parent.filedate(self._path)

    def exists(self):
        return self.lexists()

    def lexists(self):
        return self._parent.exists(self._path)

    def renamed(self):
        """Return (source path, source filenode) or None if not a copy."""
        source = self._parent.copydata(self._path)
        if not source:
            return None
        p1manifest = self._changectx._parents[0]._manifest
        return source, p1manifest.get(source, nullid)

    def size(self):
        return self._parent.size(self._path)

    def markcopied(self, origin):
        self._parent.markcopied(self._path, origin)

    def audit(self):
        # intentionally a no-op for in-memory files
        pass

    def flags(self):
        return self._parent.flags(self._path)

    def setflags(self, islink, isexec):
        return self._parent.setflags(self._path, islink, isexec)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        return self._parent.write(self._path, data, flags, **kwargs)

    def remove(self, ignoremissing=False):
        return self._parent.remove(self._path)

    def clearunknown(self):
        # intentionally a no-op; there is no on-disk state to clear
        pass
2104
2104
class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """
    def __init__(self, repo, changes,
                 text="", user=None, date=None, extra=None):
        # super(workingctx, ...) deliberately starts the MRO lookup *after*
        # workingctx, so workingctx.__init__ is skipped and the grandparent
        # initializer runs with the precomputed ``changes``.
        super(workingctx, self).__init__(repo, text, user, date, extra,
                                         changes)

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        if clean:
            cleanfiles = [f for f in self._manifest
                          if f not in self._changedset]
        else:
            cleanfiles = []
        modified = [f for f in self._status.modified if match(f)]
        added = [f for f in self._status.added if match(f)]
        removed = [f for f in self._status.removed if match(f)]
        return scmutil.status(modified, added, removed,
                              [], [], [], cleanfiles)

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        status = self._status
        return set(status.modified) | set(status.added) | set(status.removed)
2140
2140
def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    memo = {}

    def getfilectx(repo, memctx, path):
        # EAFP: first access for a path populates the memo
        try:
            return memo[path]
        except KeyError:
            result = func(repo, memctx, path)
            memo[path] = result
            return result

    return getfilectx
2156
2156
def memfilefromctx(ctx):
    """Given a context return a memfilectx for ctx[path]

    This is a convenience method for building a memctx based on another
    context.
    """
    def getfilectx(repo, memctx, path):
        fctx = ctx[path]
        # renamed() yields (path, node), but only the source path is kept
        # here — apparently a single copy parent is all that is tracked
        copysource = fctx.renamed()
        if copysource:
            copysource = copysource[0]
        return memfilectx(repo, memctx, path, fctx.data(),
                          islink=fctx.islink(), isexec=fctx.isexec(),
                          copied=copysource)

    return getfilectx
2175
2175
def memfilefrompatch(patchstore):
    """Given a patch (e.g. patchstore object) return a memfilectx

    This is a convenience method for building a memctx based on a patchstore.
    """
    def getfilectx(repo, memctx, path):
        data, mode, copied = patchstore.getfile(path)
        if data is None:
            # None signals a deleted file to memctx
            return None
        islink, isexec = mode
        return memfilectx(repo, memctx, path, data,
                          islink=islink, isexec=isexec, copied=copied)

    return getfilectx
2190
2190
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time while
    related files data and is made available through a callback
    mechanism.  'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn return None for recent
    Mercurial. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by dateutil.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing
    # files. Extensions that need to retain compatibility across Mercurial
    # 3.1 can use this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, branch=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        # exactly two parent identifiers are expected; None means nullid
        p1, p2 = [(p or nullid) for p in parents]
        self._parents = [self._repo[p] for p in (p1, p2)]
        self._files = sorted(set(files))
        if branch is not None:
            self._extra['branch'] = encoding.fromlocal(branch)
        self.substate = {}

        if isinstance(filectxfn, patch.filestore):
            filectxfn = memfilefrompatch(filectxfn)
        elif not callable(filectxfn):
            # if store is not callable, wrap it in a function
            filectxfn = memfilefromctx(filectxfn)

        # memoizing increases performance for e.g. vcs convert scenarios.
        self._filectxfn = makecachingfilectxfn(filectxfn)

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            p1node = p2node = nullid
            fparents = pctx[f].parents() # if file isn't in pctx, check p2?
            if len(fparents) > 0:
                p1node = fparents[0].filenode()
            if len(fparents) > 1:
                p2node = fparents[1].filenode()
            man[f] = revlog.hash(self[f].data(), p1node, p2node)

        for f in self._status.added:
            man[f] = revlog.hash(self[f].data(), nullid, nullid)

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            def managing(f):
                return f in man1 or f in man2
        else:
            def managing(f):
                return f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
2313
2313
class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, changectx, path, data, islink=False,
                 isexec=False, copied=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, changectx)
        self._data = data
        # keep at most a single flag character; 'l' takes precedence over
        # 'x' so the invalid combination 'lx' can never be produced here
        self._flags = 'l' if islink else ('x' if isexec else '')
        self._copied = (copied, nullid) if copied else None

    def data(self):
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags, **kwargs):
        """wraps repo.wwrite"""
        self._data = data
2346
2351
class overlayfilectx(committablefilectx):
    """Like memfilectx but take an original filectx and optional parameters to
    override parts of it. This is useful when fctx.data() is expensive (i.e.
    flag processor is expensive) and raw data, flags, and filenode could be
    reused (ex. rebase or mode-only amend a REVIDX_EXTSTORED file).
    """

    def __init__(self, originalfctx, datafunc=None, path=None, flags=None,
                 copied=None, ctx=None):
        """originalfctx: filecontext to duplicate

        datafunc: None or a function to override data (file content). It is a
        function to be lazy. path, flags, copied, ctx: None or overridden value

        copied could be (path, rev), or False. copied could also be just path,
        and will be converted to (path, nullid). This simplifies some callers.
        """

        if path is None:
            path = originalfctx.path()
        if ctx is None:
            ctx = originalfctx.changectx()
            ctxmatch = lambda: True
        else:
            ctxmatch = lambda: ctx == originalfctx.changectx()

        repo = originalfctx.repo()
        flog = originalfctx.filelog()
        super(overlayfilectx, self).__init__(repo, path, flog, ctx)

        if copied is None:
            copied = originalfctx.renamed()
            copiedmatch = lambda: True
        else:
            if copied and not isinstance(copied, tuple):
                # repo._filecommit will recalculate copyrev so nullid is okay
                copied = (copied, nullid)
            copiedmatch = lambda: copied == originalfctx.renamed()

        # rawdata, rawflags and the filenode may only be reused when nothing
        # that could affect them is overridden: data, copied (could affect
        # data), and ctx (could affect filelog parents).
        # repo._filecommit should double check filelog parents anyway.
        #
        # path and flags are not hashed in the filelog (only in the
        # manifestlog), so overriding them does not prevent reuse.
        #
        # If ctx or copied is overridden to a value equal to originalfctx's,
        # it still counts as reusable. originalfctx.renamed() may be a bit
        # expensive, so it is only called when necessary; datafunc is assumed
        # always expensive and is never called for this "reusable" test.
        reusable = datafunc is None and ctxmatch() and copiedmatch()

        if datafunc is None:
            datafunc = originalfctx.data
        if flags is None:
            flags = originalfctx.flags()

        self._datafunc = datafunc
        self._flags = flags
        self._copied = copied

        if reusable:
            # copy extra fields from originalfctx
            for attrname in ('rawdata', 'rawflags', '_filenode', '_filerev'):
                if util.safehasattr(originalfctx, attrname):
                    setattr(self, attrname, getattr(originalfctx, attrname))

    def data(self):
        return self._datafunc()
2417
2422
2418 class metadataonlyctx(committablectx):
2423 class metadataonlyctx(committablectx):
2419 """Like memctx but it's reusing the manifest of different commit.
2424 """Like memctx but it's reusing the manifest of different commit.
2420 Intended to be used by lightweight operations that are creating
2425 Intended to be used by lightweight operations that are creating
2421 metadata-only changes.
2426 metadata-only changes.
2422
2427
2423 Revision information is supplied at initialization time. 'repo' is the
2428 Revision information is supplied at initialization time. 'repo' is the
2424 current localrepo, 'ctx' is original revision which manifest we're reuisng
2429 current localrepo, 'ctx' is original revision which manifest we're reuisng
2425 'parents' is a sequence of two parent revisions identifiers (pass None for
2430 'parents' is a sequence of two parent revisions identifiers (pass None for
2426 every missing parent), 'text' is the commit.
2431 every missing parent), 'text' is the commit.
2427
2432
2428 user receives the committer name and defaults to current repository
2433 user receives the committer name and defaults to current repository
2429 username, date is the commit date in any format supported by
2434 username, date is the commit date in any format supported by
2430 dateutil.parsedate() and defaults to current date, extra is a dictionary of
2435 dateutil.parsedate() and defaults to current date, extra is a dictionary of
2431 metadata or is left empty.
2436 metadata or is left empty.
2432 """
2437 """
2433 def __init__(self, repo, originalctx, parents=None, text=None, user=None,
2438 def __init__(self, repo, originalctx, parents=None, text=None, user=None,
2434 date=None, extra=None, editor=False):
2439 date=None, extra=None, editor=False):
2435 if text is None:
2440 if text is None:
2436 text = originalctx.description()
2441 text = originalctx.description()
2437 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2442 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2438 self._rev = None
2443 self._rev = None
2439 self._node = None
2444 self._node = None
2440 self._originalctx = originalctx
2445 self._originalctx = originalctx
2441 self._manifestnode = originalctx.manifestnode()
2446 self._manifestnode = originalctx.manifestnode()
2442 if parents is None:
2447 if parents is None:
2443 parents = originalctx.parents()
2448 parents = originalctx.parents()
2444 else:
2449 else:
2445 parents = [repo[p] for p in parents if p is not None]
2450 parents = [repo[p] for p in parents if p is not None]
2446 parents = parents[:]
2451 parents = parents[:]
2447 while len(parents) < 2:
2452 while len(parents) < 2:
2448 parents.append(repo[nullid])
2453 parents.append(repo[nullid])
2449 p1, p2 = self._parents = parents
2454 p1, p2 = self._parents = parents
2450
2455
2451 # sanity check to ensure that the reused manifest parents are
2456 # sanity check to ensure that the reused manifest parents are
2452 # manifests of our commit parents
2457 # manifests of our commit parents
2453 mp1, mp2 = self.manifestctx().parents
2458 mp1, mp2 = self.manifestctx().parents
2454 if p1 != nullid and p1.manifestnode() != mp1:
2459 if p1 != nullid and p1.manifestnode() != mp1:
2455 raise RuntimeError('can\'t reuse the manifest: '
2460 raise RuntimeError('can\'t reuse the manifest: '
2456 'its p1 doesn\'t match the new ctx p1')
2461 'its p1 doesn\'t match the new ctx p1')
2457 if p2 != nullid and p2.manifestnode() != mp2:
2462 if p2 != nullid and p2.manifestnode() != mp2:
2458 raise RuntimeError('can\'t reuse the manifest: '
2463 raise RuntimeError('can\'t reuse the manifest: '
2459 'its p2 doesn\'t match the new ctx p2')
2464 'its p2 doesn\'t match the new ctx p2')
2460
2465
2461 self._files = originalctx.files()
2466 self._files = originalctx.files()
2462 self.substate = {}
2467 self.substate = {}
2463
2468
2464 if editor:
2469 if editor:
2465 self._text = editor(self._repo, self, [])
2470 self._text = editor(self._repo, self, [])
2466 self._repo.savecommitmessage(self._text)
2471 self._repo.savecommitmessage(self._text)
2467
2472
def manifestnode(self):
    """Return the node id of the manifest being reused from the
    original context."""
    return self._manifestnode
2470
2475
2471 @property
2476 @property
2472 def _manifestctx(self):
2477 def _manifestctx(self):
2473 return self._repo.manifestlog[self._manifestnode]
2478 return self._repo.manifestlog[self._manifestnode]
2474
2479
def filectx(self, path, filelog=None):
    """Delegate file context lookups to the original context."""
    return self._originalctx.filectx(path, filelog=filelog)
2477
2482
def commit(self):
    """commit context to the repo"""
    return self._repo.commitctx(self)
2481
2486
2482 @property
2487 @property
2483 def _manifest(self):
2488 def _manifest(self):
2484 return self._originalctx.manifest()
2489 return self._originalctx.manifest()
2485
2490
@propertycache
def _status(self):
    """Compute an exact status from the ``files`` recorded in the
    original context and the manifests of this context's parents.
    """
    man1 = self.p1().manifest()
    second = self._parents[1]
    # Note: "1 < len(self._parents)" is useless as a second-parent test
    # here, because metadataonlyctx._parents is always padded out to
    # exactly two entries (with repo[nullid] as filler).
    if second.node() != nullid:
        man2 = second.manifest()

        def managing(f):
            return f in man1 or f in man2
    else:

        def managing(f):
            return f in man1

    modified = []
    added = []
    removed = []
    for f in self._files:
        if not managing(f):
            added.append(f)
        elif f in self:
            modified.append(f)
        else:
            removed.append(f)

    return scmutil.status(modified, added, removed, [], [], [], [])
2512
2517
class arbitraryfilectx(object):
    """Allows you to use filectx-like functions on a file in an arbitrary
    location on disk, possibly not in the working directory.
    """
    def __init__(self, path, repo=None):
        # Repo is optional because contrib/simplemerge uses this class.
        self._repo = repo
        self._path = path

    def cmp(self, fctx):
        """Return True if this file's content differs from ``fctx``'s."""
        # filecmp follows symlinks whereas `cmp` should not, so skip the fast
        # path if either side is a symlink.
        symlinks = ('l' in self.flags() or 'l' in fctx.flags())
        if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
            # Add a fast-path for merge if both sides are disk-backed.
            # Note that filecmp uses the opposite return values (True if same)
            # from our cmp functions (True if different).
            return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
        return self.data() != fctx.data()

    def path(self):
        return self._path

    def flags(self):
        # Arbitrary on-disk files carry no manifest flags (exec/symlink).
        return ''

    def data(self):
        return util.readfile(self._path)

    def decodeddata(self):
        with open(self._path, "rb") as f:
            return f.read()

    def remove(self):
        util.unlink(self._path)

    def write(self, data, flags, **kwargs):
        assert not flags
        # Open in binary mode: ``data`` is bytes and must round-trip
        # byte-for-byte with data()/decodeddata(), which read binary.
        # Text mode ("w") would reject bytes on Python 3 and translate
        # newlines on Windows.
        with open(self._path, "wb") as f:
            f.write(data)
General Comments 0
You need to be logged in to leave comments. Login now