##// END OF EJS Templates
context: safeguard from 'lx' being passed as file flag in manifest...
Pulkit Goyal -
r39101:2488dcfa default
parent child Browse files
Show More
@@ -1,2560 +1,2565 b''
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import filecmp
11 import filecmp
12 import os
12 import os
13 import stat
13 import stat
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 addednodeid,
17 addednodeid,
18 bin,
18 bin,
19 hex,
19 hex,
20 modifiednodeid,
20 modifiednodeid,
21 nullid,
21 nullid,
22 nullrev,
22 nullrev,
23 short,
23 short,
24 wdirfilenodeids,
24 wdirfilenodeids,
25 wdirid,
25 wdirid,
26 )
26 )
27 from . import (
27 from . import (
28 dagop,
28 dagop,
29 encoding,
29 encoding,
30 error,
30 error,
31 fileset,
31 fileset,
32 match as matchmod,
32 match as matchmod,
33 obsolete as obsmod,
33 obsolete as obsmod,
34 patch,
34 patch,
35 pathutil,
35 pathutil,
36 phases,
36 phases,
37 pycompat,
37 pycompat,
38 repoview,
38 repoview,
39 revlog,
39 revlog,
40 scmutil,
40 scmutil,
41 sparse,
41 sparse,
42 subrepo,
42 subrepo,
43 subrepoutil,
43 subrepoutil,
44 util,
44 util,
45 )
45 )
46 from .utils import (
46 from .utils import (
47 dateutil,
47 dateutil,
48 stringutil,
48 stringutil,
49 )
49 )
50
50
# Module-level shorthand for util.propertycache: a decorator that computes
# an attribute once on first access and caches it on the instance.
propertycache = util.propertycache
52
52
class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
                be committed,
    memctx: a context that represents changes in-memory and can also
            be committed."""

    def __init__(self, repo):
        # Subclasses are responsible for setting _rev/_node/_manifest etc.
        self._repo = repo

    def __bytes__(self):
        # Short (abbreviated) hex form of this context's node id.
        return short(self.node())

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return r"<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        # Two contexts are equal when they are of the same concrete type and
        # refer to the same revision; anything lacking _rev compares unequal.
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        # Membership means "this path exists in the context's manifest".
        return key in self._manifest

    def __getitem__(self, key):
        # ctx[path] -> file context for that path in this changeset.
        return self.filectx(key)

    def __iter__(self):
        # Iterates over the file paths tracked in this context.
        return iter(self._manifest)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results, if this is
        a working copy context. For non-working copy contexts, it just returns
        the normal manifest."""
        return self.manifest()

    def _matchstatus(self, other, match):
        """This internal method provides a way for child objects to override the
        match operator.
        """
        return match

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context

        ``s`` carries the deleted/unknown/ignored lists already gathered by
        the caller; modified/added/removed/clean are derived here from a
        manifest diff between the two contexts.
        """
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in d.iteritems():
            if fn in deletedset:
                continue
            # A None value from manifest.diff() marks a clean (unchanged) file
            # when listclean was requested.
            if value is None:
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in wdirfilenodeids:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [fn for fn in unknown if fn not in mf1 and
                       (not match or match(fn))]
            ignored = [fn for fn in ignored if fn not in mf1 and
                       (not match or match(fn))]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(modified, added, removed, deleted, unknown,
                              ignored, clean)

    @propertycache
    def substate(self):
        # Parsed .hgsub/.hgsubstate mapping for this context (lazy, cached).
        return subrepoutil.state(self, self._repo.ui)

    def subrev(self, subpath):
        # Recorded revision of the subrepo at ``subpath`` in this context.
        return self.substate[subpath][1]

    # Plain accessors over state provided by subclasses.
    def rev(self):
        return self._rev
    def node(self):
        return self._node
    def hex(self):
        return hex(self.node())
    def manifest(self):
        return self._manifest
    def manifestctx(self):
        return self._manifestctx
    def repo(self):
        return self._repo
    def phasestr(self):
        return phases.phasenames[self.phase()]
    def mutable(self):
        # Anything above the "public" phase can still be rewritten.
        return self.phase() > phases.public

    def matchfileset(self, expr, badfn=None):
        """Return a matcher for the fileset expression ``expr`` evaluated
        against this context."""
        return fileset.match(self, expr, badfn=badfn)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, 'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, 'extinct')

    def orphan(self):
        """True if the changeset is not obsolete but it's ancestor are"""
        return self.rev() in obsmod.getrevs(self._repo, 'orphan')

    def phasedivergent(self):
        """True if the changeset try to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be bumped.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')

    def contentdivergent(self):
        """Is a successors of a changeset with multiple possible successors set

        Only non-public and non-obsolete changesets may be divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')

    def isunstable(self):
        """True if the changeset is either unstable, bumped or divergent"""
        return self.orphan() or self.phasedivergent() or self.contentdivergent()

    def instabilities(self):
        """return the list of instabilities affecting this changeset.

        Instabilities are returned as strings. possible values are:
        - orphan,
        - phase-divergent,
        - content-divergent.
        """
        instabilities = []
        if self.orphan():
            instabilities.append('orphan')
        if self.phasedivergent():
            instabilities.append('phase-divergent')
        if self.contentdivergent():
            instabilities.append('content-divergent')
        return instabilities

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        # A second parent is only materialized when it actually exists;
        # otherwise a null changectx is returned.
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        return changectx(self._repo, nullrev)

    def _fileinfo(self, path):
        """Return (filenode, flags) for ``path``, raising ManifestLookupError
        when the path is not in this context's manifest.

        Tries cheaper sources first: a fully-loaded manifest, then the
        manifest delta, before falling back to a manifestlog lookup.
        """
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(self._node, path,
                                                _('not found in manifest'))
        if r'_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (self._manifestdelta[path],
                        self._manifestdelta.flags(path))
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(self._node, path,
                                            _('not found in manifest'))

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        # Unknown paths report empty flags rather than raising.
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return ''

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        # Subrepo placeholder at the null revision, with ``pctx`` as parent.
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        '''return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        '''
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        """Build a file matcher for ``pats`` rooted at this repository."""
        r = self._repo
        return matchmod.match(r.root, r.getcwd(), pats,
                              include, exclude, default,
                              auditor=r.nofsauditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn)

    def diff(self, ctx2=None, match=None, changes=None, opts=None,
             losedatafn=None, prefix='', relroot='', copy=None,
             hunksfilterfn=None):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        return patch.diff(self._repo, ctx2, self, match=match, changes=changes,
                          opts=opts, losedatafn=losedatafn, prefix=prefix,
                          relroot=relroot, copy=copy,
                          hunksfilterfn=hunksfilterfn)

    def dirs(self):
        # Directories implied by the manifest's tracked files.
        return self._manifest.dirs()

    def hasdir(self, dir):
        return self._manifest.hasdir(dir)

    def status(self, other=None, match=None, listignored=False,
               listclean=False, listunknown=False, listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        reversed = False
        if (not isinstance(ctx1, changectx)
            and isinstance(ctx2, changectx)):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
                              listunknown)

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
                               r.clean)

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(rev2, match=submatch, ignored=listignored,
                               clean=listclean, unknown=listunknown,
                               listsubrepos=True)
                for rfiles, sfiles in zip(r, s):
                    rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)

        # Narrow clones only report files inside the narrowspec.
        narrowmatch = self._repo.narrowmatch()
        if not narrowmatch.always():
            for l in r:
                l[:] = list(filter(narrowmatch, l))
        for l in r:
            l.sort()

        return r
383
383
class changectx(basectx):
    """A changecontext object makes access to data related to a particular
    changeset convenient. It represents a read-only context already present in
    the repo."""
    def __init__(self, repo, changeid='.'):
        """changeid is a revision number, node, or tag"""
        super(changectx, self).__init__(repo)

        try:
            if isinstance(changeid, int):
                self._node = repo.changelog.node(changeid)
                self._rev = changeid
                return
            elif changeid == 'null':
                self._node = nullid
                self._rev = nullrev
                return
            elif changeid == 'tip':
                self._node = repo.changelog.tip()
                self._rev = repo.changelog.rev(self._node)
                return
            elif (changeid == '.'
                  or repo.local() and changeid == repo.dirstate.p1()):
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                self._node = repo.dirstate.p1()
                self._rev = repo.unfiltered().changelog.rev(self._node)
                return
            elif len(changeid) == 20:
                # a 20-byte string is treated as a binary node id
                try:
                    self._node = changeid
                    self._rev = repo.changelog.rev(changeid)
                    return
                except error.FilteredLookupError:
                    changeid = hex(changeid) # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (repo.local()
                        and changeid in repo.unfiltered().dirstate.parents()):
                        msg = _("working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid) # for the error message

            elif len(changeid) == 40:
                # a 40-byte string is treated as a full hex node id
                try:
                    self._node = bin(changeid)
                    self._rev = repo.changelog.rev(self._node)
                    return
                except error.FilteredLookupError:
                    raise
                except (TypeError, LookupError):
                    pass
            else:
                raise error.ProgrammingError(
                    "unsupported changeid '%s' of type %s" %
                    (changeid, type(changeid)))

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(_("filtered revision '%s'")
                                                % pycompat.bytestr(changeid))
        except error.FilteredRepoLookupError:
            raise
        except IndexError:
            pass
        # Fallthrough: every lookup strategy failed.
        raise error.RepoLookupError(
            _("unknown revision '%s'") % changeid)
454
454
    def __hash__(self):
        # Hash on the revision number when available; fall back to object
        # identity for partially-initialized instances.
        try:
            return hash(self._rev)
        except AttributeError:
            return id(self)

    def __nonzero__(self):
        # A context is falsy only for the null revision.
        return self._rev != nullrev

    __bool__ = __nonzero__
465
465
    @propertycache
    def _changeset(self):
        # Parsed changelog entry for this revision (lazy, cached).
        return self._repo.changelog.changelogrevision(self.rev())

    @propertycache
    def _manifest(self):
        # Fully-loaded manifest (lazy, cached).
        return self._manifestctx.read()

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._changeset.manifest]

    @propertycache
    def _manifestdelta(self):
        # Manifest delta against the parent; cheaper than a full read when
        # only a few entries are needed.
        return self._manifestctx.readdelta()

    @propertycache
    def _parents(self):
        # One-element list for a single parent; the null second parent is
        # omitted here (see basectx.p2 for the null fallback).
        repo = self._repo
        p1, p2 = repo.changelog.parentrevs(self._rev)
        if p2 == nullrev:
            return [changectx(repo, p1)]
        return [changectx(repo, p1), changectx(repo, p2)]
489
489
    def changeset(self):
        """Return the raw changeset fields as a tuple:
        (manifest node, user, date, files, description, extra)."""
        c = self._changeset
        return (
            c.manifest,
            c.user,
            c.date,
            c.files,
            c.description,
            c.extra,
        )
    def manifestnode(self):
        # Node id of the manifest recorded in this changeset.
        return self._changeset.manifest
502
502
    # Plain accessors over the parsed changelog entry and repo-level metadata.
    def user(self):
        return self._changeset.user
    def date(self):
        return self._changeset.date
    def files(self):
        return self._changeset.files
    def description(self):
        return self._changeset.description
    def branch(self):
        # Branch name is stored in extra and converted to local encoding.
        return encoding.tolocal(self._changeset.extra.get("branch"))
    def closesbranch(self):
        return 'close' in self._changeset.extra
    def extra(self):
        """Return a dict of extra information."""
        return self._changeset.extra
    def tags(self):
        """Return a list of byte tag names"""
        return self._repo.nodetags(self._node)
    def bookmarks(self):
        """Return a list of byte bookmark names."""
        return self._repo.nodebookmarks(self._node)
    def phase(self):
        return self._repo._phasecache.phase(self._repo, self._rev)
    def hidden(self):
        # True when this revision is filtered out of the 'visible' view.
        return self._rev in repoview.filterrevs(self._repo, 'visible')
528
528
    def isinmemory(self):
        # changectx instances are always backed by on-disk storage.
        return False

    def children(self):
        """return list of changectx contexts for each child changeset.

        This returns only the immediate child changesets. Use descendants() to
        recursively walk children.
        """
        c = self._repo.changelog.children(self._node)
        return [changectx(self._repo, x) for x in c]
540
540
541 def ancestors(self):
541 def ancestors(self):
542 for a in self._repo.changelog.ancestors([self._rev]):
542 for a in self._repo.changelog.ancestors([self._rev]):
543 yield changectx(self._repo, a)
543 yield changectx(self._repo, a)
544
544
545 def descendants(self):
545 def descendants(self):
546 """Recursively yield all children of the changeset.
546 """Recursively yield all children of the changeset.
547
547
548 For just the immediate children, use children()
548 For just the immediate children, use children()
549 """
549 """
550 for d in self._repo.changelog.descendants([self._rev]):
550 for d in self._repo.changelog.descendants([self._rev]):
551 yield changectx(self._repo, d)
551 yield changectx(self._repo, d)
552
552
553 def filectx(self, path, fileid=None, filelog=None):
553 def filectx(self, path, fileid=None, filelog=None):
554 """get a file context from this changeset"""
554 """get a file context from this changeset"""
555 if fileid is None:
555 if fileid is None:
556 fileid = self.filenode(path)
556 fileid = self.filenode(path)
557 return filectx(self._repo, path, fileid=fileid,
557 return filectx(self._repo, path, fileid=fileid,
558 changectx=self, filelog=filelog)
558 changectx=self, filelog=filelog)
559
559
    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            anc = nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            # Multiple candidate ancestors: let configuration pick one.
            # experimental config: merge.preferancestor
            for r in self._repo.ui.configlist('merge', 'preferancestor'):
                try:
                    ctx = scmutil.revsymbol(self._repo, r)
                except error.RepoLookupError:
                    # ignore unresolvable preferences and try the next one
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                # no configured preference matched: fall back to revlog choice
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (_("note: using %s as ancestor of %s and %s\n") %
                     (short(anc), short(self._node), short(n2))) +
                    ''.join(_(" alternatively, use --config "
                              "merge.preferancestor=%s\n") %
                            short(n) for n in sorted(cahs) if n != anc))
        return changectx(self._repo, anc)
595
595
    def descendant(self, other):
        """Deprecated alias for isancestorof() (since 4.7).

        Emits a deprecation warning, then delegates.
        """
        msg = (b'ctx.descendant(other) is deprecated, '
               b'use ctx.isancestorof(other)')
        self._repo.ui.deprecwarn(msg, b'4.7')
        return self.isancestorof(other)
601
601
    def isancestorof(self, other):
        """True if this changeset is an ancestor of other"""
        return self._repo.changelog.isancestorrev(self._rev, other._rev)
605
605
606 def walk(self, match):
606 def walk(self, match):
607 '''Generates matching file names.'''
607 '''Generates matching file names.'''
608
608
609 # Wrap match.bad method to have message with nodeid
609 # Wrap match.bad method to have message with nodeid
610 def bad(fn, msg):
610 def bad(fn, msg):
611 # The manifest doesn't know about subrepos, so don't complain about
611 # The manifest doesn't know about subrepos, so don't complain about
612 # paths into valid subrepos.
612 # paths into valid subrepos.
613 if any(fn == s or fn.startswith(s + '/')
613 if any(fn == s or fn.startswith(s + '/')
614 for s in self.substate):
614 for s in self.substate):
615 return
615 return
616 match.bad(fn, _('no such file in rev %s') % self)
616 match.bad(fn, _('no such file in rev %s') % self)
617
617
618 m = matchmod.badmatch(match, bad)
618 m = matchmod.badmatch(match, bad)
619 return self._manifest.walk(m)
619 return self._manifest.walk(m)
620
620
    def matches(self, match):
        """Return the file names in this changeset matched by ``match``."""
        return self.walk(match)
623
623
class basefilectx(object):
    """A filecontext object represents the common logic for its children:
    filectx: read-only access to a filerevision that is already present
             in the repo,
    workingfilectx: a filecontext that represents files from the working
                    directory,
    memfilectx: a filecontext that represents files in-memory,
    overlayfilectx: duplicate another filecontext with some fields overridden.
    """
    @propertycache
    def _filelog(self):
        # filelog for this path, opened lazily
        return self._repo.file(self._path)

    @propertycache
    def _changeid(self):
        # Resolve the changeset revision this file revision is attached to,
        # preferring explicitly provided context over the raw linkrev.
        if r'_changeid' in self.__dict__:
            return self._changeid
        elif r'_changectx' in self.__dict__:
            return self._changectx.rev()
        elif r'_descendantrev' in self.__dict__:
            # this file context was created from a revision with a known
            # descendant, we can (lazily) correct for linkrev aliases
            return self._adjustlinkrev(self._descendantrev)
        else:
            return self._filelog.linkrev(self._filerev)

    @propertycache
    def _filenode(self):
        if r'_fileid' in self.__dict__:
            return self._filelog.lookup(self._fileid)
        else:
            return self._changectx.filenode(self._path)

    @propertycache
    def _filerev(self):
        return self._filelog.rev(self._filenode)

    @propertycache
    def _repopath(self):
        return self._path

    def __nonzero__(self):
        # truthiness == "this file revision exists"
        try:
            self._filenode
            return True
        except error.LookupError:
            # file is missing
            return False

    __bool__ = __nonzero__

    def __bytes__(self):
        try:
            return "%s@%s" % (self.path(), self._changectx)
        except error.LookupError:
            return "%s@???" % self.path()

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return r"<%s %s>" % (type(self).__name__, str(self))

    def __hash__(self):
        # hash and __eq__ below agree: both key on (path, filenode)
        try:
            return hash((self._path, self._filenode))
        except AttributeError:
            return id(self)

    def __eq__(self, other):
        try:
            return (type(self) == type(other) and self._path == other._path
                    and self._filenode == other._filenode)
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    # Delegating accessors: values derived either from the filelog entry
    # or from the changeset this file revision belongs to.
    def filerev(self):
        return self._filerev
    def filenode(self):
        return self._filenode
    @propertycache
    def _flags(self):
        return self._changectx.flags(self._path)
    def flags(self):
        return self._flags
    def filelog(self):
        return self._filelog
    def rev(self):
        return self._changeid
    def linkrev(self):
        return self._filelog.linkrev(self._filerev)
    def node(self):
        return self._changectx.node()
    def hex(self):
        return self._changectx.hex()
    def user(self):
        return self._changectx.user()
    def date(self):
        return self._changectx.date()
    def files(self):
        return self._changectx.files()
    def description(self):
        return self._changectx.description()
    def branch(self):
        return self._changectx.branch()
    def extra(self):
        return self._changectx.extra()
    def phase(self):
        return self._changectx.phase()
    def phasestr(self):
        return self._changectx.phasestr()
    def obsolete(self):
        return self._changectx.obsolete()
    def instabilities(self):
        return self._changectx.instabilities()
    def manifest(self):
        return self._changectx.manifest()
    def changectx(self):
        return self._changectx
    def renamed(self):
        return self._copied
    def repo(self):
        return self._repo
    def size(self):
        return len(self.data())

    def path(self):
        return self._path

    def isbinary(self):
        try:
            return stringutil.binary(self.data())
        except IOError:
            return False
    def isexec(self):
        # 'x' flag == executable
        return 'x' in self.flags()
    def islink(self):
        # 'l' flag == symlink
        return 'l' in self.flags()

    def isabsent(self):
        """whether this filectx represents a file not in self._changectx

        This is mainly for merge code to detect change/delete conflicts. This is
        expected to be True for all subclasses of basectx."""
        return False

    _customcmp = False
    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        if fctx._customcmp:
            return fctx.cmp(self)

        if (fctx._filenode is None
            and (self._repo._encodefilterpats
                 # if file data starts with '\1\n', empty metadata block is
                 # prepended, which adds 4 bytes to filelog.size().
                 or self.size() - 4 == fctx.size())
            or self.size() == fctx.size()):
            return self._filelog.cmp(self._filenode, fctx.data())

        return True

    def _adjustlinkrev(self, srcrev, inclusive=False):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked
        """
        repo = self._repo
        cl = repo.unfiltered().changelog
        mfl = repo.manifestlog
        # fetch the linkrev
        lkr = self.linkrev()
        # hack to reuse ancestor computation when searching for renames
        memberanc = getattr(self, '_ancestrycontext', None)
        iteranc = None
        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True # we skipped the real (revless) source
        else:
            revs = [srcrev]
        if memberanc is None:
            memberanc = iteranc = cl.ancestors(revs, lkr,
                                               inclusive=inclusive)
        # check if this linkrev is an ancestor of srcrev
        if lkr not in memberanc:
            if iteranc is None:
                iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
            fnode = self._filenode
            path = self._path
            for a in iteranc:
                ac = cl.read(a) # get changeset data (we avoid object creation)
                if path in ac[3]: # checking the 'files' field.
                    # The file has been touched, check if the content is
                    # similar to the one we search for.
                    if fnode == mfl[ac[0]].readfast().get(path):
                        return a
            # In theory, we should never get out of that loop without a result.
            # But if manifest uses a buggy file revision (not children of the
            # one it replaces) we could. Such a buggy situation will likely
            # result is crash somewhere else at to some point.
        return lkr

    def introrev(self):
        """return the rev of the changeset which introduced this file revision

        This method is different from linkrev because it take into account the
        changeset the filectx was created from. It ensures the returned
        revision is one of its ancestors. This prevents bugs from
        'linkrev-shadowing' when a file revision is used by multiple
        changesets.
        """
        lkr = self.linkrev()
        attrs = vars(self)
        noctx = not (r'_changeid' in attrs or r'_changectx' in attrs)
        if noctx or self.rev() == lkr:
            return self.linkrev()
        return self._adjustlinkrev(self.rev(), inclusive=True)

    def introfilectx(self):
        """Return filectx having identical contents, but pointing to the
        changeset revision where this filectx was introduced"""
        introrev = self.introrev()
        if self.rev() == introrev:
            return self
        return self.filectx(self.filenode(), changeid=introrev)

    def _parentfilectx(self, path, fileid, filelog):
        """create parent filectx keeping ancestry info for _adjustlinkrev()"""
        fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
        if r'_changeid' in vars(self) or r'_changectx' in vars(self):
            # If self is associated with a changeset (probably explicitly
            # fed), ensure the created filectx is associated with a
            # changeset that is an ancestor of self.changectx.
            # This lets us later use _adjustlinkrev to get a correct link.
            fctx._descendantrev = self.rev()
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        elif r'_descendantrev' in vars(self):
            # Otherwise propagate _descendantrev if we have one associated.
            fctx._descendantrev = self._descendantrev
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        return fctx

    def parents(self):
        """Return the parent filectxs, substituting rename sources."""
        _path = self._path
        fl = self._filelog
        parents = self._filelog.parents(self._filenode)
        pl = [(_path, node, fl) for node in parents if node != nullid]

        r = fl.renamed(self._filenode)
        if r:
            # - In the simple rename case, both parent are nullid, pl is empty.
            # - In case of merge, only one of the parent is null id and should
            # be replaced with the rename information. This parent is -always-
            # the first one.
            #
            # As null id have always been filtered out in the previous list
            # comprehension, inserting to 0 will always result in "replacing
            # first nullid parent with rename information.
            pl.insert(0, (r[0], r[1], self._repo.file(r[0])))

        return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]

    def p1(self):
        return self.parents()[0]

    def p2(self):
        # a null filectx stands in when there is no second parent
        p = self.parents()
        if len(p) == 2:
            return p[1]
        return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)

    def annotate(self, follow=False, skiprevs=None, diffopts=None):
        """Returns a list of annotateline objects for each line in the file

        - line.fctx is the filectx of the node where that line was last changed
        - line.lineno is the line number at the first appearance in the managed
          file
        - line.text is the data on that line (including newline character)
        """
        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            # Cut _descendantrev here to mitigate the penalty of lazy linkrev
            # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
            # from the topmost introrev (= srcrev) down to p.linkrev() if it
            # isn't an ancestor of the srcrev.
            f._changeid
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
            # from the cache to save time
            for p in pl:
                if not r'_filelog' in p.__dict__:
                    p._filelog = getlog(p.path())

            return pl

        # use linkrev to find the first changeset where self appeared
        base = self.introfilectx()
        if getattr(base, '_ancestrycontext', None) is None:
            cl = self._repo.changelog
            if base.rev() is None:
                # wctx is not inclusive, but works because _ancestrycontext
                # is used to test filelog revisions
                ac = cl.ancestors([p.rev() for p in base.parents()],
                                  inclusive=True)
            else:
                ac = cl.ancestors([base.rev()], inclusive=True)
            base._ancestrycontext = ac

        return dagop.annotate(base, parents, skiprevs=skiprevs,
                              diffopts=diffopts)

    def ancestors(self, followfirst=False):
        # Walk filelog ancestors, always yielding the candidate with the
        # highest (linkrev, filenode) key next.
        visit = {}
        c = self
        if followfirst:
            cut = 1
        else:
            cut = None

        while True:
            for parent in c.parents()[:cut]:
                visit[(parent.linkrev(), parent.filenode())] = parent
            if not visit:
                break
            c = visit.pop(max(visit))
            yield c

    def decodeddata(self):
        """Returns `data()` after running repository decoding filters.

        This is often equivalent to how the data would be expressed on disk.
        """
        return self._repo.wwritedata(self.path(), self.data())
974
974
975 class filectx(basefilectx):
975 class filectx(basefilectx):
976 """A filecontext object makes access to data related to a particular
976 """A filecontext object makes access to data related to a particular
977 filerevision convenient."""
977 filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        # At least one way of resolving the revision must be given; the
        # rest is derived lazily via propertycache in basefilectx.
        assert (changeid is not None
                or fileid is not None
                or changectx is not None), \
                ("bad args: changeid=%r, fileid=%r, changectx=%r"
                 % (changeid, fileid, changectx))

        if filelog is not None:
            self._filelog = filelog

        # Only pre-seed the caches we were actually given.
        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid
1000
1000
    @propertycache
    def _changectx(self):
        """The changectx this file revision belongs to (built lazily)."""
        try:
            return changectx(self._repo, self._changeid)
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to build
            # `changectx` for filtered revision. In such case we fallback to
            # creating `changectx` on the unfiltered version of the reposition.
            # This fallback should not be an issue because `changectx` from
            # `filectx` are not used in complex operations that care about
            # filtering.
            #
            # This fallback is a cheap and dirty fix that prevent several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better as "crash"
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when solving linkrev issue are on the table.
            return changectx(self._repo.unfiltered(), self._changeid)
1023
1023
    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        # Reuse self._filelog so the (possibly expensive) filelog is not
        # re-opened for every revision of the same path.
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog, changeid=changeid)
1029
1029
    def rawdata(self):
        """Return the revision data as stored in the revlog.

        raw=True means flag-processor transforms are not applied — TODO
        confirm against the filelog/revlog API if relying on this.
        """
        return self._filelog.revision(self._filenode, raw=True)
1032
1032
    def rawflags(self):
        """low-level revlog flags for this file revision"""
        return self._filelog.flags(self._filerev)
1036
1036
    def data(self):
        """Return the content of this file revision.

        If the node is censored: returns '' when censor.policy is
        "ignore", otherwise aborts with a hint about that setting.
        """
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config("censor", "policy") == "ignore":
                return ""
            raise error.Abort(_("censored node: %s") % short(self._filenode),
                              hint=_("set censor.policy to ignore errors"))
1045
1045
    def size(self):
        """Return the size of this file revision as reported by the filelog."""
        return self._filelog.size(self._filerev)
1048
1048
    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return renamed

        # linkrev points back at this changeset: the rename happened here
        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                # a parent already has this exact file revision: not a
                # rename introduced by this changeset
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                # file does not exist in this parent
                pass
        return renamed
1074
1074
    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]
1080
1080
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        super(committablectx, self).__init__(repo)
        # rev/node are None until this context is actually committed
        self._rev = None
        self._node = None
        self._text = text
        if date:
            self._date = dateutil.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise error.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
        if self._extra['branch'] == '':
            self._extra['branch'] = 'default'

    def __bytes__(self):
        # render as "<p1>+" to mark the uncommitted state
        return bytes(self._parents[0]) + "+"

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func

    @propertycache
    def _flagfunc(self):
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    @propertycache
    def _status(self):
        # default when no explicit 'changes' was given to __init__
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        ui = self._repo.ui
        date = ui.configdate('devel', 'default-date')
        if date is None:
            date = dateutil.makedate()
        return date

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        return None
    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)

    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def isinmemory(self):
        return False

    def tags(self):
        return []

    def bookmarks(self):
        # an uncommitted context carries its parents' bookmarks
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        phase = phases.draft # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def walk(self, match):
        '''Generates matching file names.'''
        return sorted(self._repo.dirstate.walk(match,
                                               subrepos=sorted(self.substate),
                                               unknown=True, ignored=False))

    def matches(self, match):
        # skip files marked removed ('r') in the dirstate
        ds = self._repo.dirstate
        return sorted(f for f in ds.matches(match) if ds[f] != 'r')

    def ancestors(self):
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]):
            yield changectx(self._repo, a)

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.normal(f)
            for f in self.removed():
                self._repo.dirstate.drop(f)
            self._repo.dirstate.setparents(node)

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())

    def dirty(self, missing=False, merge=True, branch=True):
        return False
1282
1282
class workingctx(committablectx):
    """A workingctx object makes access to data related to
    the current working directory convenient.
    date - any valid date string or (unixtime, offset), or None.
    user - username string, or None.
    extra - a dictionary of extra values, or None.
    changes - a list of file lists as returned by localrepo.status()
       or None to use the repository status.
    """
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1295
1295
1296 def __iter__(self):
1296 def __iter__(self):
1297 d = self._repo.dirstate
1297 d = self._repo.dirstate
1298 for f in d:
1298 for f in d:
1299 if d[f] != 'r':
1299 if d[f] != 'r':
1300 yield f
1300 yield f
1301
1301
1302 def __contains__(self, key):
1302 def __contains__(self, key):
1303 return self._repo.dirstate[key] not in "?r"
1303 return self._repo.dirstate[key] not in "?r"
1304
1304
    def hex(self):
        """Return the hex form of the fake working-directory node id."""
        return hex(wdirid)
1307
1307
    @propertycache
    def _parents(self):
        """Working-directory parents as changectxs; a null second
        parent is dropped."""
        p = self._repo.dirstate.parents()
        if p[1] == nullid:
            p = p[:-1]
        return [changectx(self._repo, x) for x in p]
1314
1314
    def _fileinfo(self, path):
        # populate __dict__['_manifest'] as workingctx has no _manifestdelta
        self._manifest
        return super(workingctx, self)._fileinfo(path)
1319
1319
    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        return workingfilectx(self._repo, path, workingctx=self,
                              filelog=filelog)
1324
1324
    def dirty(self, missing=False, merge=True, branch=True):
        "check whether a working directory is modified"
        # check subrepos first
        for s in sorted(self.substate):
            if self.sub(s).dirty(missing=missing):
                return True
        # check current working dir
        return ((merge and self.p2()) or
                (branch and self.branch() != self.p1().branch()) or
                self.modified() or self.added() or self.removed() or
                (missing and self.deleted()))
1336
1336
1337 def add(self, list, prefix=""):
1337 def add(self, list, prefix=""):
1338 with self._repo.wlock():
1338 with self._repo.wlock():
1339 ui, ds = self._repo.ui, self._repo.dirstate
1339 ui, ds = self._repo.ui, self._repo.dirstate
1340 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1340 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1341 rejected = []
1341 rejected = []
1342 lstat = self._repo.wvfs.lstat
1342 lstat = self._repo.wvfs.lstat
1343 for f in list:
1343 for f in list:
1344 # ds.pathto() returns an absolute file when this is invoked from
1344 # ds.pathto() returns an absolute file when this is invoked from
1345 # the keyword extension. That gets flagged as non-portable on
1345 # the keyword extension. That gets flagged as non-portable on
1346 # Windows, since it contains the drive letter and colon.
1346 # Windows, since it contains the drive letter and colon.
1347 scmutil.checkportable(ui, os.path.join(prefix, f))
1347 scmutil.checkportable(ui, os.path.join(prefix, f))
1348 try:
1348 try:
1349 st = lstat(f)
1349 st = lstat(f)
1350 except OSError:
1350 except OSError:
1351 ui.warn(_("%s does not exist!\n") % uipath(f))
1351 ui.warn(_("%s does not exist!\n") % uipath(f))
1352 rejected.append(f)
1352 rejected.append(f)
1353 continue
1353 continue
1354 limit = ui.configbytes('ui', 'large-file-limit')
1354 limit = ui.configbytes('ui', 'large-file-limit')
1355 if limit != 0 and st.st_size > limit:
1355 if limit != 0 and st.st_size > limit:
1356 ui.warn(_("%s: up to %d MB of RAM may be required "
1356 ui.warn(_("%s: up to %d MB of RAM may be required "
1357 "to manage this file\n"
1357 "to manage this file\n"
1358 "(use 'hg revert %s' to cancel the "
1358 "(use 'hg revert %s' to cancel the "
1359 "pending addition)\n")
1359 "pending addition)\n")
1360 % (f, 3 * st.st_size // 1000000, uipath(f)))
1360 % (f, 3 * st.st_size // 1000000, uipath(f)))
1361 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1361 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1362 ui.warn(_("%s not added: only files and symlinks "
1362 ui.warn(_("%s not added: only files and symlinks "
1363 "supported currently\n") % uipath(f))
1363 "supported currently\n") % uipath(f))
1364 rejected.append(f)
1364 rejected.append(f)
1365 elif ds[f] in 'amn':
1365 elif ds[f] in 'amn':
1366 ui.warn(_("%s already tracked!\n") % uipath(f))
1366 ui.warn(_("%s already tracked!\n") % uipath(f))
1367 elif ds[f] == 'r':
1367 elif ds[f] == 'r':
1368 ds.normallookup(f)
1368 ds.normallookup(f)
1369 else:
1369 else:
1370 ds.add(f)
1370 ds.add(f)
1371 return rejected
1371 return rejected
1372
1372
    def forget(self, files, prefix=""):
        """Stop tracking *files* without deleting them from disk.

        Returns the sub-list of files that were not tracked at all.
        """
        with self._repo.wlock():
            ds = self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            for f in files:
                if f not in self._repo.dirstate:
                    self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
                    rejected.append(f)
                elif self._repo.dirstate[f] != 'a':
                    # tracked (normal/modified/removed): mark removed
                    self._repo.dirstate.remove(f)
                else:
                    # freshly added: just drop the entry
                    self._repo.dirstate.drop(f)
            return rejected
1387
1387
1388 def undelete(self, list):
1388 def undelete(self, list):
1389 pctxs = self.parents()
1389 pctxs = self.parents()
1390 with self._repo.wlock():
1390 with self._repo.wlock():
1391 ds = self._repo.dirstate
1391 ds = self._repo.dirstate
1392 for f in list:
1392 for f in list:
1393 if self._repo.dirstate[f] != 'r':
1393 if self._repo.dirstate[f] != 'r':
1394 self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
1394 self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
1395 else:
1395 else:
1396 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1396 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1397 t = fctx.data()
1397 t = fctx.data()
1398 self._repo.wwrite(f, t, fctx.flags())
1398 self._repo.wwrite(f, t, fctx.flags())
1399 self._repo.dirstate.normal(f)
1399 self._repo.dirstate.normal(f)
1400
1400
    def copy(self, source, dest):
        """Record in the dirstate that *dest* is a copy of *source*.

        *dest* must already exist in the working directory as a regular
        file or symlink; otherwise a warning is emitted and nothing is
        recorded.
        """
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            self._repo.ui.warn(_("%s does not exist!\n")
                               % self._repo.dirstate.pathto(dest))
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(_("copy failed: %s is not a file or a "
                                 "symbolic link\n")
                               % self._repo.dirstate.pathto(dest))
        else:
            with self._repo.wlock():
                if self._repo.dirstate[dest] in '?':
                    self._repo.dirstate.add(dest)
                elif self._repo.dirstate[dest] in 'r':
                    self._repo.dirstate.normallookup(dest)
                self._repo.dirstate.copy(source, dest)
1421
1421
    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        """Build a matcher for the working directory rooted at the repo."""
        r = self._repo

        # Only a case insensitive filesystem needs magic to translate user input
        # to actual case in the filesystem.
        icasefs = not util.fscasesensitive(r.root)
        return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
                              default, auditor=r.auditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn,
                              icasefs=icasefs)
1433
1433
    def _filtersuspectsymlink(self, files):
        """Drop files flagged 'l' whose content does not look like a
        symlink target; returns *files* unchanged when the filesystem
        supports symlinks."""
        if not files or self._repo.dirstate._checklink:
            return files

        # Symlink placeholders may get non-symlink-like contents
        # via user error or dereferencing by NFS or Samba servers,
        # so we filter out any placeholders that don't look like a
        # symlink
        sane = []
        for f in files:
            if self.flags(f) == 'l':
                d = self[f].data()
                # empty, very long, multi-line or binary content cannot
                # be a symlink target
                if (d == '' or len(d) >= 1024 or '\n' in d
                    or stringutil.binary(d)):
                    self._repo.ui.debug('ignoring suspect symlink placeholder'
                                        ' "%s"\n' % f)
                    continue
            sane.append(f)
        return sane
1453
1453
    def _checklookup(self, files):
        """Resolve files the dirstate could not classify by content compare.

        Returns (modified, deleted, fixup) where 'fixup' lists files
        that turned out to be clean.
        """
        # check for any possibly clean files
        if not files:
            return [], [], []

        modified = []
        deleted = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            try:
                # This will return True for a file that got replaced by a
                # directory in the interim, but fixing that is pretty hard.
                if (f not in pctx or self.flags(f) != pctx.flags(f)
                    or pctx[f].cmp(self[f])):
                    modified.append(f)
                else:
                    fixup.append(f)
            except (IOError, OSError):
                # A file become inaccessible in between? Mark it as deleted,
                # matching dirstate behavior (issue5584).
                # The dirstate has more complex behavior around whether a
                # missing file matches a directory, etc, but we don't need to
                # bother with that: if f has made it to this point, we're sure
                # it's in the dirstate.
                deleted.append(f)

        return modified, deleted, fixup
1483
1483
    def _poststatusfixup(self, status, fixup):
        """update dirstate for files that are actually clean"""
        poststatus = self._repo.postdsstatus()
        if fixup or poststatus:
            try:
                oldid = self._repo.dirstate.identity()

                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                with self._repo.wlock(False):
                    if self._repo.dirstate.identity() == oldid:
                        if fixup:
                            normal = self._repo.dirstate.normal
                            for f in fixup:
                                normal(f)
                            # write changes out explicitly, because nesting
                            # wlock at runtime may prevent 'wlock.release()'
                            # after this block from doing so for subsequent
                            # changing files
                            tr = self._repo.currenttransaction()
                            self._repo.dirstate.write(tr)

                        if poststatus:
                            for ps in poststatus:
                                ps(self, status)
                    else:
                        # in this case, writing changes out breaks
                        # consistency, because .hg/dirstate was
                        # already changed simultaneously after last
                        # caching (see also issue5584 for detail)
                        self._repo.ui.debug('skip updating dirstate: '
                                            'identity mismatch\n')
            except error.LockError:
                # best-effort: skip the fixup rather than block
                pass
            finally:
                # Even if the wlock couldn't be grabbed, clear out the list.
                self._repo.clearpostdsstatus()
1523
1523
1524 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1524 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1525 '''Gets the status from the dirstate -- internal use only.'''
1525 '''Gets the status from the dirstate -- internal use only.'''
1526 subrepos = []
1526 subrepos = []
1527 if '.hgsub' in self:
1527 if '.hgsub' in self:
1528 subrepos = sorted(self.substate)
1528 subrepos = sorted(self.substate)
1529 cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
1529 cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
1530 clean=clean, unknown=unknown)
1530 clean=clean, unknown=unknown)
1531
1531
1532 # check for any possibly clean files
1532 # check for any possibly clean files
1533 fixup = []
1533 fixup = []
1534 if cmp:
1534 if cmp:
1535 modified2, deleted2, fixup = self._checklookup(cmp)
1535 modified2, deleted2, fixup = self._checklookup(cmp)
1536 s.modified.extend(modified2)
1536 s.modified.extend(modified2)
1537 s.deleted.extend(deleted2)
1537 s.deleted.extend(deleted2)
1538
1538
1539 if fixup and clean:
1539 if fixup and clean:
1540 s.clean.extend(fixup)
1540 s.clean.extend(fixup)
1541
1541
1542 self._poststatusfixup(s, fixup)
1542 self._poststatusfixup(s, fixup)
1543
1543
1544 if match.always():
1544 if match.always():
1545 # cache for performance
1545 # cache for performance
1546 if s.unknown or s.ignored or s.clean:
1546 if s.unknown or s.ignored or s.clean:
1547 # "_status" is cached with list*=False in the normal route
1547 # "_status" is cached with list*=False in the normal route
1548 self._status = scmutil.status(s.modified, s.added, s.removed,
1548 self._status = scmutil.status(s.modified, s.added, s.removed,
1549 s.deleted, [], [], [])
1549 s.deleted, [], [], [])
1550 else:
1550 else:
1551 self._status = s
1551 self._status = s
1552
1552
1553 return s
1553 return s
1554
1554
1555 @propertycache
1555 @propertycache
1556 def _manifest(self):
1556 def _manifest(self):
1557 """generate a manifest corresponding to the values in self._status
1557 """generate a manifest corresponding to the values in self._status
1558
1558
1559 This reuse the file nodeid from parent, but we use special node
1559 This reuse the file nodeid from parent, but we use special node
1560 identifiers for added and modified files. This is used by manifests
1560 identifiers for added and modified files. This is used by manifests
1561 merge to see that files are different and by update logic to avoid
1561 merge to see that files are different and by update logic to avoid
1562 deleting newly added files.
1562 deleting newly added files.
1563 """
1563 """
1564 return self._buildstatusmanifest(self._status)
1564 return self._buildstatusmanifest(self._status)
1565
1565
1566 def _buildstatusmanifest(self, status):
1566 def _buildstatusmanifest(self, status):
1567 """Builds a manifest that includes the given status results."""
1567 """Builds a manifest that includes the given status results."""
1568 parents = self.parents()
1568 parents = self.parents()
1569
1569
1570 man = parents[0].manifest().copy()
1570 man = parents[0].manifest().copy()
1571
1571
1572 ff = self._flagfunc
1572 ff = self._flagfunc
1573 for i, l in ((addednodeid, status.added),
1573 for i, l in ((addednodeid, status.added),
1574 (modifiednodeid, status.modified)):
1574 (modifiednodeid, status.modified)):
1575 for f in l:
1575 for f in l:
1576 man[f] = i
1576 man[f] = i
1577 try:
1577 try:
1578 man.setflag(f, ff(f))
1578 man.setflag(f, ff(f))
1579 except OSError:
1579 except OSError:
1580 pass
1580 pass
1581
1581
1582 for f in status.deleted + status.removed:
1582 for f in status.deleted + status.removed:
1583 if f in man:
1583 if f in man:
1584 del man[f]
1584 del man[f]
1585
1585
1586 return man
1586 return man
1587
1587
1588 def _buildstatus(self, other, s, match, listignored, listclean,
1588 def _buildstatus(self, other, s, match, listignored, listclean,
1589 listunknown):
1589 listunknown):
1590 """build a status with respect to another context
1590 """build a status with respect to another context
1591
1591
1592 This includes logic for maintaining the fast path of status when
1592 This includes logic for maintaining the fast path of status when
1593 comparing the working directory against its parent, which is to skip
1593 comparing the working directory against its parent, which is to skip
1594 building a new manifest if self (working directory) is not comparing
1594 building a new manifest if self (working directory) is not comparing
1595 against its parent (repo['.']).
1595 against its parent (repo['.']).
1596 """
1596 """
1597 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1597 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1598 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1598 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1599 # might have accidentally ended up with the entire contents of the file
1599 # might have accidentally ended up with the entire contents of the file
1600 # they are supposed to be linking to.
1600 # they are supposed to be linking to.
1601 s.modified[:] = self._filtersuspectsymlink(s.modified)
1601 s.modified[:] = self._filtersuspectsymlink(s.modified)
1602 if other != self._repo['.']:
1602 if other != self._repo['.']:
1603 s = super(workingctx, self)._buildstatus(other, s, match,
1603 s = super(workingctx, self)._buildstatus(other, s, match,
1604 listignored, listclean,
1604 listignored, listclean,
1605 listunknown)
1605 listunknown)
1606 return s
1606 return s
1607
1607
1608 def _matchstatus(self, other, match):
1608 def _matchstatus(self, other, match):
1609 """override the match method with a filter for directory patterns
1609 """override the match method with a filter for directory patterns
1610
1610
1611 We use inheritance to customize the match.bad method only in cases of
1611 We use inheritance to customize the match.bad method only in cases of
1612 workingctx since it belongs only to the working directory when
1612 workingctx since it belongs only to the working directory when
1613 comparing against the parent changeset.
1613 comparing against the parent changeset.
1614
1614
1615 If we aren't comparing against the working directory's parent, then we
1615 If we aren't comparing against the working directory's parent, then we
1616 just use the default match object sent to us.
1616 just use the default match object sent to us.
1617 """
1617 """
1618 if other != self._repo['.']:
1618 if other != self._repo['.']:
1619 def bad(f, msg):
1619 def bad(f, msg):
1620 # 'f' may be a directory pattern from 'match.files()',
1620 # 'f' may be a directory pattern from 'match.files()',
1621 # so 'f not in ctx1' is not enough
1621 # so 'f not in ctx1' is not enough
1622 if f not in other and not other.hasdir(f):
1622 if f not in other and not other.hasdir(f):
1623 self._repo.ui.warn('%s: %s\n' %
1623 self._repo.ui.warn('%s: %s\n' %
1624 (self._repo.dirstate.pathto(f), msg))
1624 (self._repo.dirstate.pathto(f), msg))
1625 match.bad = bad
1625 match.bad = bad
1626 return match
1626 return match
1627
1627
1628 def markcommitted(self, node):
1628 def markcommitted(self, node):
1629 super(workingctx, self).markcommitted(node)
1629 super(workingctx, self).markcommitted(node)
1630
1630
1631 sparse.aftercommit(self._repo, node)
1631 sparse.aftercommit(self._repo, node)
1632
1632
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""
    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def nodeof(changectx, name):
            # nullid stands for "absent in that parent"
            return changectx._manifest.get(name, nullid)

        name = self._path
        flog = self._filelog
        parentctxs = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            candidates = [renamed + (None,)]
        else:
            candidates = [(name, nodeof(parentctxs[0], name), flog)]
        candidates.extend((name, nodeof(pctx, name), flog)
                          for pctx in parentctxs[1:])

        return [self._parentfilectx(p, fileid=n, filelog=l)
                for p, n, l in candidates if n != nullid]

    def children(self):
        # an uncommitted file has no committed children
        return []
1679
1679
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""
    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        return workingctx(self._repo)

    def data(self):
        """Read the file's current content from the working directory."""
        return self._repo.wread(self._path)

    def renamed(self):
        # a copy source recorded in the dirstate, or None
        source = self._repo.dirstate.copied(self._path)
        if not source:
            return None
        return source, self._changectx._parents[0]._manifest.get(source,
                                                                 nullid)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size

    def date(self):
        """Return (mtime, tzoffset) of the on-disk file, falling back to the
        changectx date when the file is missing."""
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def exists(self):
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        rmdir = self._repo.ui.configbool('experimental', 'removeemptydirs')
        self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing,
                                   rmdir=rmdir)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        """wraps repo.wwrite"""
        self._repo.wwrite(self._path, data, flags,
                          backgroundclose=backgroundclose,
                          **kwargs)

    def markcopied(self, src):
        """marks this file a copy of `src`"""
        # only meaningful for tracked states (normal/merged/added)
        if self._repo.dirstate[self._path] in "nma":
            self._repo.dirstate.copy(src, self._path)

    def clearunknown(self):
        """Removes conflicting items in the working directory so that
        ``write()`` can be called successfully.
        """
        wvfs = self._repo.wvfs
        f = self._path
        wvfs.audit(f)
        if wvfs.isdir(f) and not wvfs.islink(f):
            # a real directory sits where the file should go; remove it
            wvfs.rmtree(f, forcibly=True)
        if self._repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
            # a parent directory component may itself be a file/symlink
            for p in reversed(list(util.finddirs(f))):
                if wvfs.isfileorlink(p):
                    wvfs.unlink(p)
                    break

    def setflags(self, l, x):
        self._repo.wvfs.setflags(self._path, l, x)
1761
1761
1762 class overlayworkingctx(committablectx):
1762 class overlayworkingctx(committablectx):
1763 """Wraps another mutable context with a write-back cache that can be
1763 """Wraps another mutable context with a write-back cache that can be
1764 converted into a commit context.
1764 converted into a commit context.
1765
1765
1766 self._cache[path] maps to a dict with keys: {
1766 self._cache[path] maps to a dict with keys: {
1767 'exists': bool?
1767 'exists': bool?
1768 'date': date?
1768 'date': date?
1769 'data': str?
1769 'data': str?
1770 'flags': str?
1770 'flags': str?
1771 'copied': str? (path or None)
1771 'copied': str? (path or None)
1772 }
1772 }
1773 If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
1773 If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
1774 is `False`, the file was deleted.
1774 is `False`, the file was deleted.
1775 """
1775 """
1776
1776
1777 def __init__(self, repo):
1777 def __init__(self, repo):
1778 super(overlayworkingctx, self).__init__(repo)
1778 super(overlayworkingctx, self).__init__(repo)
1779 self.clean()
1779 self.clean()
1780
1780
1781 def setbase(self, wrappedctx):
1781 def setbase(self, wrappedctx):
1782 self._wrappedctx = wrappedctx
1782 self._wrappedctx = wrappedctx
1783 self._parents = [wrappedctx]
1783 self._parents = [wrappedctx]
1784 # Drop old manifest cache as it is now out of date.
1784 # Drop old manifest cache as it is now out of date.
1785 # This is necessary when, e.g., rebasing several nodes with one
1785 # This is necessary when, e.g., rebasing several nodes with one
1786 # ``overlayworkingctx`` (e.g. with --collapse).
1786 # ``overlayworkingctx`` (e.g. with --collapse).
1787 util.clearcachedproperty(self, '_manifest')
1787 util.clearcachedproperty(self, '_manifest')
1788
1788
1789 def data(self, path):
1789 def data(self, path):
1790 if self.isdirty(path):
1790 if self.isdirty(path):
1791 if self._cache[path]['exists']:
1791 if self._cache[path]['exists']:
1792 if self._cache[path]['data']:
1792 if self._cache[path]['data']:
1793 return self._cache[path]['data']
1793 return self._cache[path]['data']
1794 else:
1794 else:
1795 # Must fallback here, too, because we only set flags.
1795 # Must fallback here, too, because we only set flags.
1796 return self._wrappedctx[path].data()
1796 return self._wrappedctx[path].data()
1797 else:
1797 else:
1798 raise error.ProgrammingError("No such file or directory: %s" %
1798 raise error.ProgrammingError("No such file or directory: %s" %
1799 path)
1799 path)
1800 else:
1800 else:
1801 return self._wrappedctx[path].data()
1801 return self._wrappedctx[path].data()
1802
1802
1803 @propertycache
1803 @propertycache
1804 def _manifest(self):
1804 def _manifest(self):
1805 parents = self.parents()
1805 parents = self.parents()
1806 man = parents[0].manifest().copy()
1806 man = parents[0].manifest().copy()
1807
1807
1808 flag = self._flagfunc
1808 flag = self._flagfunc
1809 for path in self.added():
1809 for path in self.added():
1810 man[path] = addednodeid
1810 man[path] = addednodeid
1811 man.setflag(path, flag(path))
1811 man.setflag(path, flag(path))
1812 for path in self.modified():
1812 for path in self.modified():
1813 man[path] = modifiednodeid
1813 man[path] = modifiednodeid
1814 man.setflag(path, flag(path))
1814 man.setflag(path, flag(path))
1815 for path in self.removed():
1815 for path in self.removed():
1816 del man[path]
1816 del man[path]
1817 return man
1817 return man
1818
1818
1819 @propertycache
1819 @propertycache
1820 def _flagfunc(self):
1820 def _flagfunc(self):
1821 def f(path):
1821 def f(path):
1822 return self._cache[path]['flags']
1822 return self._cache[path]['flags']
1823 return f
1823 return f
1824
1824
1825 def files(self):
1825 def files(self):
1826 return sorted(self.added() + self.modified() + self.removed())
1826 return sorted(self.added() + self.modified() + self.removed())
1827
1827
1828 def modified(self):
1828 def modified(self):
1829 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1829 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1830 self._existsinparent(f)]
1830 self._existsinparent(f)]
1831
1831
1832 def added(self):
1832 def added(self):
1833 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1833 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1834 not self._existsinparent(f)]
1834 not self._existsinparent(f)]
1835
1835
1836 def removed(self):
1836 def removed(self):
1837 return [f for f in self._cache.keys() if
1837 return [f for f in self._cache.keys() if
1838 not self._cache[f]['exists'] and self._existsinparent(f)]
1838 not self._cache[f]['exists'] and self._existsinparent(f)]
1839
1839
1840 def isinmemory(self):
1840 def isinmemory(self):
1841 return True
1841 return True
1842
1842
1843 def filedate(self, path):
1843 def filedate(self, path):
1844 if self.isdirty(path):
1844 if self.isdirty(path):
1845 return self._cache[path]['date']
1845 return self._cache[path]['date']
1846 else:
1846 else:
1847 return self._wrappedctx[path].date()
1847 return self._wrappedctx[path].date()
1848
1848
1849 def markcopied(self, path, origin):
1849 def markcopied(self, path, origin):
1850 if self.isdirty(path):
1850 if self.isdirty(path):
1851 self._cache[path]['copied'] = origin
1851 self._cache[path]['copied'] = origin
1852 else:
1852 else:
1853 raise error.ProgrammingError('markcopied() called on clean context')
1853 raise error.ProgrammingError('markcopied() called on clean context')
1854
1854
1855 def copydata(self, path):
1855 def copydata(self, path):
1856 if self.isdirty(path):
1856 if self.isdirty(path):
1857 return self._cache[path]['copied']
1857 return self._cache[path]['copied']
1858 else:
1858 else:
1859 raise error.ProgrammingError('copydata() called on clean context')
1859 raise error.ProgrammingError('copydata() called on clean context')
1860
1860
1861 def flags(self, path):
1861 def flags(self, path):
1862 if self.isdirty(path):
1862 if self.isdirty(path):
1863 if self._cache[path]['exists']:
1863 if self._cache[path]['exists']:
1864 return self._cache[path]['flags']
1864 return self._cache[path]['flags']
1865 else:
1865 else:
1866 raise error.ProgrammingError("No such file or directory: %s" %
1866 raise error.ProgrammingError("No such file or directory: %s" %
1867 self._path)
1867 self._path)
1868 else:
1868 else:
1869 return self._wrappedctx[path].flags()
1869 return self._wrappedctx[path].flags()
1870
1870
1871 def _existsinparent(self, path):
1871 def _existsinparent(self, path):
1872 try:
1872 try:
1873 # ``commitctx` raises a ``ManifestLookupError`` if a path does not
1873 # ``commitctx` raises a ``ManifestLookupError`` if a path does not
1874 # exist, unlike ``workingctx``, which returns a ``workingfilectx``
1874 # exist, unlike ``workingctx``, which returns a ``workingfilectx``
1875 # with an ``exists()`` function.
1875 # with an ``exists()`` function.
1876 self._wrappedctx[path]
1876 self._wrappedctx[path]
1877 return True
1877 return True
1878 except error.ManifestLookupError:
1878 except error.ManifestLookupError:
1879 return False
1879 return False
1880
1880
1881 def _auditconflicts(self, path):
1881 def _auditconflicts(self, path):
1882 """Replicates conflict checks done by wvfs.write().
1882 """Replicates conflict checks done by wvfs.write().
1883
1883
1884 Since we never write to the filesystem and never call `applyupdates` in
1884 Since we never write to the filesystem and never call `applyupdates` in
1885 IMM, we'll never check that a path is actually writable -- e.g., because
1885 IMM, we'll never check that a path is actually writable -- e.g., because
1886 it adds `a/foo`, but `a` is actually a file in the other commit.
1886 it adds `a/foo`, but `a` is actually a file in the other commit.
1887 """
1887 """
1888 def fail(path, component):
1888 def fail(path, component):
1889 # p1() is the base and we're receiving "writes" for p2()'s
1889 # p1() is the base and we're receiving "writes" for p2()'s
1890 # files.
1890 # files.
1891 if 'l' in self.p1()[component].flags():
1891 if 'l' in self.p1()[component].flags():
1892 raise error.Abort("error: %s conflicts with symlink %s "
1892 raise error.Abort("error: %s conflicts with symlink %s "
1893 "in %s." % (path, component,
1893 "in %s." % (path, component,
1894 self.p1().rev()))
1894 self.p1().rev()))
1895 else:
1895 else:
1896 raise error.Abort("error: '%s' conflicts with file '%s' in "
1896 raise error.Abort("error: '%s' conflicts with file '%s' in "
1897 "%s." % (path, component,
1897 "%s." % (path, component,
1898 self.p1().rev()))
1898 self.p1().rev()))
1899
1899
1900 # Test that each new directory to be created to write this path from p2
1900 # Test that each new directory to be created to write this path from p2
1901 # is not a file in p1.
1901 # is not a file in p1.
1902 components = path.split('/')
1902 components = path.split('/')
1903 for i in pycompat.xrange(len(components)):
1903 for i in pycompat.xrange(len(components)):
1904 component = "/".join(components[0:i])
1904 component = "/".join(components[0:i])
1905 if component in self.p1():
1905 if component in self.p1():
1906 fail(path, component)
1906 fail(path, component)
1907
1907
1908 # Test the other direction -- that this path from p2 isn't a directory
1908 # Test the other direction -- that this path from p2 isn't a directory
1909 # in p1 (test that p1 doesn't any paths matching `path/*`).
1909 # in p1 (test that p1 doesn't any paths matching `path/*`).
1910 match = matchmod.match('/', '', [path + '/'], default=b'relpath')
1910 match = matchmod.match('/', '', [path + '/'], default=b'relpath')
1911 matches = self.p1().manifest().matches(match)
1911 matches = self.p1().manifest().matches(match)
1912 if len(matches) > 0:
1912 if len(matches) > 0:
1913 if len(matches) == 1 and matches.keys()[0] == path:
1913 if len(matches) == 1 and matches.keys()[0] == path:
1914 return
1914 return
1915 raise error.Abort("error: file '%s' cannot be written because "
1915 raise error.Abort("error: file '%s' cannot be written because "
1916 " '%s/' is a folder in %s (containing %d "
1916 " '%s/' is a folder in %s (containing %d "
1917 "entries: %s)"
1917 "entries: %s)"
1918 % (path, path, self.p1(), len(matches),
1918 % (path, path, self.p1(), len(matches),
1919 ', '.join(matches.keys())))
1919 ', '.join(matches.keys())))
1920
1920
1921 def write(self, path, data, flags='', **kwargs):
1921 def write(self, path, data, flags='', **kwargs):
1922 if data is None:
1922 if data is None:
1923 raise error.ProgrammingError("data must be non-None")
1923 raise error.ProgrammingError("data must be non-None")
1924 self._auditconflicts(path)
1924 self._auditconflicts(path)
1925 self._markdirty(path, exists=True, data=data, date=dateutil.makedate(),
1925 self._markdirty(path, exists=True, data=data, date=dateutil.makedate(),
1926 flags=flags)
1926 flags=flags)
1927
1927
1928 def setflags(self, path, l, x):
1928 def setflags(self, path, l, x):
1929 flag = ''
1930 if l:
1931 flag = 'l'
1932 elif x:
1933 flag = 'x'
1929 self._markdirty(path, exists=True, date=dateutil.makedate(),
1934 self._markdirty(path, exists=True, date=dateutil.makedate(),
1930 flags=(l and 'l' or '') + (x and 'x' or ''))
1935 flags=flag)
1931
1936
1932 def remove(self, path):
1937 def remove(self, path):
1933 self._markdirty(path, exists=False)
1938 self._markdirty(path, exists=False)
1934
1939
1935 def exists(self, path):
1940 def exists(self, path):
1936 """exists behaves like `lexists`, but needs to follow symlinks and
1941 """exists behaves like `lexists`, but needs to follow symlinks and
1937 return False if they are broken.
1942 return False if they are broken.
1938 """
1943 """
1939 if self.isdirty(path):
1944 if self.isdirty(path):
1940 # If this path exists and is a symlink, "follow" it by calling
1945 # If this path exists and is a symlink, "follow" it by calling
1941 # exists on the destination path.
1946 # exists on the destination path.
1942 if (self._cache[path]['exists'] and
1947 if (self._cache[path]['exists'] and
1943 'l' in self._cache[path]['flags']):
1948 'l' in self._cache[path]['flags']):
1944 return self.exists(self._cache[path]['data'].strip())
1949 return self.exists(self._cache[path]['data'].strip())
1945 else:
1950 else:
1946 return self._cache[path]['exists']
1951 return self._cache[path]['exists']
1947
1952
1948 return self._existsinparent(path)
1953 return self._existsinparent(path)
1949
1954
1950 def lexists(self, path):
1955 def lexists(self, path):
1951 """lexists returns True if the path exists"""
1956 """lexists returns True if the path exists"""
1952 if self.isdirty(path):
1957 if self.isdirty(path):
1953 return self._cache[path]['exists']
1958 return self._cache[path]['exists']
1954
1959
1955 return self._existsinparent(path)
1960 return self._existsinparent(path)
1956
1961
1957 def size(self, path):
1962 def size(self, path):
1958 if self.isdirty(path):
1963 if self.isdirty(path):
1959 if self._cache[path]['exists']:
1964 if self._cache[path]['exists']:
1960 return len(self._cache[path]['data'])
1965 return len(self._cache[path]['data'])
1961 else:
1966 else:
1962 raise error.ProgrammingError("No such file or directory: %s" %
1967 raise error.ProgrammingError("No such file or directory: %s" %
1963 self._path)
1968 self._path)
1964 return self._wrappedctx[path].size()
1969 return self._wrappedctx[path].size()
1965
1970
1966 def tomemctx(self, text, branch=None, extra=None, date=None, parents=None,
1971 def tomemctx(self, text, branch=None, extra=None, date=None, parents=None,
1967 user=None, editor=None):
1972 user=None, editor=None):
1968 """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
1973 """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
1969 committed.
1974 committed.
1970
1975
1971 ``text`` is the commit message.
1976 ``text`` is the commit message.
1972 ``parents`` (optional) are rev numbers.
1977 ``parents`` (optional) are rev numbers.
1973 """
1978 """
1974 # Default parents to the wrapped contexts' if not passed.
1979 # Default parents to the wrapped contexts' if not passed.
1975 if parents is None:
1980 if parents is None:
1976 parents = self._wrappedctx.parents()
1981 parents = self._wrappedctx.parents()
1977 if len(parents) == 1:
1982 if len(parents) == 1:
1978 parents = (parents[0], None)
1983 parents = (parents[0], None)
1979
1984
1980 # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
1985 # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
1981 if parents[1] is None:
1986 if parents[1] is None:
1982 parents = (self._repo[parents[0]], None)
1987 parents = (self._repo[parents[0]], None)
1983 else:
1988 else:
1984 parents = (self._repo[parents[0]], self._repo[parents[1]])
1989 parents = (self._repo[parents[0]], self._repo[parents[1]])
1985
1990
1986 files = self._cache.keys()
1991 files = self._cache.keys()
1987 def getfile(repo, memctx, path):
1992 def getfile(repo, memctx, path):
1988 if self._cache[path]['exists']:
1993 if self._cache[path]['exists']:
1989 return memfilectx(repo, memctx, path,
1994 return memfilectx(repo, memctx, path,
1990 self._cache[path]['data'],
1995 self._cache[path]['data'],
1991 'l' in self._cache[path]['flags'],
1996 'l' in self._cache[path]['flags'],
1992 'x' in self._cache[path]['flags'],
1997 'x' in self._cache[path]['flags'],
1993 self._cache[path]['copied'])
1998 self._cache[path]['copied'])
1994 else:
1999 else:
1995 # Returning None, but including the path in `files`, is
2000 # Returning None, but including the path in `files`, is
1996 # necessary for memctx to register a deletion.
2001 # necessary for memctx to register a deletion.
1997 return None
2002 return None
1998 return memctx(self._repo, parents, text, files, getfile, date=date,
2003 return memctx(self._repo, parents, text, files, getfile, date=date,
1999 extra=extra, user=user, branch=branch, editor=editor)
2004 extra=extra, user=user, branch=branch, editor=editor)
2000
2005
2001 def isdirty(self, path):
2006 def isdirty(self, path):
2002 return path in self._cache
2007 return path in self._cache
2003
2008
def isempty(self):
    """Return True if committing this overlay would be an empty commit.

    Entries that turn out to be clean (identical to the wrapped context)
    are discarded first, so a cache full of no-op writes still counts as
    empty.
    """
    self._compact()
    return not self._cache
2009
2014
def clean(self):
    """Discard every pending write, resetting the overlay to the wrapped
    context's state."""
    self._cache = {}
2012
2017
2013 def _compact(self):
2018 def _compact(self):
2014 """Removes keys from the cache that are actually clean, by comparing
2019 """Removes keys from the cache that are actually clean, by comparing
2015 them with the underlying context.
2020 them with the underlying context.
2016
2021
2017 This can occur during the merge process, e.g. by passing --tool :local
2022 This can occur during the merge process, e.g. by passing --tool :local
2018 to resolve a conflict.
2023 to resolve a conflict.
2019 """
2024 """
2020 keys = []
2025 keys = []
2021 for path in self._cache.keys():
2026 for path in self._cache.keys():
2022 cache = self._cache[path]
2027 cache = self._cache[path]
2023 try:
2028 try:
2024 underlying = self._wrappedctx[path]
2029 underlying = self._wrappedctx[path]
2025 if (underlying.data() == cache['data'] and
2030 if (underlying.data() == cache['data'] and
2026 underlying.flags() == cache['flags']):
2031 underlying.flags() == cache['flags']):
2027 keys.append(path)
2032 keys.append(path)
2028 except error.ManifestLookupError:
2033 except error.ManifestLookupError:
2029 # Path not in the underlying manifest (created).
2034 # Path not in the underlying manifest (created).
2030 continue
2035 continue
2031
2036
2032 for path in keys:
2037 for path in keys:
2033 del self._cache[path]
2038 del self._cache[path]
2034 return keys
2039 return keys
2035
2040
2036 def _markdirty(self, path, exists, data=None, date=None, flags=''):
2041 def _markdirty(self, path, exists, data=None, date=None, flags=''):
2037 self._cache[path] = {
2042 self._cache[path] = {
2038 'exists': exists,
2043 'exists': exists,
2039 'data': data,
2044 'data': data,
2040 'date': date,
2045 'date': date,
2041 'flags': flags,
2046 'flags': flags,
2042 'copied': None,
2047 'copied': None,
2043 }
2048 }
2044
2049
def filectx(self, path, filelog=None):
    """Return an ``overlayworkingfilectx`` for ``path`` whose reads and
    writes go through this overlay context."""
    return overlayworkingfilectx(self._repo, path, parent=self,
                                 filelog=filelog)
2048
2053
class overlayworkingfilectx(committablefilectx):
    """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
    cache, which can be flushed through later by calling ``flush()``.

    Almost every operation delegates to the owning overlayworkingctx
    (``self._parent``), which holds the actual cache keyed by path.
    """

    def __init__(self, repo, path, filelog=None, parent=None):
        super(overlayworkingfilectx, self).__init__(repo, path, filelog,
                                                    parent)
        self._repo = repo
        # the owning overlayworkingctx; all state lives there
        self._parent = parent
        self._path = path

    def cmp(self, fctx):
        # content-only comparison; flags are not considered here
        return self.data() != fctx.data()

    def changectx(self):
        return self._parent

    def data(self):
        return self._parent.data(self._path)

    def date(self):
        return self._parent.filedate(self._path)

    def exists(self):
        # no filesystem backing, so exists and lexists coincide
        return self.lexists()

    def lexists(self):
        return self._parent.exists(self._path)

    def renamed(self):
        """Return (source path, source filenode) if this file was copied,
        else None."""
        path = self._parent.copydata(self._path)
        if not path:
            return None
        # nullid fallback when the copy source is absent from p1's manifest
        return path, self._changectx._parents[0]._manifest.get(path, nullid)

    def size(self):
        return self._parent.size(self._path)

    def markcopied(self, origin):
        self._parent.markcopied(self._path, origin)

    def audit(self):
        # in-memory only: no working-directory path to audit
        pass

    def flags(self):
        return self._parent.flags(self._path)

    def setflags(self, islink, isexec):
        return self._parent.setflags(self._path, islink, isexec)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        # backgroundclose is accepted for interface parity but not
        # forwarded: there is no file handle to close in an overlay
        return self._parent.write(self._path, data, flags, **kwargs)

    def remove(self, ignoremissing=False):
        # NOTE(review): ignoremissing is not forwarded to the parent —
        # presumably removal of a missing path is handled there; confirm
        return self._parent.remove(self._path)

    def clearunknown(self):
        # nothing on disk can shadow this path; nothing to clear
        pass
2107
2112
class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """
    def __init__(self, repo, changes,
                 text="", user=None, date=None, extra=None):
        # NOTE: super(workingctx, ...) deliberately skips
        # workingctx.__init__ and dispatches to the next class in the
        # MRO, handing over the precomputed ``changes`` status as-is
        super(workingctx, self).__init__(repo, text, user, date, extra,
                                         changes)

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        if clean:
            # everything tracked that is not part of this commit is
            # reported as clean, regardless of its on-disk state
            clean = [f for f in self._manifest if f not in self._changedset]
        else:
            clean = []
        # ignored/deleted/unknown are always reported empty here
        return scmutil.status([f for f in self._status.modified if match(f)],
                              [f for f in self._status.added if match(f)],
                              [f for f in self._status.removed if match(f)],
                              [], [], [], clean)

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        (modified + added + removed).
        """
        changed = set(self._status.modified)
        changed.update(self._status.added)
        changed.update(self._status.removed)
        return changed
2143
2148
def makecachingfilectxfn(func):
    """Wrap ``func`` so its result is memoized per ``path``.

    ``util.cachefunc`` cannot be used here because it keys on all
    arguments, which would create a reference cycle through the repo and
    memctx arguments.
    """
    cache = {}

    def getfilectx(repo, memctx, path):
        try:
            return cache[path]
        except KeyError:
            fctx = func(repo, memctx, path)
            cache[path] = fctx
            return fctx

    return getfilectx
2159
2164
def memfilefromctx(ctx):
    """Given a context return a filectxfn serving ``ctx[path]``.

    This is a convenience helper for building a memctx based on another
    context.
    """
    def getfilectx(repo, memctx, path):
        fctx = ctx[path]
        # this is weird but apparently we only keep track of one parent
        # (why not only store that instead of a tuple?)
        renamed = fctx.renamed()
        copied = renamed[0] if renamed else renamed
        return memfilectx(repo, memctx, path, fctx.data(),
                          islink=fctx.islink(), isexec=fctx.isexec(),
                          copied=copied)

    return getfilectx
2178
2183
def memfilefrompatch(patchstore):
    """Given a patch (e.g. patchstore object) return a filectxfn.

    This is a convenience helper for building a memctx based on a
    patchstore.
    """
    def getfilectx(repo, memctx, path):
        data, mode, copied = patchstore.getfile(path)
        if data is None:
            # file deleted by the patch; memctx records this as a removal
            return None
        islink, isexec = mode
        return memfilectx(repo, memctx, path, data, islink=islink,
                          isexec=isexec, copied=copied)

    return getfilectx
2193
2198
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time while
    related files data and is made available through a callback
    mechanism.  'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn return None for recent
    Mercurial. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by dateutil.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, branch=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra)
        # not yet committed: no revision number or node assigned
        self._rev = None
        self._node = None
        # missing parents are normalized to the null revision
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [self._repo[p] for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        if branch is not None:
            self._extra['branch'] = encoding.fromlocal(branch)
        self.substate = {}

        if isinstance(filectxfn, patch.filestore):
            filectxfn = memfilefrompatch(filectxfn)
        elif not callable(filectxfn):
            # if store is not callable, wrap it in a function
            filectxfn = memfilefromctx(filectxfn)

        # memoizing increases performance for e.g. vcs convert scenarios.
        self._filectxfn = makecachingfilectxfn(filectxfn)

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            p1node = nullid
            p2node = nullid
            p = pctx[f].parents() # if file isn't in pctx, check p2?
            if len(p) > 0:
                p1node = p[0].filenode()
                if len(p) > 1:
                    p2node = p[1].filenode()
            man[f] = revlog.hash(self[f].data(), p1node, p2node)

        for f in self._status.added:
            # new files have no filelog parents
            man[f] = revlog.hash(self[f].data(), nullid, nullid)

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                # not tracked by either parent: a new file
                added.append(f)
            elif self[f]:
                # filectxfn returned a filectx: file has content
                modified.append(f)
            else:
                # filectxfn returned None: file was removed
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
2321
class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, changectx, path, data, islink=False,
                 isexec=False, copied=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, changectx)
        self._data = data
        # manifest flags are single characters: 'l' takes precedence over
        # 'x', and a plain file stores ''
        self._flags = 'l' if islink else ('x' if isexec else '')
        self._copied = (copied, nullid) if copied else None

    def data(self):
        """Return the in-memory file content."""
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags, **kwargs):
        """wraps repo.wwrite"""
        self._data = data
2359
class overlayfilectx(committablefilectx):
    """Like memfilectx but take an original filectx and optional parameters to
    override parts of it. This is useful when fctx.data() is expensive (i.e.
    flag processor is expensive) and raw data, flags, and filenode could be
    reused (ex. rebase or mode-only amend a REVIDX_EXTSTORED file).
    """

    def __init__(self, originalfctx, datafunc=None, path=None, flags=None,
                 copied=None, ctx=None):
        """originalfctx: filecontext to duplicate

        datafunc: None or a function to override data (file content). It is a
        function to be lazy. path, flags, copied, ctx: None or overridden value

        copied could be (path, rev), or False. copied could also be just path,
        and will be converted to (path, nullid). This simplifies some callers.
        """

        if path is None:
            path = originalfctx.path()
        if ctx is None:
            ctx = originalfctx.changectx()
            # no override given: trivially the same ctx
            ctxmatch = lambda: True
        else:
            ctxmatch = lambda: ctx == originalfctx.changectx()

        repo = originalfctx.repo()
        flog = originalfctx.filelog()
        super(overlayfilectx, self).__init__(repo, path, flog, ctx)

        if copied is None:
            copied = originalfctx.renamed()
            copiedmatch = lambda: True
        else:
            if copied and not isinstance(copied, tuple):
                # repo._filecommit will recalculate copyrev so nullid is okay
                copied = (copied, nullid)
            copiedmatch = lambda: copied == originalfctx.renamed()

        # When data, copied (could affect data), ctx (could affect filelog
        # parents) are not overridden, rawdata, rawflags, and filenode may be
        # reused (repo._filecommit should double check filelog parents).
        #
        # path, flags are not hashed in filelog (but in manifestlog) so they do
        # not affect reusable here.
        #
        # If ctx or copied is overridden to a same value with originalfctx,
        # still consider it's reusable. originalfctx.renamed() may be a bit
        # expensive so it's not called unless necessary. Assuming datafunc is
        # always expensive, do not call it for this "reusable" test.
        reusable = datafunc is None and ctxmatch() and copiedmatch()

        if datafunc is None:
            datafunc = originalfctx.data
        if flags is None:
            flags = originalfctx.flags()

        self._datafunc = datafunc
        self._flags = flags
        self._copied = copied

        if reusable:
            # copy extra fields from originalfctx
            attrs = ['rawdata', 'rawflags', '_filenode', '_filerev']
            for attr_ in attrs:
                if util.safehasattr(originalfctx, attr_):
                    setattr(self, attr_, getattr(originalfctx, attr_))

    def data(self):
        """Return the (possibly overridden) file content, computed lazily."""
        return self._datafunc()
2425
2430
2426 class metadataonlyctx(committablectx):
2431 class metadataonlyctx(committablectx):
2427 """Like memctx but it's reusing the manifest of different commit.
2432 """Like memctx but it's reusing the manifest of different commit.
2428 Intended to be used by lightweight operations that are creating
2433 Intended to be used by lightweight operations that are creating
2429 metadata-only changes.
2434 metadata-only changes.
2430
2435
2431 Revision information is supplied at initialization time. 'repo' is the
2436 Revision information is supplied at initialization time. 'repo' is the
2432 current localrepo, 'ctx' is original revision which manifest we're reuisng
2437 current localrepo, 'ctx' is original revision which manifest we're reuisng
2433 'parents' is a sequence of two parent revisions identifiers (pass None for
2438 'parents' is a sequence of two parent revisions identifiers (pass None for
2434 every missing parent), 'text' is the commit.
2439 every missing parent), 'text' is the commit.
2435
2440
2436 user receives the committer name and defaults to current repository
2441 user receives the committer name and defaults to current repository
2437 username, date is the commit date in any format supported by
2442 username, date is the commit date in any format supported by
2438 dateutil.parsedate() and defaults to current date, extra is a dictionary of
2443 dateutil.parsedate() and defaults to current date, extra is a dictionary of
2439 metadata or is left empty.
2444 metadata or is left empty.
2440 """
2445 """
2441 def __init__(self, repo, originalctx, parents=None, text=None, user=None,
2446 def __init__(self, repo, originalctx, parents=None, text=None, user=None,
2442 date=None, extra=None, editor=False):
2447 date=None, extra=None, editor=False):
2443 if text is None:
2448 if text is None:
2444 text = originalctx.description()
2449 text = originalctx.description()
2445 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2450 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2446 self._rev = None
2451 self._rev = None
2447 self._node = None
2452 self._node = None
2448 self._originalctx = originalctx
2453 self._originalctx = originalctx
2449 self._manifestnode = originalctx.manifestnode()
2454 self._manifestnode = originalctx.manifestnode()
2450 if parents is None:
2455 if parents is None:
2451 parents = originalctx.parents()
2456 parents = originalctx.parents()
2452 else:
2457 else:
2453 parents = [repo[p] for p in parents if p is not None]
2458 parents = [repo[p] for p in parents if p is not None]
2454 parents = parents[:]
2459 parents = parents[:]
2455 while len(parents) < 2:
2460 while len(parents) < 2:
2456 parents.append(repo[nullid])
2461 parents.append(repo[nullid])
2457 p1, p2 = self._parents = parents
2462 p1, p2 = self._parents = parents
2458
2463
2459 # sanity check to ensure that the reused manifest parents are
2464 # sanity check to ensure that the reused manifest parents are
2460 # manifests of our commit parents
2465 # manifests of our commit parents
2461 mp1, mp2 = self.manifestctx().parents
2466 mp1, mp2 = self.manifestctx().parents
2462 if p1 != nullid and p1.manifestnode() != mp1:
2467 if p1 != nullid and p1.manifestnode() != mp1:
2463 raise RuntimeError('can\'t reuse the manifest: '
2468 raise RuntimeError('can\'t reuse the manifest: '
2464 'its p1 doesn\'t match the new ctx p1')
2469 'its p1 doesn\'t match the new ctx p1')
2465 if p2 != nullid and p2.manifestnode() != mp2:
2470 if p2 != nullid and p2.manifestnode() != mp2:
2466 raise RuntimeError('can\'t reuse the manifest: '
2471 raise RuntimeError('can\'t reuse the manifest: '
2467 'its p2 doesn\'t match the new ctx p2')
2472 'its p2 doesn\'t match the new ctx p2')
2468
2473
2469 self._files = originalctx.files()
2474 self._files = originalctx.files()
2470 self.substate = {}
2475 self.substate = {}
2471
2476
2472 if editor:
2477 if editor:
2473 self._text = editor(self._repo, self, [])
2478 self._text = editor(self._repo, self, [])
2474 self._repo.savecommitmessage(self._text)
2479 self._repo.savecommitmessage(self._text)
2475
2480
2476 def manifestnode(self):
2481 def manifestnode(self):
2477 return self._manifestnode
2482 return self._manifestnode
2478
2483
2479 @property
2484 @property
2480 def _manifestctx(self):
2485 def _manifestctx(self):
2481 return self._repo.manifestlog[self._manifestnode]
2486 return self._repo.manifestlog[self._manifestnode]
2482
2487
2483 def filectx(self, path, filelog=None):
2488 def filectx(self, path, filelog=None):
2484 return self._originalctx.filectx(path, filelog=filelog)
2489 return self._originalctx.filectx(path, filelog=filelog)
2485
2490
2486 def commit(self):
2491 def commit(self):
2487 """commit context to the repo"""
2492 """commit context to the repo"""
2488 return self._repo.commitctx(self)
2493 return self._repo.commitctx(self)
2489
2494
2490 @property
2495 @property
2491 def _manifest(self):
2496 def _manifest(self):
2492 return self._originalctx.manifest()
2497 return self._originalctx.manifest()
2493
2498
2494 @propertycache
2499 @propertycache
2495 def _status(self):
2500 def _status(self):
2496 """Calculate exact status from ``files`` specified in the ``origctx``
2501 """Calculate exact status from ``files`` specified in the ``origctx``
2497 and parents manifests.
2502 and parents manifests.
2498 """
2503 """
2499 man1 = self.p1().manifest()
2504 man1 = self.p1().manifest()
2500 p2 = self._parents[1]
2505 p2 = self._parents[1]
2501 # "1 < len(self._parents)" can't be used for checking
2506 # "1 < len(self._parents)" can't be used for checking
2502 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2507 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2503 # explicitly initialized by the list, of which length is 2.
2508 # explicitly initialized by the list, of which length is 2.
2504 if p2.node() != nullid:
2509 if p2.node() != nullid:
2505 man2 = p2.manifest()
2510 man2 = p2.manifest()
2506 managing = lambda f: f in man1 or f in man2
2511 managing = lambda f: f in man1 or f in man2
2507 else:
2512 else:
2508 managing = lambda f: f in man1
2513 managing = lambda f: f in man1
2509
2514
2510 modified, added, removed = [], [], []
2515 modified, added, removed = [], [], []
2511 for f in self._files:
2516 for f in self._files:
2512 if not managing(f):
2517 if not managing(f):
2513 added.append(f)
2518 added.append(f)
2514 elif f in self:
2519 elif f in self:
2515 modified.append(f)
2520 modified.append(f)
2516 else:
2521 else:
2517 removed.append(f)
2522 removed.append(f)
2518
2523
2519 return scmutil.status(modified, added, removed, [], [], [], [])
2524 return scmutil.status(modified, added, removed, [], [], [], [])
2520
2525
class arbitraryfilectx(object):
    """Allows you to use filectx-like functions on a file in an arbitrary
    location on disk, possibly not in the working directory.
    """
    def __init__(self, path, repo=None):
        # Repo is optional because contrib/simplemerge uses this class.
        self._repo = repo
        self._path = path

    def cmp(self, fctx):
        """Return True if this file's content differs from ``fctx``'s."""
        # filecmp follows symlinks whereas `cmp` should not, so skip the fast
        # path if either side is a symlink.
        symlinks = ('l' in self.flags() or 'l' in fctx.flags())
        if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
            # Add a fast-path for merge if both sides are disk-backed.
            # Note that filecmp uses the opposite return values (True if same)
            # from our cmp functions (True if different).
            return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
        return self.data() != fctx.data()

    def path(self):
        return self._path

    def flags(self):
        # arbitrary on-disk files carry no 'x'/'l' manifest flags
        return ''

    def data(self):
        return util.readfile(self._path)

    def decodeddata(self):
        with open(self._path, "rb") as f:
            return f.read()

    def remove(self):
        util.unlink(self._path)

    def write(self, data, flags, **kwargs):
        assert not flags
        # open in binary mode: ``data`` is bytes, matching data()/decodeddata()
        # which read binary; text mode would raise on Python 3 and mangle
        # newlines on Windows
        with open(self._path, "wb") as f:
            f.write(data)
General Comments 0
You need to be logged in to leave comments. Login now