context: stop catching TypeError when converting hex nodeid to binary...
Martin von Zweigbergk
r39943:8cef5703 default
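The change below narrows the exception handling in the 40-character (full hex nodeid) branch of changectx.__init__: except (TypeError, LookupError) becomes except LookupError, so only a well-formed but unknown node falls through to the generic "unknown revision" error at the end of the constructor, while a TypeError from converting the hex nodeid to binary now propagates. A minimal sketch of that control flow follows; bin is aliased from binascii here, and resolve_hex_changeid, changelog_rev, and RepoLookupError are hypothetical stand-ins for the real mercurial.node / error / changelog APIs, not the actual implementation.

    from binascii import unhexlify as bin   # stand-in for mercurial.node.bin

    class RepoLookupError(Exception):
        """Stand-in for mercurial.error.RepoLookupError."""

    def resolve_hex_changeid(changeid, changelog_rev):
        # changelog_rev is assumed to raise LookupError for a node that is
        # well formed but not present, mirroring changelog.rev().
        try:
            return changelog_rev(bin(changeid))
        except LookupError:
            # Before 8cef5703 this clause also caught TypeError, so a
            # mis-typed changeid was silently reported as an unknown
            # revision; now such a TypeError propagates to the caller.
            pass
        raise RepoLookupError("unknown revision %r" % changeid)

    # usage sketch: a dict lookup raises KeyError (a LookupError subclass)
    # resolve_hex_changeid('aa' * 20, {bin('aa' * 20): 0}.__getitem__) == 0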
@@ -1,2497 +1,2497 @@
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import filecmp
11 import filecmp
12 import os
12 import os
13 import stat
13 import stat
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 addednodeid,
17 addednodeid,
18 bin,
18 bin,
19 hex,
19 hex,
20 modifiednodeid,
20 modifiednodeid,
21 nullid,
21 nullid,
22 nullrev,
22 nullrev,
23 short,
23 short,
24 wdirfilenodeids,
24 wdirfilenodeids,
25 wdirid,
25 wdirid,
26 )
26 )
27 from . import (
27 from . import (
28 dagop,
28 dagop,
29 encoding,
29 encoding,
30 error,
30 error,
31 fileset,
31 fileset,
32 match as matchmod,
32 match as matchmod,
33 obsolete as obsmod,
33 obsolete as obsmod,
34 patch,
34 patch,
35 pathutil,
35 pathutil,
36 phases,
36 phases,
37 pycompat,
37 pycompat,
38 repoview,
38 repoview,
39 scmutil,
39 scmutil,
40 sparse,
40 sparse,
41 subrepo,
41 subrepo,
42 subrepoutil,
42 subrepoutil,
43 util,
43 util,
44 )
44 )
45 from .utils import (
45 from .utils import (
46 dateutil,
46 dateutil,
47 stringutil,
47 stringutil,
48 )
48 )
49
49
50 propertycache = util.propertycache
50 propertycache = util.propertycache
51
51
52 class basectx(object):
52 class basectx(object):
53 """A basectx object represents the common logic for its children:
53 """A basectx object represents the common logic for its children:
54 changectx: read-only context that is already present in the repo,
54 changectx: read-only context that is already present in the repo,
55 workingctx: a context that represents the working directory and can
55 workingctx: a context that represents the working directory and can
56 be committed,
56 be committed,
57 memctx: a context that represents changes in-memory and can also
57 memctx: a context that represents changes in-memory and can also
58 be committed."""
58 be committed."""
59
59
60 def __init__(self, repo):
60 def __init__(self, repo):
61 self._repo = repo
61 self._repo = repo
62
62
63 def __bytes__(self):
63 def __bytes__(self):
64 return short(self.node())
64 return short(self.node())
65
65
66 __str__ = encoding.strmethod(__bytes__)
66 __str__ = encoding.strmethod(__bytes__)
67
67
68 def __repr__(self):
68 def __repr__(self):
69 return r"<%s %s>" % (type(self).__name__, str(self))
69 return r"<%s %s>" % (type(self).__name__, str(self))
70
70
71 def __eq__(self, other):
71 def __eq__(self, other):
72 try:
72 try:
73 return type(self) == type(other) and self._rev == other._rev
73 return type(self) == type(other) and self._rev == other._rev
74 except AttributeError:
74 except AttributeError:
75 return False
75 return False
76
76
77 def __ne__(self, other):
77 def __ne__(self, other):
78 return not (self == other)
78 return not (self == other)
79
79
80 def __contains__(self, key):
80 def __contains__(self, key):
81 return key in self._manifest
81 return key in self._manifest
82
82
83 def __getitem__(self, key):
83 def __getitem__(self, key):
84 return self.filectx(key)
84 return self.filectx(key)
85
85
86 def __iter__(self):
86 def __iter__(self):
87 return iter(self._manifest)
87 return iter(self._manifest)
88
88
89 def _buildstatusmanifest(self, status):
89 def _buildstatusmanifest(self, status):
90 """Builds a manifest that includes the given status results, if this is
90 """Builds a manifest that includes the given status results, if this is
91 a working copy context. For non-working copy contexts, it just returns
91 a working copy context. For non-working copy contexts, it just returns
92 the normal manifest."""
92 the normal manifest."""
93 return self.manifest()
93 return self.manifest()
94
94
95 def _matchstatus(self, other, match):
95 def _matchstatus(self, other, match):
96 """This internal method provides a way for child objects to override the
96 """This internal method provides a way for child objects to override the
97 match operator.
97 match operator.
98 """
98 """
99 return match
99 return match
100
100
101 def _buildstatus(self, other, s, match, listignored, listclean,
101 def _buildstatus(self, other, s, match, listignored, listclean,
102 listunknown):
102 listunknown):
103 """build a status with respect to another context"""
103 """build a status with respect to another context"""
104 # Load earliest manifest first for caching reasons. More specifically,
104 # Load earliest manifest first for caching reasons. More specifically,
105 # if you have revisions 1000 and 1001, 1001 is probably stored as a
105 # if you have revisions 1000 and 1001, 1001 is probably stored as a
106 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
106 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
107 # 1000 and cache it so that when you read 1001, we just need to apply a
107 # 1000 and cache it so that when you read 1001, we just need to apply a
108 # delta to what's in the cache. So that's one full reconstruction + one
108 # delta to what's in the cache. So that's one full reconstruction + one
109 # delta application.
109 # delta application.
110 mf2 = None
110 mf2 = None
111 if self.rev() is not None and self.rev() < other.rev():
111 if self.rev() is not None and self.rev() < other.rev():
112 mf2 = self._buildstatusmanifest(s)
112 mf2 = self._buildstatusmanifest(s)
113 mf1 = other._buildstatusmanifest(s)
113 mf1 = other._buildstatusmanifest(s)
114 if mf2 is None:
114 if mf2 is None:
115 mf2 = self._buildstatusmanifest(s)
115 mf2 = self._buildstatusmanifest(s)
116
116
117 modified, added = [], []
117 modified, added = [], []
118 removed = []
118 removed = []
119 clean = []
119 clean = []
120 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
120 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
121 deletedset = set(deleted)
121 deletedset = set(deleted)
122 d = mf1.diff(mf2, match=match, clean=listclean)
122 d = mf1.diff(mf2, match=match, clean=listclean)
123 for fn, value in d.iteritems():
123 for fn, value in d.iteritems():
124 if fn in deletedset:
124 if fn in deletedset:
125 continue
125 continue
126 if value is None:
126 if value is None:
127 clean.append(fn)
127 clean.append(fn)
128 continue
128 continue
129 (node1, flag1), (node2, flag2) = value
129 (node1, flag1), (node2, flag2) = value
130 if node1 is None:
130 if node1 is None:
131 added.append(fn)
131 added.append(fn)
132 elif node2 is None:
132 elif node2 is None:
133 removed.append(fn)
133 removed.append(fn)
134 elif flag1 != flag2:
134 elif flag1 != flag2:
135 modified.append(fn)
135 modified.append(fn)
136 elif node2 not in wdirfilenodeids:
136 elif node2 not in wdirfilenodeids:
137 # When comparing files between two commits, we save time by
137 # When comparing files between two commits, we save time by
138 # not comparing the file contents when the nodeids differ.
138 # not comparing the file contents when the nodeids differ.
139 # Note that this means we incorrectly report a reverted change
139 # Note that this means we incorrectly report a reverted change
140 # to a file as a modification.
140 # to a file as a modification.
141 modified.append(fn)
141 modified.append(fn)
142 elif self[fn].cmp(other[fn]):
142 elif self[fn].cmp(other[fn]):
143 modified.append(fn)
143 modified.append(fn)
144 else:
144 else:
145 clean.append(fn)
145 clean.append(fn)
146
146
147 if removed:
147 if removed:
148 # need to filter files if they are already reported as removed
148 # need to filter files if they are already reported as removed
149 unknown = [fn for fn in unknown if fn not in mf1 and
149 unknown = [fn for fn in unknown if fn not in mf1 and
150 (not match or match(fn))]
150 (not match or match(fn))]
151 ignored = [fn for fn in ignored if fn not in mf1 and
151 ignored = [fn for fn in ignored if fn not in mf1 and
152 (not match or match(fn))]
152 (not match or match(fn))]
153 # if they're deleted, don't report them as removed
153 # if they're deleted, don't report them as removed
154 removed = [fn for fn in removed if fn not in deletedset]
154 removed = [fn for fn in removed if fn not in deletedset]
155
155
156 return scmutil.status(modified, added, removed, deleted, unknown,
156 return scmutil.status(modified, added, removed, deleted, unknown,
157 ignored, clean)
157 ignored, clean)
158
158
159 @propertycache
159 @propertycache
160 def substate(self):
160 def substate(self):
161 return subrepoutil.state(self, self._repo.ui)
161 return subrepoutil.state(self, self._repo.ui)
162
162
163 def subrev(self, subpath):
163 def subrev(self, subpath):
164 return self.substate[subpath][1]
164 return self.substate[subpath][1]
165
165
166 def rev(self):
166 def rev(self):
167 return self._rev
167 return self._rev
168 def node(self):
168 def node(self):
169 return self._node
169 return self._node
170 def hex(self):
170 def hex(self):
171 return hex(self.node())
171 return hex(self.node())
172 def manifest(self):
172 def manifest(self):
173 return self._manifest
173 return self._manifest
174 def manifestctx(self):
174 def manifestctx(self):
175 return self._manifestctx
175 return self._manifestctx
176 def repo(self):
176 def repo(self):
177 return self._repo
177 return self._repo
178 def phasestr(self):
178 def phasestr(self):
179 return phases.phasenames[self.phase()]
179 return phases.phasenames[self.phase()]
180 def mutable(self):
180 def mutable(self):
181 return self.phase() > phases.public
181 return self.phase() > phases.public
182
182
183 def matchfileset(self, expr, badfn=None):
183 def matchfileset(self, expr, badfn=None):
184 return fileset.match(self, expr, badfn=badfn)
184 return fileset.match(self, expr, badfn=badfn)
185
185
186 def obsolete(self):
186 def obsolete(self):
187 """True if the changeset is obsolete"""
187 """True if the changeset is obsolete"""
188 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
188 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
189
189
190 def extinct(self):
190 def extinct(self):
191 """True if the changeset is extinct"""
191 """True if the changeset is extinct"""
192 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
192 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
193
193
194 def orphan(self):
194 def orphan(self):
195 """True if the changeset is not obsolete, but its ancestor is"""
195 """True if the changeset is not obsolete, but its ancestor is"""
196 return self.rev() in obsmod.getrevs(self._repo, 'orphan')
196 return self.rev() in obsmod.getrevs(self._repo, 'orphan')
197
197
198 def phasedivergent(self):
198 def phasedivergent(self):
199 """True if the changeset tries to be a successor of a public changeset
199 """True if the changeset tries to be a successor of a public changeset
200
200
201 Only non-public and non-obsolete changesets may be phase-divergent.
201 Only non-public and non-obsolete changesets may be phase-divergent.
202 """
202 """
203 return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')
203 return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')
204
204
205 def contentdivergent(self):
205 def contentdivergent(self):
206 """Is a successor of a changeset with multiple possible successor sets
206 """Is a successor of a changeset with multiple possible successor sets
207
207
208 Only non-public and non-obsolete changesets may be content-divergent.
208 Only non-public and non-obsolete changesets may be content-divergent.
209 """
209 """
210 return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')
210 return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')
211
211
212 def isunstable(self):
212 def isunstable(self):
213 """True if the changeset is either orphan, phase-divergent or
213 """True if the changeset is either orphan, phase-divergent or
214 content-divergent"""
214 content-divergent"""
215 return self.orphan() or self.phasedivergent() or self.contentdivergent()
215 return self.orphan() or self.phasedivergent() or self.contentdivergent()
216
216
217 def instabilities(self):
217 def instabilities(self):
218 """return the list of instabilities affecting this changeset.
218 """return the list of instabilities affecting this changeset.
219
219
220 Instabilities are returned as strings. possible values are:
220 Instabilities are returned as strings. possible values are:
221 - orphan,
221 - orphan,
222 - phase-divergent,
222 - phase-divergent,
223 - content-divergent.
223 - content-divergent.
224 """
224 """
225 instabilities = []
225 instabilities = []
226 if self.orphan():
226 if self.orphan():
227 instabilities.append('orphan')
227 instabilities.append('orphan')
228 if self.phasedivergent():
228 if self.phasedivergent():
229 instabilities.append('phase-divergent')
229 instabilities.append('phase-divergent')
230 if self.contentdivergent():
230 if self.contentdivergent():
231 instabilities.append('content-divergent')
231 instabilities.append('content-divergent')
232 return instabilities
232 return instabilities
233
233
234 def parents(self):
234 def parents(self):
235 """return contexts for each parent changeset"""
235 """return contexts for each parent changeset"""
236 return self._parents
236 return self._parents
237
237
238 def p1(self):
238 def p1(self):
239 return self._parents[0]
239 return self._parents[0]
240
240
241 def p2(self):
241 def p2(self):
242 parents = self._parents
242 parents = self._parents
243 if len(parents) == 2:
243 if len(parents) == 2:
244 return parents[1]
244 return parents[1]
245 return changectx(self._repo, nullrev)
245 return changectx(self._repo, nullrev)
246
246
247 def _fileinfo(self, path):
247 def _fileinfo(self, path):
248 if r'_manifest' in self.__dict__:
248 if r'_manifest' in self.__dict__:
249 try:
249 try:
250 return self._manifest[path], self._manifest.flags(path)
250 return self._manifest[path], self._manifest.flags(path)
251 except KeyError:
251 except KeyError:
252 raise error.ManifestLookupError(self._node, path,
252 raise error.ManifestLookupError(self._node, path,
253 _('not found in manifest'))
253 _('not found in manifest'))
254 if r'_manifestdelta' in self.__dict__ or path in self.files():
254 if r'_manifestdelta' in self.__dict__ or path in self.files():
255 if path in self._manifestdelta:
255 if path in self._manifestdelta:
256 return (self._manifestdelta[path],
256 return (self._manifestdelta[path],
257 self._manifestdelta.flags(path))
257 self._manifestdelta.flags(path))
258 mfl = self._repo.manifestlog
258 mfl = self._repo.manifestlog
259 try:
259 try:
260 node, flag = mfl[self._changeset.manifest].find(path)
260 node, flag = mfl[self._changeset.manifest].find(path)
261 except KeyError:
261 except KeyError:
262 raise error.ManifestLookupError(self._node, path,
262 raise error.ManifestLookupError(self._node, path,
263 _('not found in manifest'))
263 _('not found in manifest'))
264
264
265 return node, flag
265 return node, flag
266
266
267 def filenode(self, path):
267 def filenode(self, path):
268 return self._fileinfo(path)[0]
268 return self._fileinfo(path)[0]
269
269
270 def flags(self, path):
270 def flags(self, path):
271 try:
271 try:
272 return self._fileinfo(path)[1]
272 return self._fileinfo(path)[1]
273 except error.LookupError:
273 except error.LookupError:
274 return ''
274 return ''
275
275
276 def sub(self, path, allowcreate=True):
276 def sub(self, path, allowcreate=True):
277 '''return a subrepo for the stored revision of path, never wdir()'''
277 '''return a subrepo for the stored revision of path, never wdir()'''
278 return subrepo.subrepo(self, path, allowcreate=allowcreate)
278 return subrepo.subrepo(self, path, allowcreate=allowcreate)
279
279
280 def nullsub(self, path, pctx):
280 def nullsub(self, path, pctx):
281 return subrepo.nullsubrepo(self, path, pctx)
281 return subrepo.nullsubrepo(self, path, pctx)
282
282
283 def workingsub(self, path):
283 def workingsub(self, path):
284 '''return a subrepo for the stored revision, or wdir if this is a wdir
284 '''return a subrepo for the stored revision, or wdir if this is a wdir
285 context.
285 context.
286 '''
286 '''
287 return subrepo.subrepo(self, path, allowwdir=True)
287 return subrepo.subrepo(self, path, allowwdir=True)
288
288
289 def match(self, pats=None, include=None, exclude=None, default='glob',
289 def match(self, pats=None, include=None, exclude=None, default='glob',
290 listsubrepos=False, badfn=None):
290 listsubrepos=False, badfn=None):
291 r = self._repo
291 r = self._repo
292 return matchmod.match(r.root, r.getcwd(), pats,
292 return matchmod.match(r.root, r.getcwd(), pats,
293 include, exclude, default,
293 include, exclude, default,
294 auditor=r.nofsauditor, ctx=self,
294 auditor=r.nofsauditor, ctx=self,
295 listsubrepos=listsubrepos, badfn=badfn)
295 listsubrepos=listsubrepos, badfn=badfn)
296
296
297 def diff(self, ctx2=None, match=None, changes=None, opts=None,
297 def diff(self, ctx2=None, match=None, changes=None, opts=None,
298 losedatafn=None, prefix='', relroot='', copy=None,
298 losedatafn=None, prefix='', relroot='', copy=None,
299 hunksfilterfn=None):
299 hunksfilterfn=None):
300 """Returns a diff generator for the given contexts and matcher"""
300 """Returns a diff generator for the given contexts and matcher"""
301 if ctx2 is None:
301 if ctx2 is None:
302 ctx2 = self.p1()
302 ctx2 = self.p1()
303 if ctx2 is not None:
303 if ctx2 is not None:
304 ctx2 = self._repo[ctx2]
304 ctx2 = self._repo[ctx2]
305 return patch.diff(self._repo, ctx2, self, match=match, changes=changes,
305 return patch.diff(self._repo, ctx2, self, match=match, changes=changes,
306 opts=opts, losedatafn=losedatafn, prefix=prefix,
306 opts=opts, losedatafn=losedatafn, prefix=prefix,
307 relroot=relroot, copy=copy,
307 relroot=relroot, copy=copy,
308 hunksfilterfn=hunksfilterfn)
308 hunksfilterfn=hunksfilterfn)
309
309
310 def dirs(self):
310 def dirs(self):
311 return self._manifest.dirs()
311 return self._manifest.dirs()
312
312
313 def hasdir(self, dir):
313 def hasdir(self, dir):
314 return self._manifest.hasdir(dir)
314 return self._manifest.hasdir(dir)
315
315
316 def status(self, other=None, match=None, listignored=False,
316 def status(self, other=None, match=None, listignored=False,
317 listclean=False, listunknown=False, listsubrepos=False):
317 listclean=False, listunknown=False, listsubrepos=False):
318 """return status of files between two nodes or node and working
318 """return status of files between two nodes or node and working
319 directory.
319 directory.
320
320
321 If other is None, compare this node with working directory.
321 If other is None, compare this node with working directory.
322
322
323 returns (modified, added, removed, deleted, unknown, ignored, clean)
323 returns (modified, added, removed, deleted, unknown, ignored, clean)
324 """
324 """
325
325
326 ctx1 = self
326 ctx1 = self
327 ctx2 = self._repo[other]
327 ctx2 = self._repo[other]
328
328
329 # This next code block is, admittedly, fragile logic that tests for
329 # This next code block is, admittedly, fragile logic that tests for
330 # reversing the contexts and wouldn't need to exist if it weren't for
330 # reversing the contexts and wouldn't need to exist if it weren't for
331 # the fast (and common) code path of comparing the working directory
331 # the fast (and common) code path of comparing the working directory
332 # with its first parent.
332 # with its first parent.
333 #
333 #
334 # What we're aiming for here is the ability to call:
334 # What we're aiming for here is the ability to call:
335 #
335 #
336 # workingctx.status(parentctx)
336 # workingctx.status(parentctx)
337 #
337 #
338 # If we always built the manifest for each context and compared those,
338 # If we always built the manifest for each context and compared those,
339 # then we'd be done. But the special case of the above call means we
339 # then we'd be done. But the special case of the above call means we
340 # just copy the manifest of the parent.
340 # just copy the manifest of the parent.
341 reversed = False
341 reversed = False
342 if (not isinstance(ctx1, changectx)
342 if (not isinstance(ctx1, changectx)
343 and isinstance(ctx2, changectx)):
343 and isinstance(ctx2, changectx)):
344 reversed = True
344 reversed = True
345 ctx1, ctx2 = ctx2, ctx1
345 ctx1, ctx2 = ctx2, ctx1
346
346
347 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
347 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
348 match = ctx2._matchstatus(ctx1, match)
348 match = ctx2._matchstatus(ctx1, match)
349 r = scmutil.status([], [], [], [], [], [], [])
349 r = scmutil.status([], [], [], [], [], [], [])
350 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
350 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
351 listunknown)
351 listunknown)
352
352
353 if reversed:
353 if reversed:
354 # Reverse added and removed. Clear deleted, unknown and ignored as
354 # Reverse added and removed. Clear deleted, unknown and ignored as
355 # these make no sense to reverse.
355 # these make no sense to reverse.
356 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
356 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
357 r.clean)
357 r.clean)
358
358
359 if listsubrepos:
359 if listsubrepos:
360 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
360 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
361 try:
361 try:
362 rev2 = ctx2.subrev(subpath)
362 rev2 = ctx2.subrev(subpath)
363 except KeyError:
363 except KeyError:
364 # A subrepo that existed in node1 was deleted between
364 # A subrepo that existed in node1 was deleted between
365 # node1 and node2 (inclusive). Thus, ctx2's substate
365 # node1 and node2 (inclusive). Thus, ctx2's substate
366                     # won't contain that subpath. The best we can do is ignore it.
366                     # won't contain that subpath. The best we can do is ignore it.
367 rev2 = None
367 rev2 = None
368 submatch = matchmod.subdirmatcher(subpath, match)
368 submatch = matchmod.subdirmatcher(subpath, match)
369 s = sub.status(rev2, match=submatch, ignored=listignored,
369 s = sub.status(rev2, match=submatch, ignored=listignored,
370 clean=listclean, unknown=listunknown,
370 clean=listclean, unknown=listunknown,
371 listsubrepos=True)
371 listsubrepos=True)
372 for rfiles, sfiles in zip(r, s):
372 for rfiles, sfiles in zip(r, s):
373 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
373 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
374
374
375 narrowmatch = self._repo.narrowmatch()
375 narrowmatch = self._repo.narrowmatch()
376 if not narrowmatch.always():
376 if not narrowmatch.always():
377 for l in r:
377 for l in r:
378 l[:] = list(filter(narrowmatch, l))
378 l[:] = list(filter(narrowmatch, l))
379 for l in r:
379 for l in r:
380 l.sort()
380 l.sort()
381
381
382 return r
382 return r
383
383
384 class changectx(basectx):
384 class changectx(basectx):
385 """A changecontext object makes access to data related to a particular
385 """A changecontext object makes access to data related to a particular
386 changeset convenient. It represents a read-only context already present in
386 changeset convenient. It represents a read-only context already present in
387 the repo."""
387 the repo."""
388 def __init__(self, repo, changeid='.'):
388 def __init__(self, repo, changeid='.'):
389 """changeid is a revision number, node, or tag"""
389 """changeid is a revision number, node, or tag"""
390 super(changectx, self).__init__(repo)
390 super(changectx, self).__init__(repo)
391
391
392 try:
392 try:
393 if isinstance(changeid, int):
393 if isinstance(changeid, int):
394 self._node = repo.changelog.node(changeid)
394 self._node = repo.changelog.node(changeid)
395 self._rev = changeid
395 self._rev = changeid
396 return
396 return
397 elif changeid == 'null':
397 elif changeid == 'null':
398 self._node = nullid
398 self._node = nullid
399 self._rev = nullrev
399 self._rev = nullrev
400 return
400 return
401 elif changeid == 'tip':
401 elif changeid == 'tip':
402 self._node = repo.changelog.tip()
402 self._node = repo.changelog.tip()
403 self._rev = repo.changelog.rev(self._node)
403 self._rev = repo.changelog.rev(self._node)
404 return
404 return
405 elif (changeid == '.'
405 elif (changeid == '.'
406 or repo.local() and changeid == repo.dirstate.p1()):
406 or repo.local() and changeid == repo.dirstate.p1()):
407 # this is a hack to delay/avoid loading obsmarkers
407 # this is a hack to delay/avoid loading obsmarkers
408 # when we know that '.' won't be hidden
408 # when we know that '.' won't be hidden
409 self._node = repo.dirstate.p1()
409 self._node = repo.dirstate.p1()
410 self._rev = repo.unfiltered().changelog.rev(self._node)
410 self._rev = repo.unfiltered().changelog.rev(self._node)
411 return
411 return
412 elif len(changeid) == 20:
412 elif len(changeid) == 20:
413 try:
413 try:
414 self._node = changeid
414 self._node = changeid
415 self._rev = repo.changelog.rev(changeid)
415 self._rev = repo.changelog.rev(changeid)
416 return
416 return
417 except error.FilteredLookupError:
417 except error.FilteredLookupError:
418 changeid = hex(changeid) # for the error message
418 changeid = hex(changeid) # for the error message
419 raise
419 raise
420 except LookupError:
420 except LookupError:
421 # check if it might have come from damaged dirstate
421 # check if it might have come from damaged dirstate
422 #
422 #
423 # XXX we could avoid the unfiltered if we had a recognizable
423 # XXX we could avoid the unfiltered if we had a recognizable
424 # exception for filtered changeset access
424 # exception for filtered changeset access
425 if (repo.local()
425 if (repo.local()
426 and changeid in repo.unfiltered().dirstate.parents()):
426 and changeid in repo.unfiltered().dirstate.parents()):
427 msg = _("working directory has unknown parent '%s'!")
427 msg = _("working directory has unknown parent '%s'!")
428 raise error.Abort(msg % short(changeid))
428 raise error.Abort(msg % short(changeid))
429 changeid = hex(changeid) # for the error message
429 changeid = hex(changeid) # for the error message
430
430
431 elif len(changeid) == 40:
431 elif len(changeid) == 40:
432 try:
432 try:
433 self._node = bin(changeid)
433 self._node = bin(changeid)
434 self._rev = repo.changelog.rev(self._node)
434 self._rev = repo.changelog.rev(self._node)
435 return
435 return
436 except error.FilteredLookupError:
436 except error.FilteredLookupError:
437 raise
437 raise
438 except (TypeError, LookupError):
438 except LookupError:
439 pass
439 pass
440 else:
440 else:
441 raise error.ProgrammingError(
441 raise error.ProgrammingError(
442 "unsupported changeid '%s' of type %s" %
442 "unsupported changeid '%s' of type %s" %
443 (changeid, type(changeid)))
443 (changeid, type(changeid)))
444
444
445 except (error.FilteredIndexError, error.FilteredLookupError):
445 except (error.FilteredIndexError, error.FilteredLookupError):
446 raise error.FilteredRepoLookupError(_("filtered revision '%s'")
446 raise error.FilteredRepoLookupError(_("filtered revision '%s'")
447 % pycompat.bytestr(changeid))
447 % pycompat.bytestr(changeid))
448 except IndexError:
448 except IndexError:
449 pass
449 pass
450 raise error.RepoLookupError(
450 raise error.RepoLookupError(
451 _("unknown revision '%s'") % changeid)
451 _("unknown revision '%s'") % changeid)
452
452
453 def __hash__(self):
453 def __hash__(self):
454 try:
454 try:
455 return hash(self._rev)
455 return hash(self._rev)
456 except AttributeError:
456 except AttributeError:
457 return id(self)
457 return id(self)
458
458
459 def __nonzero__(self):
459 def __nonzero__(self):
460 return self._rev != nullrev
460 return self._rev != nullrev
461
461
462 __bool__ = __nonzero__
462 __bool__ = __nonzero__
463
463
464 @propertycache
464 @propertycache
465 def _changeset(self):
465 def _changeset(self):
466 return self._repo.changelog.changelogrevision(self.rev())
466 return self._repo.changelog.changelogrevision(self.rev())
467
467
468 @propertycache
468 @propertycache
469 def _manifest(self):
469 def _manifest(self):
470 return self._manifestctx.read()
470 return self._manifestctx.read()
471
471
472 @property
472 @property
473 def _manifestctx(self):
473 def _manifestctx(self):
474 return self._repo.manifestlog[self._changeset.manifest]
474 return self._repo.manifestlog[self._changeset.manifest]
475
475
476 @propertycache
476 @propertycache
477 def _manifestdelta(self):
477 def _manifestdelta(self):
478 return self._manifestctx.readdelta()
478 return self._manifestctx.readdelta()
479
479
480 @propertycache
480 @propertycache
481 def _parents(self):
481 def _parents(self):
482 repo = self._repo
482 repo = self._repo
483 p1, p2 = repo.changelog.parentrevs(self._rev)
483 p1, p2 = repo.changelog.parentrevs(self._rev)
484 if p2 == nullrev:
484 if p2 == nullrev:
485 return [changectx(repo, p1)]
485 return [changectx(repo, p1)]
486 return [changectx(repo, p1), changectx(repo, p2)]
486 return [changectx(repo, p1), changectx(repo, p2)]
487
487
488 def changeset(self):
488 def changeset(self):
489 c = self._changeset
489 c = self._changeset
490 return (
490 return (
491 c.manifest,
491 c.manifest,
492 c.user,
492 c.user,
493 c.date,
493 c.date,
494 c.files,
494 c.files,
495 c.description,
495 c.description,
496 c.extra,
496 c.extra,
497 )
497 )
498 def manifestnode(self):
498 def manifestnode(self):
499 return self._changeset.manifest
499 return self._changeset.manifest
500
500
501 def user(self):
501 def user(self):
502 return self._changeset.user
502 return self._changeset.user
503 def date(self):
503 def date(self):
504 return self._changeset.date
504 return self._changeset.date
505 def files(self):
505 def files(self):
506 return self._changeset.files
506 return self._changeset.files
507 def description(self):
507 def description(self):
508 return self._changeset.description
508 return self._changeset.description
509 def branch(self):
509 def branch(self):
510 return encoding.tolocal(self._changeset.extra.get("branch"))
510 return encoding.tolocal(self._changeset.extra.get("branch"))
511 def closesbranch(self):
511 def closesbranch(self):
512 return 'close' in self._changeset.extra
512 return 'close' in self._changeset.extra
513 def extra(self):
513 def extra(self):
514 """Return a dict of extra information."""
514 """Return a dict of extra information."""
515 return self._changeset.extra
515 return self._changeset.extra
516 def tags(self):
516 def tags(self):
517 """Return a list of byte tag names"""
517 """Return a list of byte tag names"""
518 return self._repo.nodetags(self._node)
518 return self._repo.nodetags(self._node)
519 def bookmarks(self):
519 def bookmarks(self):
520 """Return a list of byte bookmark names."""
520 """Return a list of byte bookmark names."""
521 return self._repo.nodebookmarks(self._node)
521 return self._repo.nodebookmarks(self._node)
522 def phase(self):
522 def phase(self):
523 return self._repo._phasecache.phase(self._repo, self._rev)
523 return self._repo._phasecache.phase(self._repo, self._rev)
524 def hidden(self):
524 def hidden(self):
525 return self._rev in repoview.filterrevs(self._repo, 'visible')
525 return self._rev in repoview.filterrevs(self._repo, 'visible')
526
526
527 def isinmemory(self):
527 def isinmemory(self):
528 return False
528 return False
529
529
530 def children(self):
530 def children(self):
531 """return list of changectx contexts for each child changeset.
531 """return list of changectx contexts for each child changeset.
532
532
533 This returns only the immediate child changesets. Use descendants() to
533 This returns only the immediate child changesets. Use descendants() to
534 recursively walk children.
534 recursively walk children.
535 """
535 """
536 c = self._repo.changelog.children(self._node)
536 c = self._repo.changelog.children(self._node)
537 return [changectx(self._repo, x) for x in c]
537 return [changectx(self._repo, x) for x in c]
538
538
539 def ancestors(self):
539 def ancestors(self):
540 for a in self._repo.changelog.ancestors([self._rev]):
540 for a in self._repo.changelog.ancestors([self._rev]):
541 yield changectx(self._repo, a)
541 yield changectx(self._repo, a)
542
542
543 def descendants(self):
543 def descendants(self):
544 """Recursively yield all children of the changeset.
544 """Recursively yield all children of the changeset.
545
545
546 For just the immediate children, use children()
546 For just the immediate children, use children()
547 """
547 """
548 for d in self._repo.changelog.descendants([self._rev]):
548 for d in self._repo.changelog.descendants([self._rev]):
549 yield changectx(self._repo, d)
549 yield changectx(self._repo, d)
550
550
551 def filectx(self, path, fileid=None, filelog=None):
551 def filectx(self, path, fileid=None, filelog=None):
552 """get a file context from this changeset"""
552 """get a file context from this changeset"""
553 if fileid is None:
553 if fileid is None:
554 fileid = self.filenode(path)
554 fileid = self.filenode(path)
555 return filectx(self._repo, path, fileid=fileid,
555 return filectx(self._repo, path, fileid=fileid,
556 changectx=self, filelog=filelog)
556 changectx=self, filelog=filelog)
557
557
558 def ancestor(self, c2, warn=False):
558 def ancestor(self, c2, warn=False):
559 """return the "best" ancestor context of self and c2
559 """return the "best" ancestor context of self and c2
560
560
561 If there are multiple candidates, it will show a message and check
561 If there are multiple candidates, it will show a message and check
562 merge.preferancestor configuration before falling back to the
562 merge.preferancestor configuration before falling back to the
563 revlog ancestor."""
563 revlog ancestor."""
564 # deal with workingctxs
564 # deal with workingctxs
565 n2 = c2._node
565 n2 = c2._node
566 if n2 is None:
566 if n2 is None:
567 n2 = c2._parents[0]._node
567 n2 = c2._parents[0]._node
568 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
568 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
569 if not cahs:
569 if not cahs:
570 anc = nullid
570 anc = nullid
571 elif len(cahs) == 1:
571 elif len(cahs) == 1:
572 anc = cahs[0]
572 anc = cahs[0]
573 else:
573 else:
574 # experimental config: merge.preferancestor
574 # experimental config: merge.preferancestor
575 for r in self._repo.ui.configlist('merge', 'preferancestor'):
575 for r in self._repo.ui.configlist('merge', 'preferancestor'):
576 try:
576 try:
577 ctx = scmutil.revsymbol(self._repo, r)
577 ctx = scmutil.revsymbol(self._repo, r)
578 except error.RepoLookupError:
578 except error.RepoLookupError:
579 continue
579 continue
580 anc = ctx.node()
580 anc = ctx.node()
581 if anc in cahs:
581 if anc in cahs:
582 break
582 break
583 else:
583 else:
584 anc = self._repo.changelog.ancestor(self._node, n2)
584 anc = self._repo.changelog.ancestor(self._node, n2)
585 if warn:
585 if warn:
586 self._repo.ui.status(
586 self._repo.ui.status(
587 (_("note: using %s as ancestor of %s and %s\n") %
587 (_("note: using %s as ancestor of %s and %s\n") %
588 (short(anc), short(self._node), short(n2))) +
588 (short(anc), short(self._node), short(n2))) +
589 ''.join(_(" alternatively, use --config "
589 ''.join(_(" alternatively, use --config "
590 "merge.preferancestor=%s\n") %
590 "merge.preferancestor=%s\n") %
591 short(n) for n in sorted(cahs) if n != anc))
591 short(n) for n in sorted(cahs) if n != anc))
592 return changectx(self._repo, anc)
592 return changectx(self._repo, anc)
593
593
594 def isancestorof(self, other):
594 def isancestorof(self, other):
595 """True if this changeset is an ancestor of other"""
595 """True if this changeset is an ancestor of other"""
596 return self._repo.changelog.isancestorrev(self._rev, other._rev)
596 return self._repo.changelog.isancestorrev(self._rev, other._rev)
597
597
598 def walk(self, match):
598 def walk(self, match):
599 '''Generates matching file names.'''
599 '''Generates matching file names.'''
600
600
601 # Wrap match.bad method to have message with nodeid
601 # Wrap match.bad method to have message with nodeid
602 def bad(fn, msg):
602 def bad(fn, msg):
603 # The manifest doesn't know about subrepos, so don't complain about
603 # The manifest doesn't know about subrepos, so don't complain about
604 # paths into valid subrepos.
604 # paths into valid subrepos.
605 if any(fn == s or fn.startswith(s + '/')
605 if any(fn == s or fn.startswith(s + '/')
606 for s in self.substate):
606 for s in self.substate):
607 return
607 return
608 match.bad(fn, _('no such file in rev %s') % self)
608 match.bad(fn, _('no such file in rev %s') % self)
609
609
610 m = matchmod.badmatch(match, bad)
610 m = matchmod.badmatch(match, bad)
611 return self._manifest.walk(m)
611 return self._manifest.walk(m)
612
612
613 def matches(self, match):
613 def matches(self, match):
614 return self.walk(match)
614 return self.walk(match)
615
615
616 class basefilectx(object):
616 class basefilectx(object):
617 """A filecontext object represents the common logic for its children:
617 """A filecontext object represents the common logic for its children:
618 filectx: read-only access to a filerevision that is already present
618 filectx: read-only access to a filerevision that is already present
619 in the repo,
619 in the repo,
620 workingfilectx: a filecontext that represents files from the working
620 workingfilectx: a filecontext that represents files from the working
621 directory,
621 directory,
622 memfilectx: a filecontext that represents files in-memory,
622 memfilectx: a filecontext that represents files in-memory,
623 """
623 """
624 @propertycache
624 @propertycache
625 def _filelog(self):
625 def _filelog(self):
626 return self._repo.file(self._path)
626 return self._repo.file(self._path)
627
627
628 @propertycache
628 @propertycache
629 def _changeid(self):
629 def _changeid(self):
630 if r'_changeid' in self.__dict__:
630 if r'_changeid' in self.__dict__:
631 return self._changeid
631 return self._changeid
632 elif r'_changectx' in self.__dict__:
632 elif r'_changectx' in self.__dict__:
633 return self._changectx.rev()
633 return self._changectx.rev()
634 elif r'_descendantrev' in self.__dict__:
634 elif r'_descendantrev' in self.__dict__:
635 # this file context was created from a revision with a known
635 # this file context was created from a revision with a known
636 # descendant, we can (lazily) correct for linkrev aliases
636 # descendant, we can (lazily) correct for linkrev aliases
637 return self._adjustlinkrev(self._descendantrev)
637 return self._adjustlinkrev(self._descendantrev)
638 else:
638 else:
639 return self._filelog.linkrev(self._filerev)
639 return self._filelog.linkrev(self._filerev)
640
640
641 @propertycache
641 @propertycache
642 def _filenode(self):
642 def _filenode(self):
643 if r'_fileid' in self.__dict__:
643 if r'_fileid' in self.__dict__:
644 return self._filelog.lookup(self._fileid)
644 return self._filelog.lookup(self._fileid)
645 else:
645 else:
646 return self._changectx.filenode(self._path)
646 return self._changectx.filenode(self._path)
647
647
648 @propertycache
648 @propertycache
649 def _filerev(self):
649 def _filerev(self):
650 return self._filelog.rev(self._filenode)
650 return self._filelog.rev(self._filenode)
651
651
652 @propertycache
652 @propertycache
653 def _repopath(self):
653 def _repopath(self):
654 return self._path
654 return self._path
655
655
656 def __nonzero__(self):
656 def __nonzero__(self):
657 try:
657 try:
658 self._filenode
658 self._filenode
659 return True
659 return True
660 except error.LookupError:
660 except error.LookupError:
661 # file is missing
661 # file is missing
662 return False
662 return False
663
663
664 __bool__ = __nonzero__
664 __bool__ = __nonzero__
665
665
666 def __bytes__(self):
666 def __bytes__(self):
667 try:
667 try:
668 return "%s@%s" % (self.path(), self._changectx)
668 return "%s@%s" % (self.path(), self._changectx)
669 except error.LookupError:
669 except error.LookupError:
670 return "%s@???" % self.path()
670 return "%s@???" % self.path()
671
671
672 __str__ = encoding.strmethod(__bytes__)
672 __str__ = encoding.strmethod(__bytes__)
673
673
674 def __repr__(self):
674 def __repr__(self):
675 return r"<%s %s>" % (type(self).__name__, str(self))
675 return r"<%s %s>" % (type(self).__name__, str(self))
676
676
677 def __hash__(self):
677 def __hash__(self):
678 try:
678 try:
679 return hash((self._path, self._filenode))
679 return hash((self._path, self._filenode))
680 except AttributeError:
680 except AttributeError:
681 return id(self)
681 return id(self)
682
682
683 def __eq__(self, other):
683 def __eq__(self, other):
684 try:
684 try:
685 return (type(self) == type(other) and self._path == other._path
685 return (type(self) == type(other) and self._path == other._path
686 and self._filenode == other._filenode)
686 and self._filenode == other._filenode)
687 except AttributeError:
687 except AttributeError:
688 return False
688 return False
689
689
690 def __ne__(self, other):
690 def __ne__(self, other):
691 return not (self == other)
691 return not (self == other)
692
692
693 def filerev(self):
693 def filerev(self):
694 return self._filerev
694 return self._filerev
695 def filenode(self):
695 def filenode(self):
696 return self._filenode
696 return self._filenode
697 @propertycache
697 @propertycache
698 def _flags(self):
698 def _flags(self):
699 return self._changectx.flags(self._path)
699 return self._changectx.flags(self._path)
700 def flags(self):
700 def flags(self):
701 return self._flags
701 return self._flags
702 def filelog(self):
702 def filelog(self):
703 return self._filelog
703 return self._filelog
704 def rev(self):
704 def rev(self):
705 return self._changeid
705 return self._changeid
706 def linkrev(self):
706 def linkrev(self):
707 return self._filelog.linkrev(self._filerev)
707 return self._filelog.linkrev(self._filerev)
708 def node(self):
708 def node(self):
709 return self._changectx.node()
709 return self._changectx.node()
710 def hex(self):
710 def hex(self):
711 return self._changectx.hex()
711 return self._changectx.hex()
712 def user(self):
712 def user(self):
713 return self._changectx.user()
713 return self._changectx.user()
714 def date(self):
714 def date(self):
715 return self._changectx.date()
715 return self._changectx.date()
716 def files(self):
716 def files(self):
717 return self._changectx.files()
717 return self._changectx.files()
718 def description(self):
718 def description(self):
719 return self._changectx.description()
719 return self._changectx.description()
720 def branch(self):
720 def branch(self):
721 return self._changectx.branch()
721 return self._changectx.branch()
722 def extra(self):
722 def extra(self):
723 return self._changectx.extra()
723 return self._changectx.extra()
724 def phase(self):
724 def phase(self):
725 return self._changectx.phase()
725 return self._changectx.phase()
726 def phasestr(self):
726 def phasestr(self):
727 return self._changectx.phasestr()
727 return self._changectx.phasestr()
728 def obsolete(self):
728 def obsolete(self):
729 return self._changectx.obsolete()
729 return self._changectx.obsolete()
730 def instabilities(self):
730 def instabilities(self):
731 return self._changectx.instabilities()
731 return self._changectx.instabilities()
732 def manifest(self):
732 def manifest(self):
733 return self._changectx.manifest()
733 return self._changectx.manifest()
734 def changectx(self):
734 def changectx(self):
735 return self._changectx
735 return self._changectx
736 def renamed(self):
736 def renamed(self):
737 return self._copied
737 return self._copied
738 def repo(self):
738 def repo(self):
739 return self._repo
739 return self._repo
740 def size(self):
740 def size(self):
741 return len(self.data())
741 return len(self.data())
742
742
743 def path(self):
743 def path(self):
744 return self._path
744 return self._path
745
745
746 def isbinary(self):
746 def isbinary(self):
747 try:
747 try:
748 return stringutil.binary(self.data())
748 return stringutil.binary(self.data())
749 except IOError:
749 except IOError:
750 return False
750 return False
751 def isexec(self):
751 def isexec(self):
752 return 'x' in self.flags()
752 return 'x' in self.flags()
753 def islink(self):
753 def islink(self):
754 return 'l' in self.flags()
754 return 'l' in self.flags()
755
755
756 def isabsent(self):
756 def isabsent(self):
757 """whether this filectx represents a file not in self._changectx
757 """whether this filectx represents a file not in self._changectx
758
758
759 This is mainly for merge code to detect change/delete conflicts. This is
759 This is mainly for merge code to detect change/delete conflicts. This is
760 expected to be True for all subclasses of basectx."""
760 expected to be True for all subclasses of basectx."""
761 return False
761 return False
762
762
763 _customcmp = False
763 _customcmp = False
764 def cmp(self, fctx):
764 def cmp(self, fctx):
765 """compare with other file context
765 """compare with other file context
766
766
767 returns True if different than fctx.
767 returns True if different than fctx.
768 """
768 """
769 if fctx._customcmp:
769 if fctx._customcmp:
770 return fctx.cmp(self)
770 return fctx.cmp(self)
771
771
772 if (fctx._filenode is None
772 if (fctx._filenode is None
773 and (self._repo._encodefilterpats
773 and (self._repo._encodefilterpats
774 # if file data starts with '\1\n', empty metadata block is
774 # if file data starts with '\1\n', empty metadata block is
775 # prepended, which adds 4 bytes to filelog.size().
775 # prepended, which adds 4 bytes to filelog.size().
776 or self.size() - 4 == fctx.size())
776 or self.size() - 4 == fctx.size())
777 or self.size() == fctx.size()):
777 or self.size() == fctx.size()):
778 return self._filelog.cmp(self._filenode, fctx.data())
778 return self._filelog.cmp(self._filenode, fctx.data())
779
779
780 return True
780 return True
781
781
782 def _adjustlinkrev(self, srcrev, inclusive=False):
782 def _adjustlinkrev(self, srcrev, inclusive=False):
783 """return the first ancestor of <srcrev> introducing <fnode>
783 """return the first ancestor of <srcrev> introducing <fnode>
784
784
785 If the linkrev of the file revision does not point to an ancestor of
785 If the linkrev of the file revision does not point to an ancestor of
786 srcrev, we'll walk down the ancestors until we find one introducing
786 srcrev, we'll walk down the ancestors until we find one introducing
787 this file revision.
787 this file revision.
788
788
789 :srcrev: the changeset revision we search ancestors from
789 :srcrev: the changeset revision we search ancestors from
790 :inclusive: if true, the src revision will also be checked
790 :inclusive: if true, the src revision will also be checked
791 """
791 """
792 repo = self._repo
792 repo = self._repo
793 cl = repo.unfiltered().changelog
793 cl = repo.unfiltered().changelog
794 mfl = repo.manifestlog
794 mfl = repo.manifestlog
795 # fetch the linkrev
795 # fetch the linkrev
796 lkr = self.linkrev()
796 lkr = self.linkrev()
797 # hack to reuse ancestor computation when searching for renames
797 # hack to reuse ancestor computation when searching for renames
798 memberanc = getattr(self, '_ancestrycontext', None)
798 memberanc = getattr(self, '_ancestrycontext', None)
799 iteranc = None
799 iteranc = None
800 if srcrev is None:
800 if srcrev is None:
801 # wctx case, used by workingfilectx during mergecopy
801 # wctx case, used by workingfilectx during mergecopy
802 revs = [p.rev() for p in self._repo[None].parents()]
802 revs = [p.rev() for p in self._repo[None].parents()]
803 inclusive = True # we skipped the real (revless) source
803 inclusive = True # we skipped the real (revless) source
804 else:
804 else:
805 revs = [srcrev]
805 revs = [srcrev]
806 if memberanc is None:
806 if memberanc is None:
807 memberanc = iteranc = cl.ancestors(revs, lkr,
807 memberanc = iteranc = cl.ancestors(revs, lkr,
808 inclusive=inclusive)
808 inclusive=inclusive)
809 # check if this linkrev is an ancestor of srcrev
809 # check if this linkrev is an ancestor of srcrev
810 if lkr not in memberanc:
810 if lkr not in memberanc:
811 if iteranc is None:
811 if iteranc is None:
812 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
812 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
813 fnode = self._filenode
813 fnode = self._filenode
814 path = self._path
814 path = self._path
815 for a in iteranc:
815 for a in iteranc:
816 ac = cl.read(a) # get changeset data (we avoid object creation)
816 ac = cl.read(a) # get changeset data (we avoid object creation)
817 if path in ac[3]: # checking the 'files' field.
817 if path in ac[3]: # checking the 'files' field.
818 # The file has been touched, check if the content is
818 # The file has been touched, check if the content is
819 # similar to the one we search for.
819 # similar to the one we search for.
820 if fnode == mfl[ac[0]].readfast().get(path):
820 if fnode == mfl[ac[0]].readfast().get(path):
821 return a
821 return a
822 # In theory, we should never get out of that loop without a result.
822 # In theory, we should never get out of that loop without a result.
823 # But if manifest uses a buggy file revision (not children of the
823 # But if manifest uses a buggy file revision (not children of the
824 # one it replaces) we could. Such a buggy situation will likely
824 # one it replaces) we could. Such a buggy situation will likely
825         # result in a crash somewhere else at some point.
825         # result in a crash somewhere else at some point.
826 return lkr
826 return lkr
827
827
828 def introrev(self):
828 def introrev(self):
829 """return the rev of the changeset which introduced this file revision
829 """return the rev of the changeset which introduced this file revision
830
830
831         This method is different from linkrev because it takes into account the
831         This method is different from linkrev because it takes into account the
832 changeset the filectx was created from. It ensures the returned
832 changeset the filectx was created from. It ensures the returned
833 revision is one of its ancestors. This prevents bugs from
833 revision is one of its ancestors. This prevents bugs from
834 'linkrev-shadowing' when a file revision is used by multiple
834 'linkrev-shadowing' when a file revision is used by multiple
835 changesets.
835 changesets.
836 """
836 """
837 lkr = self.linkrev()
837 lkr = self.linkrev()
838 attrs = vars(self)
838 attrs = vars(self)
839 noctx = not (r'_changeid' in attrs or r'_changectx' in attrs)
839 noctx = not (r'_changeid' in attrs or r'_changectx' in attrs)
840 if noctx or self.rev() == lkr:
840 if noctx or self.rev() == lkr:
841 return self.linkrev()
841 return self.linkrev()
842 return self._adjustlinkrev(self.rev(), inclusive=True)
842 return self._adjustlinkrev(self.rev(), inclusive=True)
843
843
844 def introfilectx(self):
844 def introfilectx(self):
845 """Return filectx having identical contents, but pointing to the
845 """Return filectx having identical contents, but pointing to the
846 changeset revision where this filectx was introduced"""
846 changeset revision where this filectx was introduced"""
847 introrev = self.introrev()
847 introrev = self.introrev()
848 if self.rev() == introrev:
848 if self.rev() == introrev:
849 return self
849 return self
850 return self.filectx(self.filenode(), changeid=introrev)
850 return self.filectx(self.filenode(), changeid=introrev)
851
851
852 def _parentfilectx(self, path, fileid, filelog):
852 def _parentfilectx(self, path, fileid, filelog):
853 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
853 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
854 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
854 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
855 if r'_changeid' in vars(self) or r'_changectx' in vars(self):
855 if r'_changeid' in vars(self) or r'_changectx' in vars(self):
856 # If self is associated with a changeset (probably explicitly
856 # If self is associated with a changeset (probably explicitly
857 # fed), ensure the created filectx is associated with a
857 # fed), ensure the created filectx is associated with a
858 # changeset that is an ancestor of self.changectx.
858 # changeset that is an ancestor of self.changectx.
859 # This lets us later use _adjustlinkrev to get a correct link.
859 # This lets us later use _adjustlinkrev to get a correct link.
860 fctx._descendantrev = self.rev()
860 fctx._descendantrev = self.rev()
861 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
861 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
862 elif r'_descendantrev' in vars(self):
862 elif r'_descendantrev' in vars(self):
863 # Otherwise propagate _descendantrev if we have one associated.
863 # Otherwise propagate _descendantrev if we have one associated.
864 fctx._descendantrev = self._descendantrev
864 fctx._descendantrev = self._descendantrev
865 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
865 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
866 return fctx
866 return fctx
867
867
868 def parents(self):
868 def parents(self):
869 _path = self._path
869 _path = self._path
870 fl = self._filelog
870 fl = self._filelog
871 parents = self._filelog.parents(self._filenode)
871 parents = self._filelog.parents(self._filenode)
872 pl = [(_path, node, fl) for node in parents if node != nullid]
872 pl = [(_path, node, fl) for node in parents if node != nullid]
873
873
874 r = fl.renamed(self._filenode)
874 r = fl.renamed(self._filenode)
875 if r:
875 if r:
876             # - In the simple rename case, both parents are nullid, pl is empty.
876             # - In the simple rename case, both parents are nullid, pl is empty.
877             # - In case of merge, only one of the parents is nullid and should
877             # - In case of merge, only one of the parents is nullid and should
878 # be replaced with the rename information. This parent is -always-
878 # be replaced with the rename information. This parent is -always-
879 # the first one.
879 # the first one.
880 #
880 #
881             # As nullid parents have always been filtered out in the previous list
881             # As nullid parents have always been filtered out in the previous list
882             # comprehension, inserting at 0 will always result in replacing the
882             # comprehension, inserting at 0 will always result in replacing the
883             # first nullid parent with the rename information.
883             # first nullid parent with the rename information.
884 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
884 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
885
885
886 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
886 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
887
887
888 def p1(self):
889 return self.parents()[0]
890
891 def p2(self):
892 p = self.parents()
893 if len(p) == 2:
894 return p[1]
895 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
896
897 def annotate(self, follow=False, skiprevs=None, diffopts=None):
898 """Returns a list of annotateline objects for each line in the file
899
900 - line.fctx is the filectx of the node where that line was last changed
901 - line.lineno is the line number at the first appearance in the managed
902 file
903 - line.text is the data on that line (including newline character)
904 """
905 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
906
907 def parents(f):
908 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
909 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
910 # from the topmost introrev (= srcrev) down to p.linkrev() if it
911 # isn't an ancestor of the srcrev.
912 f._changeid
913 pl = f.parents()
914
915 # Don't return renamed parents if we aren't following.
916 if not follow:
917 pl = [p for p in pl if p.path() == f.path()]
918
919 # renamed filectx won't have a filelog yet, so set it
920 # from the cache to save time
921 for p in pl:
922 if not r'_filelog' in p.__dict__:
923 p._filelog = getlog(p.path())
924
925 return pl
926
927 # use linkrev to find the first changeset where self appeared
928 base = self.introfilectx()
929 if getattr(base, '_ancestrycontext', None) is None:
930 cl = self._repo.changelog
931 if base.rev() is None:
932 # wctx is not inclusive, but works because _ancestrycontext
933 # is used to test filelog revisions
934 ac = cl.ancestors([p.rev() for p in base.parents()],
935 inclusive=True)
936 else:
937 ac = cl.ancestors([base.rev()], inclusive=True)
938 base._ancestrycontext = ac
939
940 return dagop.annotate(base, parents, skiprevs=skiprevs,
941 diffopts=diffopts)
942
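A hedged usage sketch of annotate(); the repository path and file name are illustrative, and it assumes the current directory is a Mercurial repository:

from mercurial import hg, ui as uimod

repo = hg.repository(uimod.ui.load(), b'.')
fctx = repo[b'tip'][b'README']
for line in fctx.annotate(follow=True):
    # each annotateline carries .fctx, .lineno and .text as documented above
    print(line.fctx.rev(), line.lineno, line.text.rstrip(b'\n'))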
943 def ancestors(self, followfirst=False):
944 visit = {}
945 c = self
946 if followfirst:
947 cut = 1
948 else:
949 cut = None
950
951 while True:
952 for parent in c.parents()[:cut]:
953 visit[(parent.linkrev(), parent.filenode())] = parent
954 if not visit:
955 break
956 c = visit.pop(max(visit))
957 yield c
958
959 def decodeddata(self):
960 """Returns `data()` after running repository decoding filters.
961
962 This is often equivalent to how the data would be expressed on disk.
963 """
964 return self._repo.wwritedata(self.path(), self.data())
965
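A brief, hedged illustration of the difference between raw and decoded content (assumes `repo` and a tracked file name as in the sketch above):

fctx = repo[b'tip'][b'data.txt']
raw = fctx.data()            # filelog content with no filters applied
decoded = fctx.decodeddata() # content after the repository's decode filters,
                             # usually what would be written to the working copy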
966 class filectx(basefilectx):
967 """A filecontext object makes access to data related to a particular
968 filerevision convenient."""
969 def __init__(self, repo, path, changeid=None, fileid=None,
970 filelog=None, changectx=None):
971 """changeid can be a changeset revision, node, or tag.
972 fileid can be a file revision or node."""
973 self._repo = repo
974 self._path = path
975
976 assert (changeid is not None
977 or fileid is not None
978 or changectx is not None), \
979 ("bad args: changeid=%r, fileid=%r, changectx=%r"
980 % (changeid, fileid, changectx))
981
982 if filelog is not None:
983 self._filelog = filelog
984
985 if changeid is not None:
986 self._changeid = changeid
987 if changectx is not None:
988 self._changectx = changectx
989 if fileid is not None:
990 self._fileid = fileid
991
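A hedged sketch of the different ways the constructor above can address a file revision (values are illustrative):

from mercurial import context

fctx_by_change = context.filectx(repo, b'README', changeid=b'tip')  # changeset rev/node/tag
fctx_by_fileid = context.filectx(repo, b'README', fileid=0)         # file revision number
fctx_via_ctx = repo[b'tip'].filectx(b'README')                      # from an existing changectx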
992 @propertycache
993 def _changectx(self):
994 try:
995 return changectx(self._repo, self._changeid)
996 except error.FilteredRepoLookupError:
997 # Linkrev may point to any revision in the repository. When the
998 # repository is filtered this may lead to `filectx` trying to build
999 # `changectx` for a filtered revision. In such a case we fall back to
1000 # creating `changectx` on the unfiltered version of the repository.
1001 # This fallback should not be an issue because `changectx` objects
1002 # obtained from `filectx` are not used in complex operations that
1003 # care about filtering.
1004 #
1005 # This fallback is a cheap and dirty fix that prevents several
1006 # crashes. It does not ensure the behavior is correct. However the
1007 # behavior was not correct before filtering either, and "incorrect
1008 # behavior" is seen as better than "crash".
1009 #
1010 # Linkrevs have several serious troubles with filtering that are
1011 # complicated to solve. Proper handling of the issue here should be
1012 # considered when solving the linkrev issues is on the table.
1013 return changectx(self._repo.unfiltered(), self._changeid)
1014
1015 def filectx(self, fileid, changeid=None):
1016 '''opens an arbitrary revision of the file without
1017 opening a new filelog'''
1018 return filectx(self._repo, self._path, fileid=fileid,
1019 filelog=self._filelog, changeid=changeid)
1020
1021 def rawdata(self):
1022 return self._filelog.revision(self._filenode, raw=True)
1023
1024 def rawflags(self):
1025 """low-level revlog flags"""
1026 return self._filelog.flags(self._filerev)
1027
1028 def data(self):
1029 try:
1030 return self._filelog.read(self._filenode)
1031 except error.CensoredNodeError:
1032 if self._repo.ui.config("censor", "policy") == "ignore":
1033 return ""
1034 raise error.Abort(_("censored node: %s") % short(self._filenode),
1035 hint=_("set censor.policy to ignore errors"))
1036
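The censored-node handling above is controlled by the censor.policy configuration; a hedged, test-style override (rather than editing hgrc) might look like:

# ui.configoverride is a context manager; b'example' is just a source label.
with repo.ui.configoverride({(b'censor', b'policy'): b'ignore'}, b'example'):
    data = fctx.data()   # yields empty content instead of aborting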
1037 def size(self):
1038 return self._filelog.size(self._filerev)
1039
1040 @propertycache
1041 def _copied(self):
1042 """check if the file was actually renamed in this changeset revision
1043
1044 If a rename is logged in the file revision, we report the copy for the
1045 changeset only if the file revision's linkrev points back to the changeset
1046 in question or if both changeset parents contain different file revisions.
1047 """
1048
1049 renamed = self._filelog.renamed(self._filenode)
1050 if not renamed:
1051 return None
1052
1053 if self.rev() == self.linkrev():
1054 return renamed
1055
1056 name = self.path()
1057 fnode = self._filenode
1058 for p in self._changectx.parents():
1059 try:
1060 if fnode == p.filenode(name):
1061 return None
1062 except error.LookupError:
1063 pass
1064 return renamed
1065
1066 def children(self):
1067 # hard for renames
1068 c = self._filelog.children(self._filenode)
1069 return [filectx(self._repo, self._path, fileid=x,
1070 filelog=self._filelog) for x in c]
1071
1072 class committablectx(basectx):
1073 """A committablectx object provides common functionality for a context that
1074 wants the ability to commit, e.g. workingctx or memctx."""
1075 def __init__(self, repo, text="", user=None, date=None, extra=None,
1076 changes=None):
1077 super(committablectx, self).__init__(repo)
1078 self._rev = None
1079 self._node = None
1080 self._text = text
1081 if date:
1082 self._date = dateutil.parsedate(date)
1083 if user:
1084 self._user = user
1085 if changes:
1086 self._status = changes
1087
1088 self._extra = {}
1089 if extra:
1090 self._extra = extra.copy()
1091 if 'branch' not in self._extra:
1092 try:
1093 branch = encoding.fromlocal(self._repo.dirstate.branch())
1094 except UnicodeDecodeError:
1095 raise error.Abort(_('branch name not in UTF-8!'))
1096 self._extra['branch'] = branch
1097 if self._extra['branch'] == '':
1098 self._extra['branch'] = 'default'
1099
1100 def __bytes__(self):
1101 return bytes(self._parents[0]) + "+"
1102
1103 __str__ = encoding.strmethod(__bytes__)
1104
1105 def __nonzero__(self):
1106 return True
1107
1108 __bool__ = __nonzero__
1109
1110 def _buildflagfunc(self):
1111 # Create a fallback function for getting file flags when the
1112 # filesystem doesn't support them
1113
1114 copiesget = self._repo.dirstate.copies().get
1115 parents = self.parents()
1116 if len(parents) < 2:
1117 # when we have one parent, it's easy: copy from parent
1118 man = parents[0].manifest()
1119 def func(f):
1120 f = copiesget(f, f)
1121 return man.flags(f)
1122 else:
1123 # merges are tricky: we try to reconstruct the unstored
1124 # result from the merge (issue1802)
1125 p1, p2 = parents
1126 pa = p1.ancestor(p2)
1127 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1128
1129 def func(f):
1130 f = copiesget(f, f) # may be wrong for merges with copies
1131 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1132 if fl1 == fl2:
1133 return fl1
1134 if fl1 == fla:
1135 return fl2
1136 if fl2 == fla:
1137 return fl1
1138 return '' # punt for conflicts
1139
1140 return func
1141
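A standalone sketch of the three-way flag reconciliation used in the merge branch above (pure Python; the flag values are hypothetical):

def mergeflags(fl1, fl2, fla):
    # keep the flag when both sides agree; otherwise prefer the side that
    # changed relative to the ancestor; give up on a genuine conflict
    if fl1 == fl2:
        return fl1
    if fl1 == fla:
        return fl2
    if fl2 == fla:
        return fl1
    return ''

assert mergeflags('x', '', '') == 'x'   # only p1 changed the flag
assert mergeflags('l', 'x', '') == ''   # both changed it differently: punt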
1142 @propertycache
1143 def _flagfunc(self):
1144 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1145
1146 @propertycache
1147 def _status(self):
1148 return self._repo.status()
1149
1150 @propertycache
1151 def _user(self):
1152 return self._repo.ui.username()
1153
1154 @propertycache
1155 def _date(self):
1156 ui = self._repo.ui
1157 date = ui.configdate('devel', 'default-date')
1158 if date is None:
1159 date = dateutil.makedate()
1160 return date
1161
1162 def subrev(self, subpath):
1163 return None
1164
1165 def manifestnode(self):
1166 return None
1167 def user(self):
1168 return self._user or self._repo.ui.username()
1169 def date(self):
1170 return self._date
1171 def description(self):
1172 return self._text
1173 def files(self):
1174 return sorted(self._status.modified + self._status.added +
1175 self._status.removed)
1176
1177 def modified(self):
1178 return self._status.modified
1179 def added(self):
1180 return self._status.added
1181 def removed(self):
1182 return self._status.removed
1183 def deleted(self):
1184 return self._status.deleted
1185 def branch(self):
1186 return encoding.tolocal(self._extra['branch'])
1187 def closesbranch(self):
1188 return 'close' in self._extra
1189 def extra(self):
1190 return self._extra
1191
1192 def isinmemory(self):
1193 return False
1194
1195 def tags(self):
1196 return []
1197
1198 def bookmarks(self):
1199 b = []
1200 for p in self.parents():
1201 b.extend(p.bookmarks())
1202 return b
1203
1204 def phase(self):
1205 phase = phases.draft # default phase to draft
1206 for p in self.parents():
1207 phase = max(phase, p.phase())
1208 return phase
1209
1210 def hidden(self):
1211 return False
1212
1213 def children(self):
1214 return []
1215
1216 def flags(self, path):
1217 if r'_manifest' in self.__dict__:
1218 try:
1219 return self._manifest.flags(path)
1220 except KeyError:
1221 return ''
1222
1223 try:
1224 return self._flagfunc(path)
1225 except OSError:
1226 return ''
1227
1228 def ancestor(self, c2):
1229 """return the "best" ancestor context of self and c2"""
1230 return self._parents[0].ancestor(c2) # punt on two parents for now
1231
1232 def walk(self, match):
1233 '''Generates matching file names.'''
1234 return sorted(self._repo.dirstate.walk(match,
1235 subrepos=sorted(self.substate),
1236 unknown=True, ignored=False))
1237
1238 def matches(self, match):
1239 ds = self._repo.dirstate
1240 return sorted(f for f in ds.matches(match) if ds[f] != 'r')
1241
1242 def ancestors(self):
1243 for p in self._parents:
1244 yield p
1245 for a in self._repo.changelog.ancestors(
1246 [p.rev() for p in self._parents]):
1247 yield changectx(self._repo, a)
1248
1249 def markcommitted(self, node):
1250 """Perform post-commit cleanup necessary after committing this ctx
1251
1252 Specifically, this updates backing stores this working context
1253 wraps to reflect the fact that the changes reflected by this
1254 workingctx have been committed. For example, it marks
1255 modified and added files as normal in the dirstate.
1256
1257 """
1258
1259 with self._repo.dirstate.parentchange():
1260 for f in self.modified() + self.added():
1261 self._repo.dirstate.normal(f)
1262 for f in self.removed():
1263 self._repo.dirstate.drop(f)
1264 self._repo.dirstate.setparents(node)
1265
1266 # write changes out explicitly, because nesting wlock at
1267 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1268 # from immediately doing so for subsequent changing files
1269 self._repo.dirstate.write(self._repo.currenttransaction())
1270
1271 def dirty(self, missing=False, merge=True, branch=True):
1272 return False
1273
1274 class workingctx(committablectx):
1275 """A workingctx object makes access to data related to
1276 the current working directory convenient.
1277 date - any valid date string or (unixtime, offset), or None.
1278 user - username string, or None.
1279 extra - a dictionary of extra values, or None.
1280 changes - a list of file lists as returned by localrepo.status()
1281 or None to use the repository status.
1282 """
1283 def __init__(self, repo, text="", user=None, date=None, extra=None,
1284 changes=None):
1285 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1286
1287 def __iter__(self):
1288 d = self._repo.dirstate
1289 for f in d:
1290 if d[f] != 'r':
1291 yield f
1292
1293 def __contains__(self, key):
1294 return self._repo.dirstate[key] not in "?r"
1295
1296 def hex(self):
1297 return hex(wdirid)
1298
1299 @propertycache
1300 def _parents(self):
1301 p = self._repo.dirstate.parents()
1302 if p[1] == nullid:
1303 p = p[:-1]
1304 return [changectx(self._repo, x) for x in p]
1305
1306 def _fileinfo(self, path):
1307 # populate __dict__['_manifest'] as workingctx has no _manifestdelta
1308 self._manifest
1309 return super(workingctx, self)._fileinfo(path)
1310
1311 def filectx(self, path, filelog=None):
1312 """get a file context from the working directory"""
1313 return workingfilectx(self._repo, path, workingctx=self,
1314 filelog=filelog)
1315
1316 def dirty(self, missing=False, merge=True, branch=True):
1317 "check whether a working directory is modified"
1318 # check subrepos first
1319 for s in sorted(self.substate):
1320 if self.sub(s).dirty(missing=missing):
1321 return True
1322 # check current working dir
1323 return ((merge and self.p2()) or
1324 (branch and self.branch() != self.p1().branch()) or
1325 self.modified() or self.added() or self.removed() or
1326 (missing and self.deleted()))
1327
1328 def add(self, list, prefix=""):
1329 with self._repo.wlock():
1330 ui, ds = self._repo.ui, self._repo.dirstate
1331 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1332 rejected = []
1333 lstat = self._repo.wvfs.lstat
1334 for f in list:
1335 # ds.pathto() returns an absolute file when this is invoked from
1336 # the keyword extension. That gets flagged as non-portable on
1337 # Windows, since it contains the drive letter and colon.
1338 scmutil.checkportable(ui, os.path.join(prefix, f))
1339 try:
1340 st = lstat(f)
1341 except OSError:
1342 ui.warn(_("%s does not exist!\n") % uipath(f))
1343 rejected.append(f)
1344 continue
1345 limit = ui.configbytes('ui', 'large-file-limit')
1346 if limit != 0 and st.st_size > limit:
1347 ui.warn(_("%s: up to %d MB of RAM may be required "
1348 "to manage this file\n"
1349 "(use 'hg revert %s' to cancel the "
1350 "pending addition)\n")
1351 % (f, 3 * st.st_size // 1000000, uipath(f)))
1352 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1353 ui.warn(_("%s not added: only files and symlinks "
1354 "supported currently\n") % uipath(f))
1355 rejected.append(f)
1356 elif ds[f] in 'amn':
1357 ui.warn(_("%s already tracked!\n") % uipath(f))
1358 elif ds[f] == 'r':
1359 ds.normallookup(f)
1360 else:
1361 ds.add(f)
1362 return rejected
1363
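A hedged sketch of driving add() programmatically; the file name is illustrative, and the ui.large-file-limit override simply shows where the size warning threshold comes from:

wctx = repo[None]                       # the workingctx
rejected = wctx.add([b'newfile.txt'])   # returns the paths that were not added
# The warning above triggers when a file exceeds ui.large-file-limit (bytes);
# setting it to 0 disables the check.
repo.ui.setconfig(b'ui', b'large-file-limit', b'0', b'example')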
1364 def forget(self, files, prefix=""):
1365 with self._repo.wlock():
1366 ds = self._repo.dirstate
1367 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1368 rejected = []
1369 for f in files:
1370 if f not in self._repo.dirstate:
1371 self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
1372 rejected.append(f)
1373 elif self._repo.dirstate[f] != 'a':
1374 self._repo.dirstate.remove(f)
1375 else:
1376 self._repo.dirstate.drop(f)
1377 return rejected
1378
1379 def undelete(self, list):
1380 pctxs = self.parents()
1381 with self._repo.wlock():
1382 ds = self._repo.dirstate
1383 for f in list:
1384 if self._repo.dirstate[f] != 'r':
1385 self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
1386 else:
1387 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1388 t = fctx.data()
1389 self._repo.wwrite(f, t, fctx.flags())
1390 self._repo.dirstate.normal(f)
1391
1392 def copy(self, source, dest):
1393 try:
1394 st = self._repo.wvfs.lstat(dest)
1395 except OSError as err:
1396 if err.errno != errno.ENOENT:
1397 raise
1398 self._repo.ui.warn(_("%s does not exist!\n")
1399 % self._repo.dirstate.pathto(dest))
1400 return
1401 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1402 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1403 "symbolic link\n")
1404 % self._repo.dirstate.pathto(dest))
1405 else:
1406 with self._repo.wlock():
1407 if self._repo.dirstate[dest] in '?':
1408 self._repo.dirstate.add(dest)
1409 elif self._repo.dirstate[dest] in 'r':
1410 self._repo.dirstate.normallookup(dest)
1411 self._repo.dirstate.copy(source, dest)
1412
1413 def match(self, pats=None, include=None, exclude=None, default='glob',
1414 listsubrepos=False, badfn=None):
1415 r = self._repo
1416
1417 # Only a case insensitive filesystem needs magic to translate user input
1418 # to actual case in the filesystem.
1419 icasefs = not util.fscasesensitive(r.root)
1420 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1421 default, auditor=r.auditor, ctx=self,
1422 listsubrepos=listsubrepos, badfn=badfn,
1423 icasefs=icasefs)
1424
1425 def _filtersuspectsymlink(self, files):
1426 if not files or self._repo.dirstate._checklink:
1427 return files
1428
1429 # Symlink placeholders may get non-symlink-like contents
1430 # via user error or dereferencing by NFS or Samba servers,
1431 # so we filter out any placeholders that don't look like a
1432 # symlink
1433 sane = []
1434 for f in files:
1435 if self.flags(f) == 'l':
1436 d = self[f].data()
1437 if (d == '' or len(d) >= 1024 or '\n' in d
1438 or stringutil.binary(d)):
1439 self._repo.ui.debug('ignoring suspect symlink placeholder'
1440 ' "%s"\n' % f)
1441 continue
1442 sane.append(f)
1443 return sane
1444
1445 def _checklookup(self, files):
1446 # check for any possibly clean files
1447 if not files:
1448 return [], [], []
1449
1450 modified = []
1451 deleted = []
1452 fixup = []
1453 pctx = self._parents[0]
1454 # do a full compare of any files that might have changed
1455 for f in sorted(files):
1456 try:
1457 # This will return True for a file that got replaced by a
1458 # directory in the interim, but fixing that is pretty hard.
1459 if (f not in pctx or self.flags(f) != pctx.flags(f)
1460 or pctx[f].cmp(self[f])):
1461 modified.append(f)
1462 else:
1463 fixup.append(f)
1464 except (IOError, OSError):
1465 # A file became inaccessible in between? Mark it as deleted,
1466 # matching dirstate behavior (issue5584).
1467 # The dirstate has more complex behavior around whether a
1468 # missing file matches a directory, etc, but we don't need to
1469 # bother with that: if f has made it to this point, we're sure
1470 # it's in the dirstate.
1471 deleted.append(f)
1472
1473 return modified, deleted, fixup
1474
1475 def _poststatusfixup(self, status, fixup):
1476 """update dirstate for files that are actually clean"""
1477 poststatus = self._repo.postdsstatus()
1478 if fixup or poststatus:
1479 try:
1480 oldid = self._repo.dirstate.identity()
1481
1482 # updating the dirstate is optional
1483 # so we don't wait on the lock
1484 # wlock can invalidate the dirstate, so cache normal _after_
1485 # taking the lock
1486 with self._repo.wlock(False):
1487 if self._repo.dirstate.identity() == oldid:
1488 if fixup:
1489 normal = self._repo.dirstate.normal
1490 for f in fixup:
1491 normal(f)
1492 # write changes out explicitly, because nesting
1493 # wlock at runtime may prevent 'wlock.release()'
1494 # after this block from doing so for subsequent
1495 # changing files
1496 tr = self._repo.currenttransaction()
1497 self._repo.dirstate.write(tr)
1498
1499 if poststatus:
1500 for ps in poststatus:
1501 ps(self, status)
1502 else:
1503 # in this case, writing changes out breaks
1504 # consistency, because .hg/dirstate was
1505 # already changed simultaneously after last
1506 # caching (see also issue5584 for detail)
1507 self._repo.ui.debug('skip updating dirstate: '
1508 'identity mismatch\n')
1509 except error.LockError:
1510 pass
1511 finally:
1512 # Even if the wlock couldn't be grabbed, clear out the list.
1513 self._repo.clearpostdsstatus()
1514
1515 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1516 '''Gets the status from the dirstate -- internal use only.'''
1517 subrepos = []
1518 if '.hgsub' in self:
1519 subrepos = sorted(self.substate)
1520 cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
1521 clean=clean, unknown=unknown)
1522
1523 # check for any possibly clean files
1524 fixup = []
1525 if cmp:
1526 modified2, deleted2, fixup = self._checklookup(cmp)
1527 s.modified.extend(modified2)
1528 s.deleted.extend(deleted2)
1529
1530 if fixup and clean:
1531 s.clean.extend(fixup)
1532
1533 self._poststatusfixup(s, fixup)
1534
1535 if match.always():
1536 # cache for performance
1537 if s.unknown or s.ignored or s.clean:
1538 # "_status" is cached with list*=False in the normal route
1539 self._status = scmutil.status(s.modified, s.added, s.removed,
1540 s.deleted, [], [], [])
1541 else:
1542 self._status = s
1543
1544 return s
1545
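A hedged usage sketch of the status machinery this method feeds (the keyword arguments mirror the clean/unknown flags above; assumes `repo` as in earlier sketches):

st = repo.status(unknown=True, clean=True)
# st is a scmutil.status tuple with .modified, .added, .removed,
# .deleted, .unknown, .ignored and .clean lists of file names
print(sorted(st.modified), sorted(st.unknown), sorted(st.clean))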
1546 @propertycache
1547 def _manifest(self):
1548 """generate a manifest corresponding to the values in self._status
1549
1550 This reuses the file nodeids from the parent, but we use special node
1551 identifiers for added and modified files. This is used by manifest
1552 merge to see that files are different and by the update logic to avoid
1553 deleting newly added files.
1554 """
1555 return self._buildstatusmanifest(self._status)
1556
1557 def _buildstatusmanifest(self, status):
1558 """Builds a manifest that includes the given status results."""
1559 parents = self.parents()
1560
1561 man = parents[0].manifest().copy()
1562
1563 ff = self._flagfunc
1564 for i, l in ((addednodeid, status.added),
1565 (modifiednodeid, status.modified)):
1566 for f in l:
1567 man[f] = i
1568 try:
1569 man.setflag(f, ff(f))
1570 except OSError:
1571 pass
1572
1573 for f in status.deleted + status.removed:
1574 if f in man:
1575 del man[f]
1576
1577 return man
1578
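A brief, hedged illustration of the sentinel node ids used above (a plain dict stands in for a real manifest object; file names are hypothetical):

from mercurial.node import addednodeid, modifiednodeid

man = {b'tracked.txt': b'\x11' * 20}   # pretend entry copied from the parent
man[b'new.txt'] = addednodeid          # file added in the working directory
man[b'tracked.txt'] = modifiednodeid   # file modified in the working directory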
1579 def _buildstatus(self, other, s, match, listignored, listclean,
1580 listunknown):
1581 """build a status with respect to another context
1582
1583 This includes logic for maintaining the fast path of status when
1584 comparing the working directory against its parent, which is to skip
1585 building a new manifest if self (working directory) is not comparing
1586 against its parent (repo['.']).
1587 """
1588 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1589 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1590 # might have accidentally ended up with the entire contents of the file
1591 # they are supposed to be linking to.
1592 s.modified[:] = self._filtersuspectsymlink(s.modified)
1593 if other != self._repo['.']:
1594 s = super(workingctx, self)._buildstatus(other, s, match,
1595 listignored, listclean,
1596 listunknown)
1597 return s
1598
1599 def _matchstatus(self, other, match):
1600 """override the match method with a filter for directory patterns
1601
1602 We use inheritance to customize the match.bad method only in cases of
1603 workingctx since it belongs only to the working directory when
1604 comparing against the parent changeset.
1605
1606 If we aren't comparing against the working directory's parent, then we
1607 just use the default match object sent to us.
1608 """
1609 if other != self._repo['.']:
1610 def bad(f, msg):
1611 # 'f' may be a directory pattern from 'match.files()',
1612 # so 'f not in ctx1' is not enough
1613 if f not in other and not other.hasdir(f):
1614 self._repo.ui.warn('%s: %s\n' %
1615 (self._repo.dirstate.pathto(f), msg))
1616 match.bad = bad
1617 return match
1618
1619 def markcommitted(self, node):
1620 super(workingctx, self).markcommitted(node)
1621
1622 sparse.aftercommit(self._repo, node)
1623
1624 class committablefilectx(basefilectx):
1625 """A committablefilectx provides common functionality for a file context
1626 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1627 def __init__(self, repo, path, filelog=None, ctx=None):
1628 self._repo = repo
1629 self._path = path
1630 self._changeid = None
1631 self._filerev = self._filenode = None
1632
1633 if filelog is not None:
1634 self._filelog = filelog
1635 if ctx:
1636 self._changectx = ctx
1637
1638 def __nonzero__(self):
1639 return True
1640
1641 __bool__ = __nonzero__
1642
1643 def linkrev(self):
1644 # linked to self._changectx no matter if file is modified or not
1645 return self.rev()
1646
1647 def parents(self):
1648 '''return parent filectxs, following copies if necessary'''
1649 def filenode(ctx, path):
1650 return ctx._manifest.get(path, nullid)
1651
1652 path = self._path
1653 fl = self._filelog
1654 pcl = self._changectx._parents
1655 renamed = self.renamed()
1656
1657 if renamed:
1658 pl = [renamed + (None,)]
1659 else:
1660 pl = [(path, filenode(pcl[0], path), fl)]
1661
1662 for pc in pcl[1:]:
1663 pl.append((path, filenode(pc, path), fl))
1664
1665 return [self._parentfilectx(p, fileid=n, filelog=l)
1666 for p, n, l in pl if n != nullid]
1667
1668 def children(self):
1669 return []
1670
1671 class workingfilectx(committablefilectx):
1672 """A workingfilectx object makes access to data related to a particular
1673 file in the working directory convenient."""
1674 def __init__(self, repo, path, filelog=None, workingctx=None):
1675 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1676
1677 @propertycache
1678 def _changectx(self):
1679 return workingctx(self._repo)
1680
1681 def data(self):
1682 return self._repo.wread(self._path)
1683 def renamed(self):
1684 rp = self._repo.dirstate.copied(self._path)
1685 if not rp:
1686 return None
1687 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1688
1689 def size(self):
1690 return self._repo.wvfs.lstat(self._path).st_size
1691 def date(self):
1692 t, tz = self._changectx.date()
1693 try:
1694 return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
1695 except OSError as err:
1696 if err.errno != errno.ENOENT:
1697 raise
1698 return (t, tz)
1699
1700 def exists(self):
1701 return self._repo.wvfs.exists(self._path)
1702
1703 def lexists(self):
1704 return self._repo.wvfs.lexists(self._path)
1705
1706 def audit(self):
1707 return self._repo.wvfs.audit(self._path)
1708
1709 def cmp(self, fctx):
1710 """compare with other file context
1711
1712 returns True if different than fctx.
1713 """
1714 # fctx should be a filectx (not a workingfilectx)
1715 # invert comparison to reuse the same code path
1716 return fctx.cmp(self)
1717
1718 def remove(self, ignoremissing=False):
1719 """wraps unlink for a repo's working directory"""
1720 rmdir = self._repo.ui.configbool('experimental', 'removeemptydirs')
1721 self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing,
1722 rmdir=rmdir)
1723
1724 def write(self, data, flags, backgroundclose=False, **kwargs):
1725 """wraps repo.wwrite"""
1726 self._repo.wwrite(self._path, data, flags,
1727 backgroundclose=backgroundclose,
1728 **kwargs)
1729
1730 def markcopied(self, src):
1731 """marks this file a copy of `src`"""
1732 if self._repo.dirstate[self._path] in "nma":
1733 self._repo.dirstate.copy(src, self._path)
1734
1735 def clearunknown(self):
1736 """Removes conflicting items in the working directory so that
1737 ``write()`` can be called successfully.
1738 """
1739 wvfs = self._repo.wvfs
1740 f = self._path
1741 wvfs.audit(f)
1742 if self._repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
1743 # remove files under the directory as they should already be
1744 # warned and backed up
1745 if wvfs.isdir(f) and not wvfs.islink(f):
1746 wvfs.rmtree(f, forcibly=True)
1747 for p in reversed(list(util.finddirs(f))):
1748 if wvfs.isfileorlink(p):
1749 wvfs.unlink(p)
1750 break
1751 else:
1752 # don't remove files if path conflicts are not processed
1753 if wvfs.isdir(f) and not wvfs.islink(f):
1754 wvfs.removedirs(f)
1755
1756 def setflags(self, l, x):
1757 self._repo.wvfs.setflags(self._path, l, x)
1758
1759 class overlayworkingctx(committablectx):
1760 """Wraps another mutable context with a write-back cache that can be
1761 converted into a commit context.
1762
1763 self._cache[path] maps to a dict with keys: {
1764 'exists': bool?
1765 'date': date?
1766 'data': str?
1767 'flags': str?
1768 'copied': str? (path or None)
1769 }
1770 If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
1771 is `False`, the file was deleted.
1772 """
1773
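An illustrative cache entry matching the shape documented in the class docstring above (all values are hypothetical):

cache_entry = {
    'exists': True,
    'date': (0, 0),           # (unixtime, offset), as returned by filedate()
    'data': b'new contents\n',
    'flags': '',              # '', 'x' (executable) or 'l' (symlink)
    'copied': None,           # or the source path for a copy/rename
}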
1774 def __init__(self, repo):
1775 super(overlayworkingctx, self).__init__(repo)
1776 self.clean()
1777
1778 def setbase(self, wrappedctx):
1779 self._wrappedctx = wrappedctx
1780 self._parents = [wrappedctx]
1781 # Drop old manifest cache as it is now out of date.
1782 # This is necessary when, e.g., rebasing several nodes with one
1783 # ``overlayworkingctx`` (e.g. with --collapse).
1784 util.clearcachedproperty(self, '_manifest')
1785
1786 def data(self, path):
1787 if self.isdirty(path):
1788 if self._cache[path]['exists']:
1789 if self._cache[path]['data']:
1790 return self._cache[path]['data']
1791 else:
1792 # Must fallback here, too, because we only set flags.
1793 return self._wrappedctx[path].data()
1794 else:
1795 raise error.ProgrammingError("No such file or directory: %s" %
1796 path)
1797 else:
1798 return self._wrappedctx[path].data()
1799
1800 @propertycache
1801 def _manifest(self):
1802 parents = self.parents()
1803 man = parents[0].manifest().copy()
1804
1805 flag = self._flagfunc
1806 for path in self.added():
1807 man[path] = addednodeid
1808 man.setflag(path, flag(path))
1809 for path in self.modified():
1810 man[path] = modifiednodeid
1811 man.setflag(path, flag(path))
1812 for path in self.removed():
1813 del man[path]
1814 return man
1815
1816 @propertycache
1817 def _flagfunc(self):
1818 def f(path):
1819 return self._cache[path]['flags']
1820 return f
1821
1822 def files(self):
1823 return sorted(self.added() + self.modified() + self.removed())
1824
1825 def modified(self):
1826 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1827 self._existsinparent(f)]
1828
1829 def added(self):
1830 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1831 not self._existsinparent(f)]
1832
1833 def removed(self):
1834 return [f for f in self._cache.keys() if
1835 not self._cache[f]['exists'] and self._existsinparent(f)]
1836
1837 def isinmemory(self):
1838 return True
1839
1840 def filedate(self, path):
1841 if self.isdirty(path):
1842 return self._cache[path]['date']
1843 else:
1844 return self._wrappedctx[path].date()
1845
1846 def markcopied(self, path, origin):
1847 if self.isdirty(path):
1848 self._cache[path]['copied'] = origin
1849 else:
1850 raise error.ProgrammingError('markcopied() called on clean context')
1851
1852 def copydata(self, path):
1853 if self.isdirty(path):
1854 return self._cache[path]['copied']
1855 else:
1856 raise error.ProgrammingError('copydata() called on clean context')
1856 raise error.ProgrammingError('copydata() called on clean context')
1857
1857
1858 def flags(self, path):
1858 def flags(self, path):
1859 if self.isdirty(path):
1859 if self.isdirty(path):
1860 if self._cache[path]['exists']:
1860 if self._cache[path]['exists']:
1861 return self._cache[path]['flags']
1861 return self._cache[path]['flags']
1862 else:
1862 else:
1863 raise error.ProgrammingError("No such file or directory: %s" %
1863 raise error.ProgrammingError("No such file or directory: %s" %
1864 self._path)
1864 self._path)
1865 else:
1865 else:
1866 return self._wrappedctx[path].flags()
1866 return self._wrappedctx[path].flags()
1867
1867
1868 def _existsinparent(self, path):
1868 def _existsinparent(self, path):
1869 try:
1869 try:
            # ``commitctx`` raises a ``ManifestLookupError`` if a path does not
            # exist, unlike ``workingctx``, which returns a ``workingfilectx``
            # with an ``exists()`` function.
            self._wrappedctx[path]
            return True
        except error.ManifestLookupError:
            return False

    def _auditconflicts(self, path):
        """Replicates conflict checks done by wvfs.write().

        Since we never write to the filesystem and never call `applyupdates` in
        IMM, we'll never check that a path is actually writable -- e.g., because
        it adds `a/foo`, but `a` is actually a file in the other commit.
        """
        def fail(path, component):
            # p1() is the base and we're receiving "writes" for p2()'s
            # files.
            if 'l' in self.p1()[component].flags():
                raise error.Abort("error: %s conflicts with symlink %s "
                                  "in %s." % (path, component,
                                              self.p1().rev()))
            else:
                raise error.Abort("error: '%s' conflicts with file '%s' in "
                                  "%s." % (path, component,
                                           self.p1().rev()))

        # Test that each new directory to be created to write this path from p2
        # is not a file in p1.
        components = path.split('/')
        for i in pycompat.xrange(len(components)):
            component = "/".join(components[0:i])
            if component in self.p1() and self._cache[component]['exists']:
                fail(path, component)

        # Test the other direction -- that this path from p2 isn't a directory
        # in p1 (test that p1 doesn't have any paths matching `path/*`).
        match = matchmod.match('/', '', [path + '/'], default=b'relpath')
        matches = self.p1().manifest().matches(match)
        mfiles = matches.keys()
        if len(mfiles) > 0:
            if len(mfiles) == 1 and mfiles[0] == path:
                return
            # omit the files which are deleted in current IMM wctx
            mfiles = [m for m in mfiles if self._cache[m]['exists']]
            if not mfiles:
                return
            raise error.Abort("error: file '%s' cannot be written because "
                              " '%s/' is a folder in %s (containing %d "
                              "entries: %s)"
                              % (path, path, self.p1(), len(mfiles),
                                 ', '.join(mfiles)))

    def write(self, path, data, flags='', **kwargs):
        if data is None:
            raise error.ProgrammingError("data must be non-None")
        self._auditconflicts(path)
        self._markdirty(path, exists=True, data=data, date=dateutil.makedate(),
                        flags=flags)

    def setflags(self, path, l, x):
        flag = ''
        if l:
            flag = 'l'
        elif x:
            flag = 'x'
        self._markdirty(path, exists=True, date=dateutil.makedate(),
                        flags=flag)

    def remove(self, path):
        self._markdirty(path, exists=False)

    def exists(self, path):
        """exists behaves like `lexists`, but needs to follow symlinks and
        return False if they are broken.
        """
        if self.isdirty(path):
            # If this path exists and is a symlink, "follow" it by calling
            # exists on the destination path.
            if (self._cache[path]['exists'] and
                    'l' in self._cache[path]['flags']):
                return self.exists(self._cache[path]['data'].strip())
            else:
                return self._cache[path]['exists']

        return self._existsinparent(path)

    def lexists(self, path):
        """lexists returns True if the path exists"""
        if self.isdirty(path):
            return self._cache[path]['exists']

        return self._existsinparent(path)

    def size(self, path):
        if self.isdirty(path):
            if self._cache[path]['exists']:
                return len(self._cache[path]['data'])
            else:
                raise error.ProgrammingError("No such file or directory: %s" %
                                             self._path)
        return self._wrappedctx[path].size()

    def tomemctx(self, text, branch=None, extra=None, date=None, parents=None,
                 user=None, editor=None):
        """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
        committed.

        ``text`` is the commit message.
        ``parents`` (optional) are rev numbers.
        """
        # Default parents to the wrapped context's parents if not passed.
        if parents is None:
            parents = self._wrappedctx.parents()
            if len(parents) == 1:
                parents = (parents[0], None)

        # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
        if parents[1] is None:
            parents = (self._repo[parents[0]], None)
        else:
            parents = (self._repo[parents[0]], self._repo[parents[1]])

        files = self._cache.keys()
        def getfile(repo, memctx, path):
            if self._cache[path]['exists']:
                return memfilectx(repo, memctx, path,
                                  self._cache[path]['data'],
                                  'l' in self._cache[path]['flags'],
                                  'x' in self._cache[path]['flags'],
                                  self._cache[path]['copied'])
            else:
                # Returning None, but including the path in `files`, is
                # necessary for memctx to register a deletion.
                return None
        return memctx(self._repo, parents, text, files, getfile, date=date,
                      extra=extra, user=user, branch=branch, editor=editor)

    def isdirty(self, path):
        return path in self._cache

    def isempty(self):
        # We need to discard any keys that are actually clean before the empty
        # commit check.
        self._compact()
        return len(self._cache) == 0

    def clean(self):
        self._cache = {}

    def _compact(self):
        """Removes keys from the cache that are actually clean, by comparing
        them with the underlying context.

        This can occur during the merge process, e.g. by passing --tool :local
        to resolve a conflict.
        """
        keys = []
        for path in self._cache.keys():
            cache = self._cache[path]
            try:
                underlying = self._wrappedctx[path]
                if (underlying.data() == cache['data'] and
                        underlying.flags() == cache['flags']):
                    keys.append(path)
            except error.ManifestLookupError:
                # Path not in the underlying manifest (created).
                continue

        for path in keys:
            del self._cache[path]
        return keys

    def _markdirty(self, path, exists, data=None, date=None, flags=''):
        # data not provided, let's see if we already have some; if not, let's
        # grab it from our underlying context, so that we always have data if
        # the file is marked as existing.
        if exists and data is None:
            oldentry = self._cache.get(path) or {}
            data = oldentry.get('data') or self._wrappedctx[path].data()

        self._cache[path] = {
            'exists': exists,
            'data': data,
            'date': date,
            'flags': flags,
            'copied': None,
        }

    def filectx(self, path, filelog=None):
        return overlayworkingfilectx(self._repo, path, parent=self,
                                     filelog=filelog)

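# Hedged usage sketch (illustrative, not part of the original module): the
# typical flow for ``overlayworkingctx`` is to wrap a base revision, buffer
# writes in the cache described above, and then convert the result into a
# ``memctx`` for committing.  ``repo`` is assumed to be an open localrepo and
# the file name and message are invented for the example.
def _exampleoverlaycommit(repo):
    wctx = overlayworkingctx(repo)
    wctx.setbase(repo['.'])
    wctx.write('example.txt', 'new contents\n')
    # parents are passed as rev numbers, per the ``tomemctx`` docstring
    mctx = wctx.tomemctx('example in-memory commit',
                         parents=(repo['.'].rev(), None))
    return repo.commitctx(mctx)
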
class overlayworkingfilectx(committablefilectx):
    """Wraps a ``workingfilectx`` but intercepts all writes into an in-memory
    cache, which can be flushed through later by calling ``flush()``."""

    def __init__(self, repo, path, filelog=None, parent=None):
        super(overlayworkingfilectx, self).__init__(repo, path, filelog,
                                                    parent)
        self._repo = repo
        self._parent = parent
        self._path = path

    def cmp(self, fctx):
        return self.data() != fctx.data()

    def changectx(self):
        return self._parent

    def data(self):
        return self._parent.data(self._path)

    def date(self):
        return self._parent.filedate(self._path)

    def exists(self):
        return self.lexists()

    def lexists(self):
        return self._parent.exists(self._path)

    def renamed(self):
        path = self._parent.copydata(self._path)
        if not path:
            return None
        return path, self._changectx._parents[0]._manifest.get(path, nullid)

    def size(self):
        return self._parent.size(self._path)

    def markcopied(self, origin):
        self._parent.markcopied(self._path, origin)

    def audit(self):
        pass

    def flags(self):
        return self._parent.flags(self._path)

    def setflags(self, islink, isexec):
        return self._parent.setflags(self._path, islink, isexec)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        return self._parent.write(self._path, data, flags, **kwargs)

    def remove(self, ignoremissing=False):
        return self._parent.remove(self._path)

    def clearunknown(self):
        pass

class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """
    def __init__(self, repo, changes,
                 text="", user=None, date=None, extra=None):
        super(workingcommitctx, self).__init__(repo, text, user, date, extra,
                                               changes)

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        if clean:
            clean = [f for f in self._manifest if f not in self._changedset]
        else:
            clean = []
        return scmutil.status([f for f in self._status.modified if match(f)],
                              [f for f in self._status.added if match(f)],
                              [f for f in self._status.removed if match(f)],
                              [], [], [], clean)

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        changed = set(self._status.modified)
        changed.update(self._status.added)
        changed.update(self._status.removed)
        return changed

def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    cache = {}

    def getfilectx(repo, memctx, path):
        if path not in cache:
            cache[path] = func(repo, memctx, path)
        return cache[path]

    return getfilectx

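# Hedged illustration (not part of the original module): wrapping a filectxfn
# with ``makecachingfilectxfn`` so that repeated requests for the same path
# reuse the first result.  The wrapped callback below is invented for the
# example and simply copies file data from an existing changectx ``ctx``.
def _examplecachedfilectxfn(ctx):
    def getdata(repo, memctx, path):
        return memfilectx(repo, memctx, path, ctx[path].data())
    return makecachingfilectxfn(getdata)
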
def memfilefromctx(ctx):
    """Given a context return a memfilectx for ctx[path]

    This is a convenience method for building a memctx based on another
    context.
    """
    def getfilectx(repo, memctx, path):
        fctx = ctx[path]
        # this is weird but apparently we only keep track of one parent
        # (why not only store that instead of a tuple?)
        copied = fctx.renamed()
        if copied:
            copied = copied[0]
        return memfilectx(repo, memctx, path, fctx.data(),
                          islink=fctx.islink(), isexec=fctx.isexec(),
                          copied=copied)

    return getfilectx

def memfilefrompatch(patchstore):
    """Given a patch (e.g. patchstore object) return a memfilectx

    This is a convenience method for building a memctx based on a patchstore.
    """
    def getfilectx(repo, memctx, path):
        data, mode, copied = patchstore.getfile(path)
        if data is None:
            return None
        islink, isexec = mode
        return memfilectx(repo, memctx, path, data, islink=islink,
                          isexec=isexec, copied=copied)

    return getfilectx

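# Hedged illustration (not part of the original module): both helpers above
# build ``filectxfn``-style callables, which is also what ``memctx`` below
# does internally when handed a non-callable store.  Invoking one directly
# might look like this; ``ctx`` is assumed to be a changectx and ``path`` a
# file tracked in it.
def _examplegetfilefromctx(repo, memctx, ctx, path):
    getfilectx = memfilefromctx(ctx)
    return getfilectx(repo, memctx, path)
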
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time, while the
    related file data is made available through a callback
    mechanism.  'repo' is the current localrepo, 'parents' is a
    sequence of two parent revision identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    the requested file, relative to the repository root. It is fired by the
    commit function for every file in 'files', but the call order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn returns None for recent
    Mercurial. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to the current
    repository username, date is the commit date in any format
    supported by dateutil.parsedate() and defaults to the current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, branch=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [self._repo[p] for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        if branch is not None:
            self._extra['branch'] = encoding.fromlocal(branch)
        self.substate = {}

        if isinstance(filectxfn, patch.filestore):
            filectxfn = memfilefrompatch(filectxfn)
        elif not callable(filectxfn):
            # if store is not callable, wrap it in a function
            filectxfn = memfilefromctx(filectxfn)

        # memoizing increases performance for e.g. vcs convert scenarios.
        self._filectxfn = makecachingfilectxfn(filectxfn)

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            man[f] = modifiednodeid

        for f in self._status.added:
            man[f] = addednodeid

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
2304 # "1 < len(self._parents)" can't be used for checking
2304 # "1 < len(self._parents)" can't be used for checking
2305 # existence of the 2nd parent, because "memctx._parents" is
2305 # existence of the 2nd parent, because "memctx._parents" is
2306 # explicitly initialized by the list, of which length is 2.
2306 # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])

class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, changectx, path, data, islink=False,
                 isexec=False, copied=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, changectx)
        self._data = data
        if islink:
            self._flags = 'l'
        elif isexec:
            self._flags = 'x'
        else:
            self._flags = ''
        self._copied = None
        if copied:
            self._copied = (copied, nullid)

    def data(self):
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags, **kwargs):
        """wraps repo.wwrite"""
        self._data = data

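# Hedged usage sketch (illustrative, not part of the original module): creating
# a one-file commit entirely in memory with ``memctx`` and ``memfilectx``.
# ``repo`` is assumed to be an open localrepo; the file name and message are
# invented for the example.
def _examplememcommit(repo):
    def getfilectx(repo, memctx, path):
        return memfilectx(repo, memctx, path, 'contents of %s\n' % path)
    ctx = memctx(repo, (repo['.'].node(), None), 'example in-memory commit',
                 ['example.txt'], getfilectx)
    return repo.commitctx(ctx)
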
class metadataonlyctx(committablectx):
    """Like memctx but it reuses the manifest of a different commit.
    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time. 'repo' is the
    current localrepo, 'ctx' is the original revision whose manifest we're
    reusing, 'parents' is a sequence of two parent revision identifiers (pass
    None for every missing parent), 'text' is the commit message.

    user receives the committer name and defaults to the current repository
    username, date is the commit date in any format supported by
    dateutil.parsedate() and defaults to the current date, extra is a
    dictionary of metadata or is left empty.
    """
    def __init__(self, repo, originalctx, parents=None, text=None, user=None,
                 date=None, extra=None, editor=False):
        if text is None:
            text = originalctx.description()
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        self._manifestnode = originalctx.manifestnode()
        if parents is None:
            parents = originalctx.parents()
        else:
            parents = [repo[p] for p in parents if p is not None]
        parents = parents[:]
        while len(parents) < 2:
            parents.append(repo[nullid])
        p1, p2 = self._parents = parents

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        mp1, mp2 = self.manifestctx().parents
        if p1 != nullid and p1.manifestnode() != mp1:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p1 doesn\'t match the new ctx p1')
        if p2 != nullid and p2.manifestnode() != mp2:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p2 doesn\'t match the new ctx p2')

        self._files = originalctx.files()
        self.substate = {}

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        return self._manifestnode

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the ``origctx``
        and parents manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
2438 # "1 < len(self._parents)" can't be used for checking
2438 # "1 < len(self._parents)" can't be used for checking
2439 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2439 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2440 # explicitly initialized by the list, of which length is 2.
2440 # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif f in self:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])

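# Hedged usage sketch (illustrative, not part of the original module): a
# metadata-only rewrite such as rewording a commit message, which reuses the
# original manifest instead of rebuilding it.  ``repo`` is assumed to be an
# open localrepo and ``rev`` an existing revision.
def _examplereword(repo, rev, newtext):
    old = repo[rev]
    new = metadataonlyctx(repo, old, text=newtext, user=old.user(),
                          date=old.date(), extra=old.extra())
    return repo.commitctx(new)
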
class arbitraryfilectx(object):
    """Allows you to use filectx-like functions on a file in an arbitrary
    location on disk, possibly not in the working directory.
    """
    def __init__(self, path, repo=None):
        # Repo is optional because contrib/simplemerge uses this class.
        self._repo = repo
        self._path = path

    def cmp(self, fctx):
        # filecmp follows symlinks whereas `cmp` should not, so skip the fast
        # path if either side is a symlink.
        symlinks = ('l' in self.flags() or 'l' in fctx.flags())
        if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
            # Add a fast-path for merge if both sides are disk-backed.
            # Note that filecmp uses the opposite return values (True if same)
            # from our cmp functions (True if different).
            return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
        return self.data() != fctx.data()

    def path(self):
        return self._path

    def flags(self):
        return ''

    def data(self):
        return util.readfile(self._path)

    def decodeddata(self):
        with open(self._path, "rb") as f:
            return f.read()

    def remove(self):
        util.unlink(self._path)

    def write(self, data, flags, **kwargs):
        assert not flags
        with open(self._path, "w") as f:
            f.write(data)
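
# Hedged usage sketch (illustrative, not part of the original module): comparing
# two files on disk without involving the working directory, much as
# contrib/simplemerge compares its inputs.  The paths are whatever the caller
# supplies; cmp() returns True when the contents differ.
def _examplefilesdiffer(path1, path2):
    return arbitraryfilectx(path1).cmp(arbitraryfilectx(path2))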