##// END OF EJS Templates
context: only bother looking for broken dirstate for 20-byte changeid...
Martin von Zweigbergk -
r37872:fdd8da79 default
parent child Browse files
Show More
@@ -1,2541 +1,2540
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import filecmp
11 import filecmp
12 import os
12 import os
13 import re
13 import re
14 import stat
14 import stat
15
15
16 from .i18n import _
16 from .i18n import _
17 from .node import (
17 from .node import (
18 addednodeid,
18 addednodeid,
19 bin,
19 bin,
20 hex,
20 hex,
21 modifiednodeid,
21 modifiednodeid,
22 nullid,
22 nullid,
23 nullrev,
23 nullrev,
24 short,
24 short,
25 wdirfilenodeids,
25 wdirfilenodeids,
26 wdirid,
26 wdirid,
27 )
27 )
28 from . import (
28 from . import (
29 dagop,
29 dagop,
30 encoding,
30 encoding,
31 error,
31 error,
32 fileset,
32 fileset,
33 match as matchmod,
33 match as matchmod,
34 obsolete as obsmod,
34 obsolete as obsmod,
35 patch,
35 patch,
36 pathutil,
36 pathutil,
37 phases,
37 phases,
38 pycompat,
38 pycompat,
39 repoview,
39 repoview,
40 revlog,
40 revlog,
41 scmutil,
41 scmutil,
42 sparse,
42 sparse,
43 subrepo,
43 subrepo,
44 subrepoutil,
44 subrepoutil,
45 util,
45 util,
46 )
46 )
47 from .utils import (
47 from .utils import (
48 dateutil,
48 dateutil,
49 stringutil,
49 stringutil,
50 )
50 )
51
51
# Shorthand for the cached-property decorator used by contexts below.
propertycache = util.propertycache

# Matcher for any byte outside the printable ASCII range 0x21-0x7f.
# Used when a 20-byte changeid fails lookup, to decide whether it looks
# like a binary node that should be hexlified for the error message.
nonascii = re.compile(br'[^\x21-\x7f]').search
55
55
class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
                be committed,
    memctx: a context that represents changes in-memory and can also
            be committed."""

    def __init__(self, repo):
        self._repo = repo

    def __bytes__(self):
        return short(self.node())

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return r"<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        # Contexts of different concrete types never compare equal, even
        # when they happen to share a revision number.
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        return key in self._manifest

    def __getitem__(self, key):
        return self.filectx(key)

    def __iter__(self):
        return iter(self._manifest)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results, if this is
        a working copy context. For non-working copy contexts, it just returns
        the normal manifest."""
        return self.manifest()

    def _matchstatus(self, other, match):
        """This internal method provides a way for child objects to override
        the match operator.
        """
        return match

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in d.iteritems():
            if fn in deletedset:
                continue
            if value is None:
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in wdirfilenodeids:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [fn for fn in unknown if fn not in mf1 and
                       (not match or match(fn))]
            ignored = [fn for fn in ignored if fn not in mf1 and
                       (not match or match(fn))]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(modified, added, removed, deleted, unknown,
                              ignored, clean)

    @propertycache
    def substate(self):
        return subrepoutil.state(self, self._repo.ui)

    def subrev(self, subpath):
        return self.substate[subpath][1]

    def rev(self):
        return self._rev
    def node(self):
        return self._node
    def hex(self):
        return hex(self.node())
    def manifest(self):
        return self._manifest
    def manifestctx(self):
        return self._manifestctx
    def repo(self):
        return self._repo
    def phasestr(self):
        return phases.phasenames[self.phase()]
    def mutable(self):
        return self.phase() > phases.public

    def getfileset(self, expr):
        return fileset.getfileset(self, expr)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, 'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, 'extinct')

    def orphan(self):
        """True if the changeset is not obsolete but its ancestors are"""
        return self.rev() in obsmod.getrevs(self._repo, 'orphan')

    def phasedivergent(self):
        """True if the changeset tries to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be bumped.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')

    def contentdivergent(self):
        """Is a successor of a changeset with multiple possible successor sets

        Only non-public and non-obsolete changesets may be divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')

    def isunstable(self):
        """True if the changeset is orphan, phase-divergent or
        content-divergent"""
        return self.orphan() or self.phasedivergent() or self.contentdivergent()

    def instabilities(self):
        """return the list of instabilities affecting this changeset.

        Instabilities are returned as strings. possible values are:
        - orphan,
        - phase-divergent,
        - content-divergent.
        """
        instabilities = []
        if self.orphan():
            instabilities.append('orphan')
        if self.phasedivergent():
            instabilities.append('phase-divergent')
        if self.contentdivergent():
            instabilities.append('content-divergent')
        return instabilities

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        return changectx(self._repo, nullrev)

    def _fileinfo(self, path):
        # Prefer whichever manifest representation is already cached to
        # avoid a full manifest read when possible.
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(self._node, path,
                                                _('not found in manifest'))
        if r'_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (self._manifestdelta[path],
                        self._manifestdelta.flags(path))
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(self._node, path,
                                            _('not found in manifest'))

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return ''

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        '''return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        '''
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        r = self._repo
        return matchmod.match(r.root, r.getcwd(), pats,
                              include, exclude, default,
                              auditor=r.nofsauditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn)

    def diff(self, ctx2=None, match=None, **opts):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        diffopts = patch.diffopts(self._repo.ui, pycompat.byteskwargs(opts))
        return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)

    def dirs(self):
        return self._manifest.dirs()

    def hasdir(self, dir):
        return self._manifest.hasdir(dir)

    def status(self, other=None, match=None, listignored=False,
               listclean=False, listunknown=False, listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        # (renamed from 'reversed' to avoid shadowing the builtin)
        swapped = False
        if (not isinstance(ctx1, changectx)
            and isinstance(ctx2, changectx)):
            swapped = True
            ctx1, ctx2 = ctx2, ctx1

        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
                              listunknown)

        if swapped:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
                               r.clean)

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do is
                    # ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(rev2, match=submatch, ignored=listignored,
                               clean=listclean, unknown=listunknown,
                               listsubrepos=True)
                for rfiles, sfiles in zip(r, s):
                    rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)

        for l in r:
            l.sort()

        return r
378
378
class changectx(basectx):
    """A changecontext object makes access to data related to a particular
    changeset convenient. It represents a read-only context already present in
    the repo."""
    def __init__(self, repo, changeid='.'):
        """changeid is a revision number, node, or tag"""
        super(changectx, self).__init__(repo)

        try:
            if isinstance(changeid, int):
                self._node = repo.changelog.node(changeid)
                self._rev = changeid
                return
            if changeid == 'null':
                self._node = nullid
                self._rev = nullrev
                return
            if changeid == 'tip':
                self._node = repo.changelog.tip()
                self._rev = repo.changelog.rev(self._node)
                return
            if (changeid == '.'
                or repo.local() and changeid == repo.dirstate.p1()):
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                self._node = repo.dirstate.p1()
                self._rev = repo.unfiltered().changelog.rev(self._node)
                return
            if len(changeid) == 20:
                # Binary node.
                try:
                    self._node = changeid
                    self._rev = repo.changelog.rev(changeid)
                    return
                except error.FilteredLookupError:
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a
                    # recognizable exception for filtered changeset access
                    if (repo.local()
                        and changeid in repo.unfiltered().dirstate.parents()):
                        msg = _("working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))

            if len(changeid) == 40:
                # Hex node.
                try:
                    self._node = bin(changeid)
                    self._rev = repo.changelog.rev(self._node)
                    return
                except error.FilteredLookupError:
                    raise
                except (TypeError, LookupError):
                    pass

            # lookup failed
            # hexlify a binary-looking changeid so the error message below
            # stays printable
            try:
                if len(changeid) == 20 and nonascii(changeid):
                    changeid = hex(changeid)
            except TypeError:
                pass
        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(_("filtered revision '%s'")
                                                % changeid)
        except error.FilteredRepoLookupError:
            raise
        except IndexError:
            pass
        raise error.RepoLookupError(
            _("unknown revision '%s'") % changeid)
450
449
451 def __hash__(self):
450 def __hash__(self):
452 try:
451 try:
453 return hash(self._rev)
452 return hash(self._rev)
454 except AttributeError:
453 except AttributeError:
455 return id(self)
454 return id(self)
456
455
457 def __nonzero__(self):
456 def __nonzero__(self):
458 return self._rev != nullrev
457 return self._rev != nullrev
459
458
460 __bool__ = __nonzero__
459 __bool__ = __nonzero__
461
460
462 @propertycache
461 @propertycache
463 def _changeset(self):
462 def _changeset(self):
464 return self._repo.changelog.changelogrevision(self.rev())
463 return self._repo.changelog.changelogrevision(self.rev())
465
464
466 @propertycache
465 @propertycache
467 def _manifest(self):
466 def _manifest(self):
468 return self._manifestctx.read()
467 return self._manifestctx.read()
469
468
470 @property
469 @property
471 def _manifestctx(self):
470 def _manifestctx(self):
472 return self._repo.manifestlog[self._changeset.manifest]
471 return self._repo.manifestlog[self._changeset.manifest]
473
472
474 @propertycache
473 @propertycache
475 def _manifestdelta(self):
474 def _manifestdelta(self):
476 return self._manifestctx.readdelta()
475 return self._manifestctx.readdelta()
477
476
478 @propertycache
477 @propertycache
479 def _parents(self):
478 def _parents(self):
480 repo = self._repo
479 repo = self._repo
481 p1, p2 = repo.changelog.parentrevs(self._rev)
480 p1, p2 = repo.changelog.parentrevs(self._rev)
482 if p2 == nullrev:
481 if p2 == nullrev:
483 return [changectx(repo, p1)]
482 return [changectx(repo, p1)]
484 return [changectx(repo, p1), changectx(repo, p2)]
483 return [changectx(repo, p1), changectx(repo, p2)]
485
484
486 def changeset(self):
485 def changeset(self):
487 c = self._changeset
486 c = self._changeset
488 return (
487 return (
489 c.manifest,
488 c.manifest,
490 c.user,
489 c.user,
491 c.date,
490 c.date,
492 c.files,
491 c.files,
493 c.description,
492 c.description,
494 c.extra,
493 c.extra,
495 )
494 )
496 def manifestnode(self):
495 def manifestnode(self):
497 return self._changeset.manifest
496 return self._changeset.manifest
498
497
499 def user(self):
498 def user(self):
500 return self._changeset.user
499 return self._changeset.user
501 def date(self):
500 def date(self):
502 return self._changeset.date
501 return self._changeset.date
503 def files(self):
502 def files(self):
504 return self._changeset.files
503 return self._changeset.files
505 def description(self):
504 def description(self):
506 return self._changeset.description
505 return self._changeset.description
507 def branch(self):
506 def branch(self):
508 return encoding.tolocal(self._changeset.extra.get("branch"))
507 return encoding.tolocal(self._changeset.extra.get("branch"))
509 def closesbranch(self):
508 def closesbranch(self):
510 return 'close' in self._changeset.extra
509 return 'close' in self._changeset.extra
511 def extra(self):
510 def extra(self):
512 """Return a dict of extra information."""
511 """Return a dict of extra information."""
513 return self._changeset.extra
512 return self._changeset.extra
514 def tags(self):
513 def tags(self):
515 """Return a list of byte tag names"""
514 """Return a list of byte tag names"""
516 return self._repo.nodetags(self._node)
515 return self._repo.nodetags(self._node)
517 def bookmarks(self):
516 def bookmarks(self):
518 """Return a list of byte bookmark names."""
517 """Return a list of byte bookmark names."""
519 return self._repo.nodebookmarks(self._node)
518 return self._repo.nodebookmarks(self._node)
520 def phase(self):
519 def phase(self):
521 return self._repo._phasecache.phase(self._repo, self._rev)
520 return self._repo._phasecache.phase(self._repo, self._rev)
522 def hidden(self):
521 def hidden(self):
523 return self._rev in repoview.filterrevs(self._repo, 'visible')
522 return self._rev in repoview.filterrevs(self._repo, 'visible')
524
523
525 def isinmemory(self):
524 def isinmemory(self):
526 return False
525 return False
527
526
528 def children(self):
527 def children(self):
529 """return list of changectx contexts for each child changeset.
528 """return list of changectx contexts for each child changeset.
530
529
531 This returns only the immediate child changesets. Use descendants() to
530 This returns only the immediate child changesets. Use descendants() to
532 recursively walk children.
531 recursively walk children.
533 """
532 """
534 c = self._repo.changelog.children(self._node)
533 c = self._repo.changelog.children(self._node)
535 return [changectx(self._repo, x) for x in c]
534 return [changectx(self._repo, x) for x in c]
536
535
537 def ancestors(self):
536 def ancestors(self):
538 for a in self._repo.changelog.ancestors([self._rev]):
537 for a in self._repo.changelog.ancestors([self._rev]):
539 yield changectx(self._repo, a)
538 yield changectx(self._repo, a)
540
539
541 def descendants(self):
540 def descendants(self):
542 """Recursively yield all children of the changeset.
541 """Recursively yield all children of the changeset.
543
542
544 For just the immediate children, use children()
543 For just the immediate children, use children()
545 """
544 """
546 for d in self._repo.changelog.descendants([self._rev]):
545 for d in self._repo.changelog.descendants([self._rev]):
547 yield changectx(self._repo, d)
546 yield changectx(self._repo, d)
548
547
    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        # Default to the file node recorded in this changeset's manifest;
        # filenode() raises when the file does not exist in this revision.
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(self._repo, path, fileid=fileid,
                       changectx=self, filelog=filelog)
555
554
    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            # a working context has no node of its own; use its first parent
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            # no common ancestor at all: answer with the null revision
            anc = nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            # several candidate ancestors: let configuration pick one
            # experimental config: merge.preferancestor
            for r in self._repo.ui.configlist('merge', 'preferancestor'):
                try:
                    ctx = scmutil.revsymbol(self._repo, r)
                except error.RepoLookupError:
                    # ignore unresolvable entries in the config list
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                # no configured preference matched; fall back to the revlog
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (_("note: using %s as ancestor of %s and %s\n") %
                     (short(anc), short(self._node), short(n2))) +
                    ''.join(_(" alternatively, use --config "
                              "merge.preferancestor=%s\n") %
                            short(n) for n in sorted(cahs) if n != anc))
        return changectx(self._repo, anc)
591
590
    def descendant(self, other):
        """True if other is descendant of this changeset"""
        return self._repo.changelog.descendant(self._rev, other._rev)

    def walk(self, match):
        '''Generates matching file names.'''

        # Wrap match.bad method to have message with nodeid
        def bad(fn, msg):
            # The manifest doesn't know about subrepos, so don't complain about
            # paths into valid subrepos.
            if any(fn == s or fn.startswith(s + '/')
                   for s in self.substate):
                return
            match.bad(fn, _('no such file in rev %s') % self)

        m = matchmod.badmatch(match, bad)
        return self._manifest.walk(m)

    def matches(self, match):
        # walk() already yields exactly the matching file names
        return self.walk(match)
613
612
614 class basefilectx(object):
613 class basefilectx(object):
615 """A filecontext object represents the common logic for its children:
614 """A filecontext object represents the common logic for its children:
616 filectx: read-only access to a filerevision that is already present
615 filectx: read-only access to a filerevision that is already present
617 in the repo,
616 in the repo,
618 workingfilectx: a filecontext that represents files from the working
617 workingfilectx: a filecontext that represents files from the working
619 directory,
618 directory,
620 memfilectx: a filecontext that represents files in-memory,
619 memfilectx: a filecontext that represents files in-memory,
621 overlayfilectx: duplicate another filecontext with some fields overridden.
620 overlayfilectx: duplicate another filecontext with some fields overridden.
622 """
621 """
    @propertycache
    def _filelog(self):
        # the filelog (per-file revision store) for this file's path
        return self._repo.file(self._path)

    @propertycache
    def _changeid(self):
        # Resolve the owning changeset revision from whichever piece of
        # identity this filectx was constructed with, cheapest first.
        if r'_changeid' in self.__dict__:
            return self._changeid
        elif r'_changectx' in self.__dict__:
            return self._changectx.rev()
        elif r'_descendantrev' in self.__dict__:
            # this file context was created from a revision with a known
            # descendant, we can (lazily) correct for linkrev aliases
            return self._adjustlinkrev(self._descendantrev)
        else:
            return self._filelog.linkrev(self._filerev)

    @propertycache
    def _filenode(self):
        if r'_fileid' in self.__dict__:
            # _fileid may be a file revision number, node, or prefix;
            # lookup() normalizes it to a node
            return self._filelog.lookup(self._fileid)
        else:
            return self._changectx.filenode(self._path)

    @propertycache
    def _filerev(self):
        return self._filelog.rev(self._filenode)

    @propertycache
    def _repopath(self):
        return self._path
654
653
    def __nonzero__(self):
        # a filectx is truthy iff the file exists in its revision; resolving
        # the filenode raises LookupError when it does not
        try:
            self._filenode
            return True
        except error.LookupError:
            # file is missing
            return False

    __bool__ = __nonzero__

    def __bytes__(self):
        try:
            return "%s@%s" % (self.path(), self._changectx)
        except error.LookupError:
            # changeset resolution can fail; still show which path this is
            return "%s@???" % self.path()

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return r"<%s %s>" % (type(self).__name__, str(self))

    def __hash__(self):
        try:
            return hash((self._path, self._filenode))
        except AttributeError:
            # no filenode available; fall back to identity hashing so the
            # object remains usable as a dict key
            return id(self)

    def __eq__(self, other):
        # equal iff same concrete type, same path and same file node
        try:
            return (type(self) == type(other) and self._path == other._path
                    and self._filenode == other._filenode)
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)
691
690
    # Thin accessors: most delegate to the filelog or to the owning
    # changectx; see the propertycaches above for how the underlying
    # attributes are resolved lazily.
    def filerev(self):
        return self._filerev
    def filenode(self):
        return self._filenode
    @propertycache
    def _flags(self):
        return self._changectx.flags(self._path)
    def flags(self):
        return self._flags
    def filelog(self):
        return self._filelog
    def rev(self):
        return self._changeid
    def linkrev(self):
        # raw linkrev from the filelog; may point at a changeset that is not
        # an ancestor of this context -- see introrev() for an adjusted value
        return self._filelog.linkrev(self._filerev)
    def node(self):
        return self._changectx.node()
    def hex(self):
        return self._changectx.hex()
    def user(self):
        return self._changectx.user()
    def date(self):
        return self._changectx.date()
    def files(self):
        return self._changectx.files()
    def description(self):
        return self._changectx.description()
    def branch(self):
        return self._changectx.branch()
    def extra(self):
        return self._changectx.extra()
    def phase(self):
        return self._changectx.phase()
    def phasestr(self):
        return self._changectx.phasestr()
    def obsolete(self):
        return self._changectx.obsolete()
    def instabilities(self):
        return self._changectx.instabilities()
    def manifest(self):
        return self._changectx.manifest()
    def changectx(self):
        return self._changectx
    def renamed(self):
        return self._copied
    def repo(self):
        return self._repo
    def size(self):
        # note: reads the full file data; subclasses may override with a
        # cheaper implementation
        return len(self.data())

    def path(self):
        return self._path
744
743
    def isbinary(self):
        # treat unreadable data as non-binary rather than propagating IOError
        try:
            return stringutil.binary(self.data())
        except IOError:
            return False
    def isexec(self):
        # 'x' flag: executable bit set
        return 'x' in self.flags()
    def islink(self):
        # 'l' flag: symbolic link
        return 'l' in self.flags()

    def isabsent(self):
        """whether this filectx represents a file not in self._changectx

        This is mainly for merge code to detect change/delete conflicts. This is
        expected to be True for all subclasses of basectx."""
        # base implementation: a concrete file revision is never absent
        return False
761
760
    # set by subclasses that implement their own comparison semantics
    _customcmp = False
    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        if fctx._customcmp:
            # let the other context drive the comparison
            return fctx.cmp(self)

        # Only compare actual contents when a cheap size check cannot rule
        # out equality (or when sizes are not comparable because the other
        # side has no filenode / encode filters may change sizes).
        if (fctx._filenode is None
            and (self._repo._encodefilterpats
                 # if file data starts with '\1\n', empty metadata block is
                 # prepended, which adds 4 bytes to filelog.size().
                 or self.size() - 4 == fctx.size())
            or self.size() == fctx.size()):
            return self._filelog.cmp(self._filenode, fctx.data())

        # sizes differ, so the contents must differ
        return True
780
779
    def _adjustlinkrev(self, srcrev, inclusive=False):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked
        """
        repo = self._repo
        cl = repo.unfiltered().changelog
        mfl = repo.manifestlog
        # fetch the linkrev
        lkr = self.linkrev()
        # hack to reuse ancestor computation when searching for renames
        memberanc = getattr(self, '_ancestrycontext', None)
        iteranc = None
        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True # we skipped the real (revless) source
        else:
            revs = [srcrev]
        if memberanc is None:
            memberanc = iteranc = cl.ancestors(revs, lkr,
                                               inclusive=inclusive)
        # check if this linkrev is an ancestor of srcrev
        if lkr not in memberanc:
            if iteranc is None:
                iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
            fnode = self._filenode
            path = self._path
            for a in iteranc:
                ac = cl.read(a) # get changeset data (we avoid object creation)
                if path in ac[3]: # checking the 'files' field.
                    # The file has been touched, check if the content is
                    # similar to the one we search for.
                    if fnode == mfl[ac[0]].readfast().get(path):
                        return a
            # In theory, we should never get out of that loop without a
            # result.  But if the manifest uses a buggy file revision (not a
            # child of the one it replaces) we could.  Such a buggy situation
            # will likely result in a crash somewhere else at some point.
        return lkr
826
825
    def introrev(self):
        """return the rev of the changeset which introduced this file revision

        This method is different from linkrev because it take into account the
        changeset the filectx was created from. It ensures the returned
        revision is one of its ancestors. This prevents bugs from
        'linkrev-shadowing' when a file revision is used by multiple
        changesets.
        """
        lkr = self.linkrev()
        attrs = vars(self)
        noctx = not (r'_changeid' in attrs or r'_changectx' in attrs)
        if noctx or self.rev() == lkr:
            # no owning changeset is known, or the raw linkrev already points
            # at it: the linkrev is the answer
            return self.linkrev()
        # otherwise walk down from our changeset to the real introducer
        return self._adjustlinkrev(self.rev(), inclusive=True)
842
841
843 def introfilectx(self):
842 def introfilectx(self):
844 """Return filectx having identical contents, but pointing to the
843 """Return filectx having identical contents, but pointing to the
845 changeset revision where this filectx was introduced"""
844 changeset revision where this filectx was introduced"""
846 introrev = self.introrev()
845 introrev = self.introrev()
847 if self.rev() == introrev:
846 if self.rev() == introrev:
848 return self
847 return self
849 return self.filectx(self.filenode(), changeid=introrev)
848 return self.filectx(self.filenode(), changeid=introrev)
850
849
    def _parentfilectx(self, path, fileid, filelog):
        """create parent filectx keeping ancestry info for _adjustlinkrev()"""
        fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
        if r'_changeid' in vars(self) or r'_changectx' in vars(self):
            # If self is associated with a changeset (probably explicitly
            # fed), ensure the created filectx is associated with a
            # changeset that is an ancestor of self.changectx.
            # This lets us later use _adjustlinkrev to get a correct link.
            fctx._descendantrev = self.rev()
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        elif r'_descendantrev' in vars(self):
            # Otherwise propagate _descendantrev if we have one associated.
            fctx._descendantrev = self._descendantrev
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        return fctx
866
865
    def parents(self):
        """Return the parent filectxs, substituting rename information for
        the first null parent when this file revision is a copy/rename."""
        _path = self._path
        fl = self._filelog
        parents = self._filelog.parents(self._filenode)
        pl = [(_path, node, fl) for node in parents if node != nullid]

        r = fl.renamed(self._filenode)
        if r:
            # - In the simple rename case, both parent are nullid, pl is empty.
            # - In case of merge, only one of the parent is null id and should
            # be replaced with the rename information. This parent is -always-
            # the first one.
            #
            # As null id have always been filtered out in the previous list
            # comprehension, inserting to 0 will always result in "replacing
            # first nullid parent with rename information.
            pl.insert(0, (r[0], r[1], self._repo.file(r[0])))

        return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
886
885
887 def p1(self):
886 def p1(self):
888 return self.parents()[0]
887 return self.parents()[0]
889
888
890 def p2(self):
889 def p2(self):
891 p = self.parents()
890 p = self.parents()
892 if len(p) == 2:
891 if len(p) == 2:
893 return p[1]
892 return p[1]
894 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
893 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
895
894
    def annotate(self, follow=False, skiprevs=None, diffopts=None):
        """Returns a list of annotateline objects for each line in the file

        - line.fctx is the filectx of the node where that line was last changed
        - line.lineno is the line number at the first appearance in the managed
          file
        - line.text is the data on that line (including newline character)
        """
        # memoize filelog lookups across the whole annotate walk
        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            # Cut _descendantrev here to mitigate the penalty of lazy linkrev
            # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
            # from the topmost introrev (= srcrev) down to p.linkrev() if it
            # isn't an ancestor of the srcrev.
            f._changeid
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
            # from the cache to save time
            for p in pl:
                if not r'_filelog' in p.__dict__:
                    p._filelog = getlog(p.path())

            return pl

        # use linkrev to find the first changeset where self appeared
        base = self.introfilectx()
        if getattr(base, '_ancestrycontext', None) is None:
            cl = self._repo.changelog
            if base.rev() is None:
                # wctx is not inclusive, but works because _ancestrycontext
                # is used to test filelog revisions
                ac = cl.ancestors([p.rev() for p in base.parents()],
                                  inclusive=True)
            else:
                ac = cl.ancestors([base.rev()], inclusive=True)
            base._ancestrycontext = ac

        return dagop.annotate(base, parents, skiprevs=skiprevs,
                              diffopts=diffopts)
941
940
942 def ancestors(self, followfirst=False):
941 def ancestors(self, followfirst=False):
943 visit = {}
942 visit = {}
944 c = self
943 c = self
945 if followfirst:
944 if followfirst:
946 cut = 1
945 cut = 1
947 else:
946 else:
948 cut = None
947 cut = None
949
948
950 while True:
949 while True:
951 for parent in c.parents()[:cut]:
950 for parent in c.parents()[:cut]:
952 visit[(parent.linkrev(), parent.filenode())] = parent
951 visit[(parent.linkrev(), parent.filenode())] = parent
953 if not visit:
952 if not visit:
954 break
953 break
955 c = visit.pop(max(visit))
954 c = visit.pop(max(visit))
956 yield c
955 yield c
957
956
    def decodeddata(self):
        """Returns `data()` after running repository decoding filters.

        This is often equivalent to how the data would be expressed on disk.
        """
        # wwritedata applies the repo's configured decode filters for this path
        return self._repo.wwritedata(self.path(), self.data())
964
963
965 class filectx(basefilectx):
964 class filectx(basefilectx):
966 """A filecontext object makes access to data related to a particular
965 """A filecontext object makes access to data related to a particular
967 filerevision convenient."""
966 filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        # at least one way of identifying the file revision is required;
        # everything else is derived lazily by basefilectx's propertycaches
        assert (changeid is not None
                or fileid is not None
                or changectx is not None), \
                ("bad args: changeid=%r, fileid=%r, changectx=%r"
                 % (changeid, fileid, changectx))

        if filelog is not None:
            self._filelog = filelog

        # only set the attributes we were actually given, so the missing
        # ones can be computed (and cached) on demand
        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid
990
989
    @propertycache
    def _changectx(self):
        try:
            return changectx(self._repo, self._changeid)
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to build
            # `changectx` for filtered revision. In such case we fallback to
            # creating `changectx` on the unfiltered version of the repository.
            # This fallback should not be an issue because `changectx` from
            # `filectx` are not used in complex operations that care about
            # filtering.
            #
            # This fallback is a cheap and dirty fix that prevent several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better than "crash"
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when solving linkrev issue are on the table.
            return changectx(self._repo.unfiltered(), self._changeid)
1013
1012
    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        # reuse our filelog instance; only the file revision changes
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog, changeid=changeid)
1019
1018
    def rawdata(self):
        """Return the raw revlog data for this file revision (no decoding)."""
        return self._filelog.revision(self._filenode, raw=True)
1022
1021
    def rawflags(self):
        """low-level revlog flags"""
        # storage-level revlog flags, distinct from file mode flags ('l'/'x')
        return self._filelog.flags(self._filerev)
1026
1025
    def data(self):
        """Return the file contents at this revision.

        If the revision is censored, returns an empty string when
        censor.policy is "ignore", and aborts otherwise.
        """
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config("censor", "policy") == "ignore":
                return ""
            raise error.Abort(_("censored node: %s") % short(self._filenode),
                              hint=_("set censor.policy to ignore errors"))
1035
1034
    def size(self):
        """Return the length of this file revision's data."""
        return self._filelog.size(self._filerev)
1038
1037
    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        # renamed is the (source path, source filenode) pair recorded in the
        # filelog, or a falsy value when no rename metadata exists
        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return renamed

        # linkrev pointing back here means this changeset introduced the
        # file revision, so the recorded rename belongs to this changeset
        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                # a parent already has this exact file revision: the rename
                # happened in an ancestor, not here — report no copy
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                # file does not exist in this parent; keep checking
                pass
        return renamed
1064
1063
1065 def children(self):
1064 def children(self):
1066 # hard for renames
1065 # hard for renames
1067 c = self._filelog.children(self._filenode)
1066 c = self._filelog.children(self._filenode)
1068 return [filectx(self._repo, self._path, fileid=x,
1067 return [filectx(self._repo, self._path, fileid=x,
1069 filelog=self._filelog) for x in c]
1068 filelog=self._filelog) for x in c]
1070
1069
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        super(committablectx, self).__init__(repo)
        # an uncommitted context has no revision number or node yet
        self._rev = None
        self._node = None
        self._text = text
        # date/user/status fall back to the propertycache defaults below
        # when not supplied by the caller
        if date:
            self._date = dateutil.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise error.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
        # an empty branch name means the default branch
        if self._extra['branch'] == '':
            self._extra['branch'] = 'default'

    def __bytes__(self):
        # rendered as "<first parent>+" to mark the pending commit
        return bytes(self._parents[0]) + "+"

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func

    @propertycache
    def _flagfunc(self):
        # filesystem-provided flags when possible; _buildflagfunc otherwise
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    @propertycache
    def _status(self):
        # default status when 'changes' was not passed to __init__
        return self._repo.status()

    @propertycache
    def _user(self):
        # default committer when 'user' was not passed to __init__
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        ui = self._repo.ui
        # devel.default-date lets tests pin the commit date deterministically
        date = ui.configdate('devel', 'default-date')
        if date is None:
            date = dateutil.makedate()
        return date

    def subrev(self, subpath):
        # an uncommitted context has no recorded subrepo revision
        return None

    def manifestnode(self):
        # no manifest has been written for an uncommitted context
        return None
    # simple accessors over the pending commit's metadata and status
    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)

    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def isinmemory(self):
        return False

    def tags(self):
        # tags cannot point at an uncommitted context
        return []

    def bookmarks(self):
        # bookmarks of the parents; they would move here once committed
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        # at least draft, and never lower than any parent's phase
        phase = phases.draft # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        # consult the manifest only if it was already computed; building it
        # just to answer a flags query would be wasteful
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def walk(self, match):
        '''Generates matching file names.'''
        return sorted(self._repo.dirstate.walk(match,
                                               subrepos=sorted(self.substate),
                                               unknown=True, ignored=False))

    def matches(self, match):
        return sorted(self._repo.dirstate.matches(match))

    def ancestors(self):
        # yield the parents first, then everything reachable from them
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
                [p.rev() for p in self._parents]):
            yield changectx(self._repo, a)

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.normal(f)
            for f in self.removed():
                self._repo.dirstate.drop(f)
            self._repo.dirstate.setparents(node)

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())

    def dirty(self, missing=False, merge=True, branch=True):
        # subclasses (e.g. workingctx) override this with a real check
        return False
1271
1270
class workingctx(committablectx):
    """A workingctx object makes access to data related to
    the current working directory convenient.
    date - any valid date string or (unixtime, offset), or None.
    user - username string, or None.
    extra - a dictionary of extra values, or None.
    changes - a list of file lists as returned by localrepo.status()
        or None to use the repository status.
    """
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        # all commit metadata handling lives in committablectx; workingctx
        # adds the dirstate/filesystem-backed behavior on top
        super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1284
1283
1285 def __iter__(self):
1284 def __iter__(self):
1286 d = self._repo.dirstate
1285 d = self._repo.dirstate
1287 for f in d:
1286 for f in d:
1288 if d[f] != 'r':
1287 if d[f] != 'r':
1289 yield f
1288 yield f
1290
1289
1291 def __contains__(self, key):
1290 def __contains__(self, key):
1292 return self._repo.dirstate[key] not in "?r"
1291 return self._repo.dirstate[key] not in "?r"
1293
1292
    def hex(self):
        # the working directory is represented by the fixed pseudo-id wdirid
        return hex(wdirid)
1296
1295
1297 @propertycache
1296 @propertycache
1298 def _parents(self):
1297 def _parents(self):
1299 p = self._repo.dirstate.parents()
1298 p = self._repo.dirstate.parents()
1300 if p[1] == nullid:
1299 if p[1] == nullid:
1301 p = p[:-1]
1300 p = p[:-1]
1302 return [changectx(self._repo, x) for x in p]
1301 return [changectx(self._repo, x) for x in p]
1303
1302
    def _fileinfo(self, path):
        # populate __dict__['_manifest'] as workingctx has no _manifestdelta
        # (the bare attribute access intentionally triggers the propertycache)
        self._manifest
        return super(workingctx, self)._fileinfo(path)
1308
1307
    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        # the returned workingfilectx is bound back to this workingctx
        return workingfilectx(self._repo, path, workingctx=self,
                              filelog=filelog)
1313
1312
    def dirty(self, missing=False, merge=True, branch=True):
        "check whether a working directory is modified"
        # NOTE: returns the first truthy value found (not necessarily a
        # bool); callers treat the result as a boolean
        # check subrepos first
        for s in sorted(self.substate):
            if self.sub(s).dirty(missing=missing):
                return True
        # check current working dir
        return ((merge and self.p2()) or
                (branch and self.branch() != self.p1().branch()) or
                self.modified() or self.added() or self.removed() or
                (missing and self.deleted()))
1325
1324
    def add(self, list, prefix=""):
        """Schedule the files in 'list' for addition to the repository.

        Returns the subset of paths that were rejected (nonexistent, or
        not a regular file or symlink).
        """
        with self._repo.wlock():
            ui, ds = self._repo.ui, self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                # ds.pathto() returns an absolute file when this is invoked from
                # the keyword extension. That gets flagged as non-portable on
                # Windows, since it contains the drive letter and colon.
                scmutil.checkportable(ui, os.path.join(prefix, f))
                try:
                    st = lstat(f)
                except OSError:
                    ui.warn(_("%s does not exist!\n") % uipath(f))
                    rejected.append(f)
                    continue
                # warn (but still add) for very large files
                if st.st_size > 10000000:
                    ui.warn(_("%s: up to %d MB of RAM may be required "
                              "to manage this file\n"
                              "(use 'hg revert %s' to cancel the "
                              "pending addition)\n")
                            % (f, 3 * st.st_size // 1000000, uipath(f)))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(_("%s not added: only files and symlinks "
                              "supported currently\n") % uipath(f))
                    rejected.append(f)
                elif ds[f] in 'amn':
                    # already added, merged or normal: nothing to do
                    ui.warn(_("%s already tracked!\n") % uipath(f))
                elif ds[f] == 'r':
                    # previously removed: resurrect instead of re-adding
                    ds.normallookup(f)
                else:
                    ds.add(f)
            return rejected
1360
1359
    def forget(self, files, prefix=""):
        """Stop tracking 'files' without deleting them from disk.

        Returns the list of files that were not tracked to begin with.
        """
        with self._repo.wlock():
            ds = self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            for f in files:
                if f not in self._repo.dirstate:
                    self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
                    rejected.append(f)
                elif self._repo.dirstate[f] != 'a':
                    # already committed: mark as removed in the dirstate
                    self._repo.dirstate.remove(f)
                else:
                    # only scheduled for addition: simply drop the entry
                    self._repo.dirstate.drop(f)
            return rejected
1375
1374
1376 def undelete(self, list):
1375 def undelete(self, list):
1377 pctxs = self.parents()
1376 pctxs = self.parents()
1378 with self._repo.wlock():
1377 with self._repo.wlock():
1379 ds = self._repo.dirstate
1378 ds = self._repo.dirstate
1380 for f in list:
1379 for f in list:
1381 if self._repo.dirstate[f] != 'r':
1380 if self._repo.dirstate[f] != 'r':
1382 self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
1381 self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
1383 else:
1382 else:
1384 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1383 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1385 t = fctx.data()
1384 t = fctx.data()
1386 self._repo.wwrite(f, t, fctx.flags())
1385 self._repo.wwrite(f, t, fctx.flags())
1387 self._repo.dirstate.normal(f)
1386 self._repo.dirstate.normal(f)
1388
1387
    def copy(self, source, dest):
        """Record in the dirstate that 'dest' was copied from 'source'.

        Warns and does nothing if 'dest' does not exist or is not a regular
        file or symlink.
        """
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError as err:
            # only a missing file is tolerated; other errors propagate
            if err.errno != errno.ENOENT:
                raise
            self._repo.ui.warn(_("%s does not exist!\n")
                               % self._repo.dirstate.pathto(dest))
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(_("copy failed: %s is not a file or a "
                                 "symbolic link\n")
                               % self._repo.dirstate.pathto(dest))
        else:
            with self._repo.wlock():
                if self._repo.dirstate[dest] in '?':
                    # untracked destination: schedule it for addition
                    self._repo.dirstate.add(dest)
                elif self._repo.dirstate[dest] in 'r':
                    # previously removed: resurrect the entry
                    self._repo.dirstate.normallookup(dest)
                self._repo.dirstate.copy(source, dest)
1409
1408
1410 def match(self, pats=None, include=None, exclude=None, default='glob',
1409 def match(self, pats=None, include=None, exclude=None, default='glob',
1411 listsubrepos=False, badfn=None):
1410 listsubrepos=False, badfn=None):
1412 r = self._repo
1411 r = self._repo
1413
1412
1414 # Only a case insensitive filesystem needs magic to translate user input
1413 # Only a case insensitive filesystem needs magic to translate user input
1415 # to actual case in the filesystem.
1414 # to actual case in the filesystem.
1416 icasefs = not util.fscasesensitive(r.root)
1415 icasefs = not util.fscasesensitive(r.root)
1417 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1416 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1418 default, auditor=r.auditor, ctx=self,
1417 default, auditor=r.auditor, ctx=self,
1419 listsubrepos=listsubrepos, badfn=badfn,
1418 listsubrepos=listsubrepos, badfn=badfn,
1420 icasefs=icasefs)
1419 icasefs=icasefs)
1421
1420
    def _filtersuspectsymlink(self, files):
        """Return the subset of 'files' whose symlink flag looks genuine."""
        if not files or self._repo.dirstate._checklink:
            # nothing to check, or the filesystem supports real symlinks
            return files

        # Symlink placeholders may get non-symlink-like contents
        # via user error or dereferencing by NFS or Samba servers,
        # so we filter out any placeholders that don't look like a
        # symlink
        sane = []
        for f in files:
            if self.flags(f) == 'l':
                d = self[f].data()
                # real symlink targets are short, single-line, text
                if (d == '' or len(d) >= 1024 or '\n' in d
                    or stringutil.binary(d)):
                    self._repo.ui.debug('ignoring suspect symlink placeholder'
                                        ' "%s"\n' % f)
                    continue
            sane.append(f)
        return sane
1441
1440
    def _checklookup(self, files):
        """Re-examine files whose dirstate entry was inconclusive.

        Returns a (modified, deleted, fixup) triple; 'fixup' lists files
        that turned out to be clean and whose dirstate entry can be
        refreshed by the caller.
        """
        # check for any possibly clean files
        if not files:
            return [], [], []

        modified = []
        deleted = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            try:
                # This will return True for a file that got replaced by a
                # directory in the interim, but fixing that is pretty hard.
                if (f not in pctx or self.flags(f) != pctx.flags(f)
                    or pctx[f].cmp(self[f])):
                    modified.append(f)
                else:
                    fixup.append(f)
            except (IOError, OSError):
                # A file become inaccessible in between? Mark it as deleted,
                # matching dirstate behavior (issue5584).
                # The dirstate has more complex behavior around whether a
                # missing file matches a directory, etc, but we don't need to
                # bother with that: if f has made it to this point, we're sure
                # it's in the dirstate.
                deleted.append(f)

        return modified, deleted, fixup
1471
1470
    def _poststatusfixup(self, status, fixup):
        """update dirstate for files that are actually clean"""
        poststatus = self._repo.postdsstatus()
        if fixup or poststatus:
            try:
                # remember the dirstate's on-disk identity so we can detect
                # a concurrent rewrite after we take the lock (issue5584)
                oldid = self._repo.dirstate.identity()

                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                with self._repo.wlock(False):
                    if self._repo.dirstate.identity() == oldid:
                        if fixup:
                            normal = self._repo.dirstate.normal
                            for f in fixup:
                                normal(f)
                            # write changes out explicitly, because nesting
                            # wlock at runtime may prevent 'wlock.release()'
                            # after this block from doing so for subsequent
                            # changing files
                            tr = self._repo.currenttransaction()
                            self._repo.dirstate.write(tr)

                        if poststatus:
                            for ps in poststatus:
                                ps(self, status)
                    else:
                        # in this case, writing changes out breaks
                        # consistency, because .hg/dirstate was
                        # already changed simultaneously after last
                        # caching (see also issue5584 for detail)
                        self._repo.ui.debug('skip updating dirstate: '
                                            'identity mismatch\n')
            except error.LockError:
                # the update is best-effort; skip it if the lock is busy
                pass
            finally:
                # Even if the wlock couldn't be grabbed, clear out the list.
                self._repo.clearpostdsstatus()
1511
1510
    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        '''Gets the status from the dirstate -- internal use only.'''
        subrepos = []
        if '.hgsub' in self:
            subrepos = sorted(self.substate)
        # cmp holds files the dirstate could not decide about by stat alone
        cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
                                            clean=clean, unknown=unknown)

        # check for any possibly clean files
        fixup = []
        if cmp:
            modified2, deleted2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)
            s.deleted.extend(deleted2)

        if fixup and clean:
            s.clean.extend(fixup)

        self._poststatusfixup(s, fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(s.modified, s.added, s.removed,
                                              s.deleted, [], [], [])
            else:
                self._status = s

        return s
1542
1541
    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status

        This reuses the file nodeids from the parent, but uses special node
        identifiers for added and modified files. This is used by manifests
        merge to see that files are different and by update logic to avoid
        deleting newly added files.
        """
        return self._buildstatusmanifest(self._status)
1553
1552
1554 def _buildstatusmanifest(self, status):
1553 def _buildstatusmanifest(self, status):
1555 """Builds a manifest that includes the given status results."""
1554 """Builds a manifest that includes the given status results."""
1556 parents = self.parents()
1555 parents = self.parents()
1557
1556
1558 man = parents[0].manifest().copy()
1557 man = parents[0].manifest().copy()
1559
1558
1560 ff = self._flagfunc
1559 ff = self._flagfunc
1561 for i, l in ((addednodeid, status.added),
1560 for i, l in ((addednodeid, status.added),
1562 (modifiednodeid, status.modified)):
1561 (modifiednodeid, status.modified)):
1563 for f in l:
1562 for f in l:
1564 man[f] = i
1563 man[f] = i
1565 try:
1564 try:
1566 man.setflag(f, ff(f))
1565 man.setflag(f, ff(f))
1567 except OSError:
1566 except OSError:
1568 pass
1567 pass
1569
1568
1570 for f in status.deleted + status.removed:
1569 for f in status.deleted + status.removed:
1571 if f in man:
1570 if f in man:
1572 del man[f]
1571 del man[f]
1573
1572
1574 return man
1573 return man
1575
1574
    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        # The incoming 's' is recomputed here: the dirstate is authoritative
        # for the working directory.
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo['.']:
            # Slow path: delegate to the generic manifest-based comparison.
            s = super(workingctx, self)._buildstatus(other, s, match,
                                                     listignored, listclean,
                                                     listunknown)
        return s
1595
1594
1596 def _matchstatus(self, other, match):
1595 def _matchstatus(self, other, match):
1597 """override the match method with a filter for directory patterns
1596 """override the match method with a filter for directory patterns
1598
1597
1599 We use inheritance to customize the match.bad method only in cases of
1598 We use inheritance to customize the match.bad method only in cases of
1600 workingctx since it belongs only to the working directory when
1599 workingctx since it belongs only to the working directory when
1601 comparing against the parent changeset.
1600 comparing against the parent changeset.
1602
1601
1603 If we aren't comparing against the working directory's parent, then we
1602 If we aren't comparing against the working directory's parent, then we
1604 just use the default match object sent to us.
1603 just use the default match object sent to us.
1605 """
1604 """
1606 if other != self._repo['.']:
1605 if other != self._repo['.']:
1607 def bad(f, msg):
1606 def bad(f, msg):
1608 # 'f' may be a directory pattern from 'match.files()',
1607 # 'f' may be a directory pattern from 'match.files()',
1609 # so 'f not in ctx1' is not enough
1608 # so 'f not in ctx1' is not enough
1610 if f not in other and not other.hasdir(f):
1609 if f not in other and not other.hasdir(f):
1611 self._repo.ui.warn('%s: %s\n' %
1610 self._repo.ui.warn('%s: %s\n' %
1612 (self._repo.dirstate.pathto(f), msg))
1611 (self._repo.dirstate.pathto(f), msg))
1613 match.bad = bad
1612 match.bad = bad
1614 return match
1613 return match
1615
1614
    def markcommitted(self, node):
        """Perform post-commit bookkeeping once the working copy's pending
        changes have been committed as ``node``."""
        super(workingctx, self).markcommitted(node)

        # let sparse support react to the new commit (see sparse.aftercommit)
        sparse.aftercommit(self._repo, node)
1620
1619
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""

    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        # an uncommitted file context always represents something
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def nodefor(ctx, path):
            return ctx._manifest.get(path, nullid)

        path = self._path
        filelog = self._filelog
        parentctxs = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            entries = [renamed + (None,)]
        else:
            entries = [(path, nodefor(parentctxs[0], path), filelog)]
        entries.extend((path, nodefor(pctx, path), filelog)
                       for pctx in parentctxs[1:])

        # drop parents where the file does not exist (null node)
        return [self._parentfilectx(p, fileid=n, filelog=l)
                for p, n, l in entries if n != nullid]

    def children(self):
        # uncommitted contexts have no committed children
        return []
1667
1666
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""

    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        return workingctx(self._repo)

    def data(self):
        """Read this file's current contents from the working directory."""
        return self._repo.wread(self._path)

    def renamed(self):
        """Return (source, filenode-of-source-in-p1) if this file is a
        recorded copy/rename, else None."""
        source = self._repo.dirstate.copied(self._path)
        if not source:
            return None
        return source, self._changectx._parents[0]._manifest.get(source,
                                                                 nullid)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size

    def date(self):
        """Return (mtime, tzoffset); fall back to the changectx timestamp
        when the file is missing from disk."""
        fallbacktime, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            return (fallbacktime, tz)

    def exists(self):
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        """wraps repo.wwrite"""
        self._repo.wwrite(self._path, data, flags,
                          backgroundclose=backgroundclose, **kwargs)

    def markcopied(self, src):
        """marks this file a copy of `src`"""
        # only record the copy when the file is tracked ('n', 'm' or 'a')
        if self._repo.dirstate[self._path] in "nma":
            self._repo.dirstate.copy(src, self._path)

    def clearunknown(self):
        """Removes conflicting items in the working directory so that
        ``write()`` can be called successfully.
        """
        wvfs = self._repo.wvfs
        path = self._path
        wvfs.audit(path)
        # a directory sitting where the file should go must be removed first
        if wvfs.isdir(path) and not wvfs.islink(path):
            wvfs.rmtree(path, forcibly=True)
        if self._repo.ui.configbool('experimental',
                                    'merge.checkpathconflicts'):
            # remove the deepest file/symlink that blocks a parent directory
            for ancestor in reversed(list(util.finddirs(path))):
                if wvfs.isfileorlink(ancestor):
                    wvfs.unlink(ancestor)
                    break

    def setflags(self, l, x):
        self._repo.wvfs.setflags(self._path, l, x)
1747
1746
class overlayworkingctx(committablectx):
    """Wraps another mutable context with a write-back cache that can be
    converted into a commit context.

    self._cache[path] maps to a dict with keys: {
        'exists': bool?
        'date': date?
        'data': str?
        'flags': str?
        'copied': str? (path or None)
    }
    If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
    is `False`, the file was deleted.
    """

    def __init__(self, repo):
        super(overlayworkingctx, self).__init__(repo)
        self.clean()

    def setbase(self, wrappedctx):
        """Set the context this overlay is layered on top of."""
        self._wrappedctx = wrappedctx
        self._parents = [wrappedctx]
        # Drop old manifest cache as it is now out of date.
        # This is necessary when, e.g., rebasing several nodes with one
        # ``overlayworkingctx`` (e.g. with --collapse).
        util.clearcachedproperty(self, '_manifest')

    def data(self, path):
        """Return the data for ``path``, preferring pending cached writes."""
        if self.isdirty(path):
            if self._cache[path]['exists']:
                if self._cache[path]['data']:
                    return self._cache[path]['data']
                else:
                    # Must fallback here, too, because we only set flags.
                    return self._wrappedctx[path].data()
            else:
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        else:
            return self._wrappedctx[path].data()

    @propertycache
    def _manifest(self):
        """Manifest of the wrapped context overlaid with cached changes."""
        parents = self.parents()
        man = parents[0].manifest().copy()

        flag = self._flagfunc
        for path in self.added():
            man[path] = addednodeid
            man.setflag(path, flag(path))
        for path in self.modified():
            man[path] = modifiednodeid
            man.setflag(path, flag(path))
        for path in self.removed():
            del man[path]
        return man

    @propertycache
    def _flagfunc(self):
        # flags are only meaningful for dirty (cached) paths
        def f(path):
            return self._cache[path]['flags']
        return f

    def files(self):
        return sorted(self.added() + self.modified() + self.removed())

    def modified(self):
        # dirty and also present in the parent -> modified
        return [f for f in self._cache.keys() if self._cache[f]['exists'] and
                self._existsinparent(f)]

    def added(self):
        # dirty and absent from the parent -> added
        return [f for f in self._cache.keys() if self._cache[f]['exists'] and
                not self._existsinparent(f)]

    def removed(self):
        # deleted here but present in the parent -> removed
        return [f for f in self._cache.keys() if
                not self._cache[f]['exists'] and self._existsinparent(f)]

    def isinmemory(self):
        return True

    def filedate(self, path):
        if self.isdirty(path):
            return self._cache[path]['date']
        else:
            return self._wrappedctx[path].date()

    def markcopied(self, path, origin):
        if self.isdirty(path):
            self._cache[path]['copied'] = origin
        else:
            raise error.ProgrammingError('markcopied() called on clean context')

    def copydata(self, path):
        if self.isdirty(path):
            return self._cache[path]['copied']
        else:
            raise error.ProgrammingError('copydata() called on clean context')

    def flags(self, path):
        if self.isdirty(path):
            if self._cache[path]['exists']:
                return self._cache[path]['flags']
            else:
                # BUGFIX: this previously interpolated self._path, which does
                # not exist on a changectx-like object and would raise
                # AttributeError instead of the intended error message.
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        else:
            return self._wrappedctx[path].flags()

    def _existsinparent(self, path):
        try:
            # ``commitctx` raises a ``ManifestLookupError`` if a path does not
            # exist, unlike ``workingctx``, which returns a ``workingfilectx``
            # with an ``exists()`` function.
            self._wrappedctx[path]
            return True
        except error.ManifestLookupError:
            return False

    def _auditconflicts(self, path):
        """Replicates conflict checks done by wvfs.write().

        Since we never write to the filesystem and never call `applyupdates` in
        IMM, we'll never check that a path is actually writable -- e.g., because
        it adds `a/foo`, but `a` is actually a file in the other commit.
        """
        def fail(path, component):
            # p1() is the base and we're receiving "writes" for p2()'s
            # files.
            if 'l' in self.p1()[component].flags():
                raise error.Abort("error: %s conflicts with symlink %s "
                                  "in %s." % (path, component,
                                              self.p1().rev()))
            else:
                raise error.Abort("error: '%s' conflicts with file '%s' in "
                                  "%s." % (path, component,
                                           self.p1().rev()))

        # Test that each new directory to be created to write this path from p2
        # is not a file in p1.
        components = path.split('/')
        for i in xrange(len(components)):
            component = "/".join(components[0:i])
            if component in self.p1():
                fail(path, component)

        # Test the other direction -- that this path from p2 isn't a directory
        # in p1 (test that p1 doesn't have any paths matching `path/*`).
        match = matchmod.match('/', '', [path + '/'], default=b'relpath')
        matches = self.p1().manifest().matches(match)
        if len(matches) > 0:
            # writing over a file with the identical path is fine
            if len(matches) == 1 and matches.keys()[0] == path:
                return
            raise error.Abort("error: file '%s' cannot be written because "
                              " '%s/' is a folder in %s (containing %d "
                              "entries: %s)"
                              % (path, path, self.p1(), len(matches),
                                 ', '.join(matches.keys())))

    def write(self, path, data, flags='', **kwargs):
        if data is None:
            raise error.ProgrammingError("data must be non-None")
        self._auditconflicts(path)
        self._markdirty(path, exists=True, data=data, date=dateutil.makedate(),
                        flags=flags)

    def setflags(self, path, l, x):
        self._markdirty(path, exists=True, date=dateutil.makedate(),
                        flags=(l and 'l' or '') + (x and 'x' or ''))

    def remove(self, path):
        self._markdirty(path, exists=False)

    def exists(self, path):
        """exists behaves like `lexists`, but needs to follow symlinks and
        return False if they are broken.
        """
        if self.isdirty(path):
            # If this path exists and is a symlink, "follow" it by calling
            # exists on the destination path.
            if (self._cache[path]['exists'] and
                'l' in self._cache[path]['flags']):
                return self.exists(self._cache[path]['data'].strip())
            else:
                return self._cache[path]['exists']

        return self._existsinparent(path)

    def lexists(self, path):
        """lexists returns True if the path exists"""
        if self.isdirty(path):
            return self._cache[path]['exists']

        return self._existsinparent(path)

    def size(self, path):
        if self.isdirty(path):
            if self._cache[path]['exists']:
                return len(self._cache[path]['data'])
            else:
                # BUGFIX: this previously interpolated self._path, which does
                # not exist on this class (see flags() above).
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        return self._wrappedctx[path].size()

    def tomemctx(self, text, branch=None, extra=None, date=None, parents=None,
                 user=None, editor=None):
        """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
        committed.

        ``text`` is the commit message.
        ``parents`` (optional) are rev numbers.
        """
        # Default parents to the wrapped contexts' if not passed.
        if parents is None:
            parents = self._wrappedctx.parents()
            if len(parents) == 1:
                parents = (parents[0], None)

        # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
        if parents[1] is None:
            parents = (self._repo[parents[0]], None)
        else:
            parents = (self._repo[parents[0]], self._repo[parents[1]])

        files = self._cache.keys()
        def getfile(repo, memctx, path):
            if self._cache[path]['exists']:
                return memfilectx(repo, memctx, path,
                                  self._cache[path]['data'],
                                  'l' in self._cache[path]['flags'],
                                  'x' in self._cache[path]['flags'],
                                  self._cache[path]['copied'])
            else:
                # Returning None, but including the path in `files`, is
                # necessary for memctx to register a deletion.
                return None
        return memctx(self._repo, parents, text, files, getfile, date=date,
                      extra=extra, user=user, branch=branch, editor=editor)

    def isdirty(self, path):
        """Return True if ``path`` has a pending (cached) change."""
        return path in self._cache

    def isempty(self):
        # We need to discard any keys that are actually clean before the empty
        # commit check.
        self._compact()
        return len(self._cache) == 0

    def clean(self):
        """Discard every pending change."""
        self._cache = {}

    def _compact(self):
        """Removes keys from the cache that are actually clean, by comparing
        them with the underlying context.

        This can occur during the merge process, e.g. by passing --tool :local
        to resolve a conflict.
        """
        keys = []
        for path in self._cache.keys():
            cache = self._cache[path]
            try:
                underlying = self._wrappedctx[path]
                if (underlying.data() == cache['data'] and
                    underlying.flags() == cache['flags']):
                    keys.append(path)
            except error.ManifestLookupError:
                # Path not in the underlying manifest (created).
                continue

        for path in keys:
            del self._cache[path]
        return keys

    def _markdirty(self, path, exists, data=None, date=None, flags=''):
        # record (or overwrite) the pending state for ``path``
        self._cache[path] = {
            'exists': exists,
            'data': data,
            'date': date,
            'flags': flags,
            'copied': None,
        }

    def filectx(self, path, filelog=None):
        return overlayworkingfilectx(self._repo, path, parent=self,
                                     filelog=filelog)
2034
2033
2035 class overlayworkingfilectx(committablefilectx):
2034 class overlayworkingfilectx(committablefilectx):
2036 """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
2035 """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
2037 cache, which can be flushed through later by calling ``flush()``."""
2036 cache, which can be flushed through later by calling ``flush()``."""
2038
2037
2039 def __init__(self, repo, path, filelog=None, parent=None):
2038 def __init__(self, repo, path, filelog=None, parent=None):
2040 super(overlayworkingfilectx, self).__init__(repo, path, filelog,
2039 super(overlayworkingfilectx, self).__init__(repo, path, filelog,
2041 parent)
2040 parent)
2042 self._repo = repo
2041 self._repo = repo
2043 self._parent = parent
2042 self._parent = parent
2044 self._path = path
2043 self._path = path
2045
2044
2046 def cmp(self, fctx):
2045 def cmp(self, fctx):
2047 return self.data() != fctx.data()
2046 return self.data() != fctx.data()
2048
2047
2049 def changectx(self):
2048 def changectx(self):
2050 return self._parent
2049 return self._parent
2051
2050
2052 def data(self):
2051 def data(self):
2053 return self._parent.data(self._path)
2052 return self._parent.data(self._path)
2054
2053
2055 def date(self):
2054 def date(self):
2056 return self._parent.filedate(self._path)
2055 return self._parent.filedate(self._path)
2057
2056
2058 def exists(self):
2057 def exists(self):
2059 return self.lexists()
2058 return self.lexists()
2060
2059
2061 def lexists(self):
2060 def lexists(self):
2062 return self._parent.exists(self._path)
2061 return self._parent.exists(self._path)
2063
2062
2064 def renamed(self):
2063 def renamed(self):
2065 path = self._parent.copydata(self._path)
2064 path = self._parent.copydata(self._path)
2066 if not path:
2065 if not path:
2067 return None
2066 return None
2068 return path, self._changectx._parents[0]._manifest.get(path, nullid)
2067 return path, self._changectx._parents[0]._manifest.get(path, nullid)
2069
2068
2070 def size(self):
2069 def size(self):
2071 return self._parent.size(self._path)
2070 return self._parent.size(self._path)
2072
2071
2073 def markcopied(self, origin):
2072 def markcopied(self, origin):
2074 self._parent.markcopied(self._path, origin)
2073 self._parent.markcopied(self._path, origin)
2075
2074
2076 def audit(self):
2075 def audit(self):
2077 pass
2076 pass
2078
2077
2079 def flags(self):
2078 def flags(self):
2080 return self._parent.flags(self._path)
2079 return self._parent.flags(self._path)
2081
2080
2082 def setflags(self, islink, isexec):
2081 def setflags(self, islink, isexec):
2083 return self._parent.setflags(self._path, islink, isexec)
2082 return self._parent.setflags(self._path, islink, isexec)
2084
2083
2085 def write(self, data, flags, backgroundclose=False, **kwargs):
2084 def write(self, data, flags, backgroundclose=False, **kwargs):
2086 return self._parent.write(self._path, data, flags, **kwargs)
2085 return self._parent.write(self._path, data, flags, **kwargs)
2087
2086
2088 def remove(self, ignoremissing=False):
2087 def remove(self, ignoremissing=False):
2089 return self._parent.remove(self._path)
2088 return self._parent.remove(self._path)
2090
2089
2091 def clearunknown(self):
2090 def clearunknown(self):
2092 pass
2091 pass
2093
2092
2094 class workingcommitctx(workingctx):
2093 class workingcommitctx(workingctx):
2095 """A workingcommitctx object makes access to data related to
2094 """A workingcommitctx object makes access to data related to
2096 the revision being committed convenient.
2095 the revision being committed convenient.
2097
2096
2098 This hides changes in the working directory, if they aren't
2097 This hides changes in the working directory, if they aren't
2099 committed in this context.
2098 committed in this context.
2100 """
2099 """
2101 def __init__(self, repo, changes,
2100 def __init__(self, repo, changes,
2102 text="", user=None, date=None, extra=None):
2101 text="", user=None, date=None, extra=None):
2103 super(workingctx, self).__init__(repo, text, user, date, extra,
2102 super(workingctx, self).__init__(repo, text, user, date, extra,
2104 changes)
2103 changes)
2105
2104
2106 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
2105 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
2107 """Return matched files only in ``self._status``
2106 """Return matched files only in ``self._status``
2108
2107
2109 Uncommitted files appear "clean" via this context, even if
2108 Uncommitted files appear "clean" via this context, even if
2110 they aren't actually so in the working directory.
2109 they aren't actually so in the working directory.
2111 """
2110 """
2112 if clean:
2111 if clean:
2113 clean = [f for f in self._manifest if f not in self._changedset]
2112 clean = [f for f in self._manifest if f not in self._changedset]
2114 else:
2113 else:
2115 clean = []
2114 clean = []
2116 return scmutil.status([f for f in self._status.modified if match(f)],
2115 return scmutil.status([f for f in self._status.modified if match(f)],
2117 [f for f in self._status.added if match(f)],
2116 [f for f in self._status.added if match(f)],
2118 [f for f in self._status.removed if match(f)],
2117 [f for f in self._status.removed if match(f)],
2119 [], [], [], clean)
2118 [], [], [], clean)
2120
2119
2121 @propertycache
2120 @propertycache
2122 def _changedset(self):
2121 def _changedset(self):
2123 """Return the set of files changed in this context
2122 """Return the set of files changed in this context
2124 """
2123 """
2125 changed = set(self._status.modified)
2124 changed = set(self._status.modified)
2126 changed.update(self._status.added)
2125 changed.update(self._status.added)
2127 changed.update(self._status.removed)
2126 changed.update(self._status.removed)
2128 return changed
2127 return changed
2129
2128
2130 def makecachingfilectxfn(func):
2129 def makecachingfilectxfn(func):
2131 """Create a filectxfn that caches based on the path.
2130 """Create a filectxfn that caches based on the path.
2132
2131
2133 We can't use util.cachefunc because it uses all arguments as the cache
2132 We can't use util.cachefunc because it uses all arguments as the cache
2134 key and this creates a cycle since the arguments include the repo and
2133 key and this creates a cycle since the arguments include the repo and
2135 memctx.
2134 memctx.
2136 """
2135 """
2137 cache = {}
2136 cache = {}
2138
2137
2139 def getfilectx(repo, memctx, path):
2138 def getfilectx(repo, memctx, path):
2140 if path not in cache:
2139 if path not in cache:
2141 cache[path] = func(repo, memctx, path)
2140 cache[path] = func(repo, memctx, path)
2142 return cache[path]
2141 return cache[path]
2143
2142
2144 return getfilectx
2143 return getfilectx
2145
2144
2146 def memfilefromctx(ctx):
2145 def memfilefromctx(ctx):
2147 """Given a context return a memfilectx for ctx[path]
2146 """Given a context return a memfilectx for ctx[path]
2148
2147
2149 This is a convenience method for building a memctx based on another
2148 This is a convenience method for building a memctx based on another
2150 context.
2149 context.
2151 """
2150 """
2152 def getfilectx(repo, memctx, path):
2151 def getfilectx(repo, memctx, path):
2153 fctx = ctx[path]
2152 fctx = ctx[path]
2154 # this is weird but apparently we only keep track of one parent
2153 # this is weird but apparently we only keep track of one parent
2155 # (why not only store that instead of a tuple?)
2154 # (why not only store that instead of a tuple?)
2156 copied = fctx.renamed()
2155 copied = fctx.renamed()
2157 if copied:
2156 if copied:
2158 copied = copied[0]
2157 copied = copied[0]
2159 return memfilectx(repo, memctx, path, fctx.data(),
2158 return memfilectx(repo, memctx, path, fctx.data(),
2160 islink=fctx.islink(), isexec=fctx.isexec(),
2159 islink=fctx.islink(), isexec=fctx.isexec(),
2161 copied=copied)
2160 copied=copied)
2162
2161
2163 return getfilectx
2162 return getfilectx
2164
2163
2165 def memfilefrompatch(patchstore):
2164 def memfilefrompatch(patchstore):
2166 """Given a patch (e.g. patchstore object) return a memfilectx
2165 """Given a patch (e.g. patchstore object) return a memfilectx
2167
2166
2168 This is a convenience method for building a memctx based on a patchstore.
2167 This is a convenience method for building a memctx based on a patchstore.
2169 """
2168 """
2170 def getfilectx(repo, memctx, path):
2169 def getfilectx(repo, memctx, path):
2171 data, mode, copied = patchstore.getfile(path)
2170 data, mode, copied = patchstore.getfile(path)
2172 if data is None:
2171 if data is None:
2173 return None
2172 return None
2174 islink, isexec = mode
2173 islink, isexec = mode
2175 return memfilectx(repo, memctx, path, data, islink=islink,
2174 return memfilectx(repo, memctx, path, data, islink=islink,
2176 isexec=isexec, copied=copied)
2175 isexec=isexec, copied=copied)
2177
2176
2178 return getfilectx
2177 return getfilectx
2179
2178
2180 class memctx(committablectx):
2179 class memctx(committablectx):
2181 """Use memctx to perform in-memory commits via localrepo.commitctx().
2180 """Use memctx to perform in-memory commits via localrepo.commitctx().
2182
2181
2183 Revision information is supplied at initialization time while
2182 Revision information is supplied at initialization time while
2184 related files data and is made available through a callback
2183 related files data and is made available through a callback
2185 mechanism. 'repo' is the current localrepo, 'parents' is a
2184 mechanism. 'repo' is the current localrepo, 'parents' is a
2186 sequence of two parent revisions identifiers (pass None for every
2185 sequence of two parent revisions identifiers (pass None for every
2187 missing parent), 'text' is the commit message and 'files' lists
2186 missing parent), 'text' is the commit message and 'files' lists
2188 names of files touched by the revision (normalized and relative to
2187 names of files touched by the revision (normalized and relative to
2189 repository root).
2188 repository root).
2190
2189
2191 filectxfn(repo, memctx, path) is a callable receiving the
2190 filectxfn(repo, memctx, path) is a callable receiving the
2192 repository, the current memctx object and the normalized path of
2191 repository, the current memctx object and the normalized path of
2193 requested file, relative to repository root. It is fired by the
2192 requested file, relative to repository root. It is fired by the
2194 commit function for every file in 'files', but calls order is
2193 commit function for every file in 'files', but calls order is
2195 undefined. If the file is available in the revision being
2194 undefined. If the file is available in the revision being
2196 committed (updated or added), filectxfn returns a memfilectx
2195 committed (updated or added), filectxfn returns a memfilectx
2197 object. If the file was removed, filectxfn return None for recent
2196 object. If the file was removed, filectxfn return None for recent
2198 Mercurial. Moved files are represented by marking the source file
2197 Mercurial. Moved files are represented by marking the source file
2199 removed and the new file added with copy information (see
2198 removed and the new file added with copy information (see
2200 memfilectx).
2199 memfilectx).
2201
2200
2202 user receives the committer name and defaults to current
2201 user receives the committer name and defaults to current
2203 repository username, date is the commit date in any format
2202 repository username, date is the commit date in any format
2204 supported by dateutil.parsedate() and defaults to current date, extra
2203 supported by dateutil.parsedate() and defaults to current date, extra
2205 is a dictionary of metadata or is left empty.
2204 is a dictionary of metadata or is left empty.
2206 """
2205 """
2207
2206
2208 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2207 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2209 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2208 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2210 # this field to determine what to do in filectxfn.
2209 # this field to determine what to do in filectxfn.
2211 _returnnoneformissingfiles = True
2210 _returnnoneformissingfiles = True
2212
2211
2213 def __init__(self, repo, parents, text, files, filectxfn, user=None,
2212 def __init__(self, repo, parents, text, files, filectxfn, user=None,
2214 date=None, extra=None, branch=None, editor=False):
2213 date=None, extra=None, branch=None, editor=False):
2215 super(memctx, self).__init__(repo, text, user, date, extra)
2214 super(memctx, self).__init__(repo, text, user, date, extra)
2216 self._rev = None
2215 self._rev = None
2217 self._node = None
2216 self._node = None
2218 parents = [(p or nullid) for p in parents]
2217 parents = [(p or nullid) for p in parents]
2219 p1, p2 = parents
2218 p1, p2 = parents
2220 self._parents = [self._repo[p] for p in (p1, p2)]
2219 self._parents = [self._repo[p] for p in (p1, p2)]
2221 files = sorted(set(files))
2220 files = sorted(set(files))
2222 self._files = files
2221 self._files = files
2223 if branch is not None:
2222 if branch is not None:
2224 self._extra['branch'] = encoding.fromlocal(branch)
2223 self._extra['branch'] = encoding.fromlocal(branch)
2225 self.substate = {}
2224 self.substate = {}
2226
2225
2227 if isinstance(filectxfn, patch.filestore):
2226 if isinstance(filectxfn, patch.filestore):
2228 filectxfn = memfilefrompatch(filectxfn)
2227 filectxfn = memfilefrompatch(filectxfn)
2229 elif not callable(filectxfn):
2228 elif not callable(filectxfn):
2230 # if store is not callable, wrap it in a function
2229 # if store is not callable, wrap it in a function
2231 filectxfn = memfilefromctx(filectxfn)
2230 filectxfn = memfilefromctx(filectxfn)
2232
2231
2233 # memoizing increases performance for e.g. vcs convert scenarios.
2232 # memoizing increases performance for e.g. vcs convert scenarios.
2234 self._filectxfn = makecachingfilectxfn(filectxfn)
2233 self._filectxfn = makecachingfilectxfn(filectxfn)
2235
2234
2236 if editor:
2235 if editor:
2237 self._text = editor(self._repo, self, [])
2236 self._text = editor(self._repo, self, [])
2238 self._repo.savecommitmessage(self._text)
2237 self._repo.savecommitmessage(self._text)
2239
2238
2240 def filectx(self, path, filelog=None):
2239 def filectx(self, path, filelog=None):
2241 """get a file context from the working directory
2240 """get a file context from the working directory
2242
2241
2243 Returns None if file doesn't exist and should be removed."""
2242 Returns None if file doesn't exist and should be removed."""
2244 return self._filectxfn(self._repo, self, path)
2243 return self._filectxfn(self._repo, self, path)
2245
2244
2246 def commit(self):
2245 def commit(self):
2247 """commit context to the repo"""
2246 """commit context to the repo"""
2248 return self._repo.commitctx(self)
2247 return self._repo.commitctx(self)
2249
2248
2250 @propertycache
2249 @propertycache
2251 def _manifest(self):
2250 def _manifest(self):
2252 """generate a manifest based on the return values of filectxfn"""
2251 """generate a manifest based on the return values of filectxfn"""
2253
2252
2254 # keep this simple for now; just worry about p1
2253 # keep this simple for now; just worry about p1
2255 pctx = self._parents[0]
2254 pctx = self._parents[0]
2256 man = pctx.manifest().copy()
2255 man = pctx.manifest().copy()
2257
2256
2258 for f in self._status.modified:
2257 for f in self._status.modified:
2259 p1node = nullid
2258 p1node = nullid
2260 p2node = nullid
2259 p2node = nullid
2261 p = pctx[f].parents() # if file isn't in pctx, check p2?
2260 p = pctx[f].parents() # if file isn't in pctx, check p2?
2262 if len(p) > 0:
2261 if len(p) > 0:
2263 p1node = p[0].filenode()
2262 p1node = p[0].filenode()
2264 if len(p) > 1:
2263 if len(p) > 1:
2265 p2node = p[1].filenode()
2264 p2node = p[1].filenode()
2266 man[f] = revlog.hash(self[f].data(), p1node, p2node)
2265 man[f] = revlog.hash(self[f].data(), p1node, p2node)
2267
2266
2268 for f in self._status.added:
2267 for f in self._status.added:
2269 man[f] = revlog.hash(self[f].data(), nullid, nullid)
2268 man[f] = revlog.hash(self[f].data(), nullid, nullid)
2270
2269
2271 for f in self._status.removed:
2270 for f in self._status.removed:
2272 if f in man:
2271 if f in man:
2273 del man[f]
2272 del man[f]
2274
2273
2275 return man
2274 return man
2276
2275
2277 @propertycache
2276 @propertycache
2278 def _status(self):
2277 def _status(self):
2279 """Calculate exact status from ``files`` specified at construction
2278 """Calculate exact status from ``files`` specified at construction
2280 """
2279 """
2281 man1 = self.p1().manifest()
2280 man1 = self.p1().manifest()
2282 p2 = self._parents[1]
2281 p2 = self._parents[1]
2283 # "1 < len(self._parents)" can't be used for checking
2282 # "1 < len(self._parents)" can't be used for checking
2284 # existence of the 2nd parent, because "memctx._parents" is
2283 # existence of the 2nd parent, because "memctx._parents" is
2285 # explicitly initialized by the list, of which length is 2.
2284 # explicitly initialized by the list, of which length is 2.
2286 if p2.node() != nullid:
2285 if p2.node() != nullid:
2287 man2 = p2.manifest()
2286 man2 = p2.manifest()
2288 managing = lambda f: f in man1 or f in man2
2287 managing = lambda f: f in man1 or f in man2
2289 else:
2288 else:
2290 managing = lambda f: f in man1
2289 managing = lambda f: f in man1
2291
2290
2292 modified, added, removed = [], [], []
2291 modified, added, removed = [], [], []
2293 for f in self._files:
2292 for f in self._files:
2294 if not managing(f):
2293 if not managing(f):
2295 added.append(f)
2294 added.append(f)
2296 elif self[f]:
2295 elif self[f]:
2297 modified.append(f)
2296 modified.append(f)
2298 else:
2297 else:
2299 removed.append(f)
2298 removed.append(f)
2300
2299
2301 return scmutil.status(modified, added, removed, [], [], [], [])
2300 return scmutil.status(modified, added, removed, [], [], [], [])
2302
2301
2303 class memfilectx(committablefilectx):
2302 class memfilectx(committablefilectx):
2304 """memfilectx represents an in-memory file to commit.
2303 """memfilectx represents an in-memory file to commit.
2305
2304
2306 See memctx and committablefilectx for more details.
2305 See memctx and committablefilectx for more details.
2307 """
2306 """
2308 def __init__(self, repo, changectx, path, data, islink=False,
2307 def __init__(self, repo, changectx, path, data, islink=False,
2309 isexec=False, copied=None):
2308 isexec=False, copied=None):
2310 """
2309 """
2311 path is the normalized file path relative to repository root.
2310 path is the normalized file path relative to repository root.
2312 data is the file content as a string.
2311 data is the file content as a string.
2313 islink is True if the file is a symbolic link.
2312 islink is True if the file is a symbolic link.
2314 isexec is True if the file is executable.
2313 isexec is True if the file is executable.
2315 copied is the source file path if current file was copied in the
2314 copied is the source file path if current file was copied in the
2316 revision being committed, or None."""
2315 revision being committed, or None."""
2317 super(memfilectx, self).__init__(repo, path, None, changectx)
2316 super(memfilectx, self).__init__(repo, path, None, changectx)
2318 self._data = data
2317 self._data = data
2319 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
2318 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
2320 self._copied = None
2319 self._copied = None
2321 if copied:
2320 if copied:
2322 self._copied = (copied, nullid)
2321 self._copied = (copied, nullid)
2323
2322
2324 def data(self):
2323 def data(self):
2325 return self._data
2324 return self._data
2326
2325
2327 def remove(self, ignoremissing=False):
2326 def remove(self, ignoremissing=False):
2328 """wraps unlink for a repo's working directory"""
2327 """wraps unlink for a repo's working directory"""
2329 # need to figure out what to do here
2328 # need to figure out what to do here
2330 del self._changectx[self._path]
2329 del self._changectx[self._path]
2331
2330
2332 def write(self, data, flags, **kwargs):
2331 def write(self, data, flags, **kwargs):
2333 """wraps repo.wwrite"""
2332 """wraps repo.wwrite"""
2334 self._data = data
2333 self._data = data
2335
2334
2336 class overlayfilectx(committablefilectx):
2335 class overlayfilectx(committablefilectx):
2337 """Like memfilectx but take an original filectx and optional parameters to
2336 """Like memfilectx but take an original filectx and optional parameters to
2338 override parts of it. This is useful when fctx.data() is expensive (i.e.
2337 override parts of it. This is useful when fctx.data() is expensive (i.e.
2339 flag processor is expensive) and raw data, flags, and filenode could be
2338 flag processor is expensive) and raw data, flags, and filenode could be
2340 reused (ex. rebase or mode-only amend a REVIDX_EXTSTORED file).
2339 reused (ex. rebase or mode-only amend a REVIDX_EXTSTORED file).
2341 """
2340 """
2342
2341
2343 def __init__(self, originalfctx, datafunc=None, path=None, flags=None,
2342 def __init__(self, originalfctx, datafunc=None, path=None, flags=None,
2344 copied=None, ctx=None):
2343 copied=None, ctx=None):
2345 """originalfctx: filecontext to duplicate
2344 """originalfctx: filecontext to duplicate
2346
2345
2347 datafunc: None or a function to override data (file content). It is a
2346 datafunc: None or a function to override data (file content). It is a
2348 function to be lazy. path, flags, copied, ctx: None or overridden value
2347 function to be lazy. path, flags, copied, ctx: None or overridden value
2349
2348
2350 copied could be (path, rev), or False. copied could also be just path,
2349 copied could be (path, rev), or False. copied could also be just path,
2351 and will be converted to (path, nullid). This simplifies some callers.
2350 and will be converted to (path, nullid). This simplifies some callers.
2352 """
2351 """
2353
2352
2354 if path is None:
2353 if path is None:
2355 path = originalfctx.path()
2354 path = originalfctx.path()
2356 if ctx is None:
2355 if ctx is None:
2357 ctx = originalfctx.changectx()
2356 ctx = originalfctx.changectx()
2358 ctxmatch = lambda: True
2357 ctxmatch = lambda: True
2359 else:
2358 else:
2360 ctxmatch = lambda: ctx == originalfctx.changectx()
2359 ctxmatch = lambda: ctx == originalfctx.changectx()
2361
2360
2362 repo = originalfctx.repo()
2361 repo = originalfctx.repo()
2363 flog = originalfctx.filelog()
2362 flog = originalfctx.filelog()
2364 super(overlayfilectx, self).__init__(repo, path, flog, ctx)
2363 super(overlayfilectx, self).__init__(repo, path, flog, ctx)
2365
2364
2366 if copied is None:
2365 if copied is None:
2367 copied = originalfctx.renamed()
2366 copied = originalfctx.renamed()
2368 copiedmatch = lambda: True
2367 copiedmatch = lambda: True
2369 else:
2368 else:
2370 if copied and not isinstance(copied, tuple):
2369 if copied and not isinstance(copied, tuple):
2371 # repo._filecommit will recalculate copyrev so nullid is okay
2370 # repo._filecommit will recalculate copyrev so nullid is okay
2372 copied = (copied, nullid)
2371 copied = (copied, nullid)
2373 copiedmatch = lambda: copied == originalfctx.renamed()
2372 copiedmatch = lambda: copied == originalfctx.renamed()
2374
2373
2375 # When data, copied (could affect data), ctx (could affect filelog
2374 # When data, copied (could affect data), ctx (could affect filelog
2376 # parents) are not overridden, rawdata, rawflags, and filenode may be
2375 # parents) are not overridden, rawdata, rawflags, and filenode may be
2377 # reused (repo._filecommit should double check filelog parents).
2376 # reused (repo._filecommit should double check filelog parents).
2378 #
2377 #
2379 # path, flags are not hashed in filelog (but in manifestlog) so they do
2378 # path, flags are not hashed in filelog (but in manifestlog) so they do
2380 # not affect reusable here.
2379 # not affect reusable here.
2381 #
2380 #
2382 # If ctx or copied is overridden to a same value with originalfctx,
2381 # If ctx or copied is overridden to a same value with originalfctx,
2383 # still consider it's reusable. originalfctx.renamed() may be a bit
2382 # still consider it's reusable. originalfctx.renamed() may be a bit
2384 # expensive so it's not called unless necessary. Assuming datafunc is
2383 # expensive so it's not called unless necessary. Assuming datafunc is
2385 # always expensive, do not call it for this "reusable" test.
2384 # always expensive, do not call it for this "reusable" test.
2386 reusable = datafunc is None and ctxmatch() and copiedmatch()
2385 reusable = datafunc is None and ctxmatch() and copiedmatch()
2387
2386
2388 if datafunc is None:
2387 if datafunc is None:
2389 datafunc = originalfctx.data
2388 datafunc = originalfctx.data
2390 if flags is None:
2389 if flags is None:
2391 flags = originalfctx.flags()
2390 flags = originalfctx.flags()
2392
2391
2393 self._datafunc = datafunc
2392 self._datafunc = datafunc
2394 self._flags = flags
2393 self._flags = flags
2395 self._copied = copied
2394 self._copied = copied
2396
2395
2397 if reusable:
2396 if reusable:
2398 # copy extra fields from originalfctx
2397 # copy extra fields from originalfctx
2399 attrs = ['rawdata', 'rawflags', '_filenode', '_filerev']
2398 attrs = ['rawdata', 'rawflags', '_filenode', '_filerev']
2400 for attr_ in attrs:
2399 for attr_ in attrs:
2401 if util.safehasattr(originalfctx, attr_):
2400 if util.safehasattr(originalfctx, attr_):
2402 setattr(self, attr_, getattr(originalfctx, attr_))
2401 setattr(self, attr_, getattr(originalfctx, attr_))
2403
2402
2404 def data(self):
2403 def data(self):
2405 return self._datafunc()
2404 return self._datafunc()
2406
2405
2407 class metadataonlyctx(committablectx):
2406 class metadataonlyctx(committablectx):
2408 """Like memctx but it's reusing the manifest of different commit.
2407 """Like memctx but it's reusing the manifest of different commit.
2409 Intended to be used by lightweight operations that are creating
2408 Intended to be used by lightweight operations that are creating
2410 metadata-only changes.
2409 metadata-only changes.
2411
2410
2412 Revision information is supplied at initialization time. 'repo' is the
2411 Revision information is supplied at initialization time. 'repo' is the
2413 current localrepo, 'ctx' is original revision which manifest we're reuisng
2412 current localrepo, 'ctx' is original revision which manifest we're reuisng
2414 'parents' is a sequence of two parent revisions identifiers (pass None for
2413 'parents' is a sequence of two parent revisions identifiers (pass None for
2415 every missing parent), 'text' is the commit.
2414 every missing parent), 'text' is the commit.
2416
2415
2417 user receives the committer name and defaults to current repository
2416 user receives the committer name and defaults to current repository
2418 username, date is the commit date in any format supported by
2417 username, date is the commit date in any format supported by
2419 dateutil.parsedate() and defaults to current date, extra is a dictionary of
2418 dateutil.parsedate() and defaults to current date, extra is a dictionary of
2420 metadata or is left empty.
2419 metadata or is left empty.
2421 """
2420 """
2422 def __init__(self, repo, originalctx, parents=None, text=None, user=None,
2421 def __init__(self, repo, originalctx, parents=None, text=None, user=None,
2423 date=None, extra=None, editor=False):
2422 date=None, extra=None, editor=False):
2424 if text is None:
2423 if text is None:
2425 text = originalctx.description()
2424 text = originalctx.description()
2426 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2425 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2427 self._rev = None
2426 self._rev = None
2428 self._node = None
2427 self._node = None
2429 self._originalctx = originalctx
2428 self._originalctx = originalctx
2430 self._manifestnode = originalctx.manifestnode()
2429 self._manifestnode = originalctx.manifestnode()
2431 if parents is None:
2430 if parents is None:
2432 parents = originalctx.parents()
2431 parents = originalctx.parents()
2433 else:
2432 else:
2434 parents = [repo[p] for p in parents if p is not None]
2433 parents = [repo[p] for p in parents if p is not None]
2435 parents = parents[:]
2434 parents = parents[:]
2436 while len(parents) < 2:
2435 while len(parents) < 2:
2437 parents.append(repo[nullid])
2436 parents.append(repo[nullid])
2438 p1, p2 = self._parents = parents
2437 p1, p2 = self._parents = parents
2439
2438
2440 # sanity check to ensure that the reused manifest parents are
2439 # sanity check to ensure that the reused manifest parents are
2441 # manifests of our commit parents
2440 # manifests of our commit parents
2442 mp1, mp2 = self.manifestctx().parents
2441 mp1, mp2 = self.manifestctx().parents
2443 if p1 != nullid and p1.manifestnode() != mp1:
2442 if p1 != nullid and p1.manifestnode() != mp1:
2444 raise RuntimeError('can\'t reuse the manifest: '
2443 raise RuntimeError('can\'t reuse the manifest: '
2445 'its p1 doesn\'t match the new ctx p1')
2444 'its p1 doesn\'t match the new ctx p1')
2446 if p2 != nullid and p2.manifestnode() != mp2:
2445 if p2 != nullid and p2.manifestnode() != mp2:
2447 raise RuntimeError('can\'t reuse the manifest: '
2446 raise RuntimeError('can\'t reuse the manifest: '
2448 'its p2 doesn\'t match the new ctx p2')
2447 'its p2 doesn\'t match the new ctx p2')
2449
2448
2450 self._files = originalctx.files()
2449 self._files = originalctx.files()
2451 self.substate = {}
2450 self.substate = {}
2452
2451
2453 if editor:
2452 if editor:
2454 self._text = editor(self._repo, self, [])
2453 self._text = editor(self._repo, self, [])
2455 self._repo.savecommitmessage(self._text)
2454 self._repo.savecommitmessage(self._text)
2456
2455
2457 def manifestnode(self):
2456 def manifestnode(self):
2458 return self._manifestnode
2457 return self._manifestnode
2459
2458
2460 @property
2459 @property
2461 def _manifestctx(self):
2460 def _manifestctx(self):
2462 return self._repo.manifestlog[self._manifestnode]
2461 return self._repo.manifestlog[self._manifestnode]
2463
2462
2464 def filectx(self, path, filelog=None):
2463 def filectx(self, path, filelog=None):
2465 return self._originalctx.filectx(path, filelog=filelog)
2464 return self._originalctx.filectx(path, filelog=filelog)
2466
2465
2467 def commit(self):
2466 def commit(self):
2468 """commit context to the repo"""
2467 """commit context to the repo"""
2469 return self._repo.commitctx(self)
2468 return self._repo.commitctx(self)
2470
2469
2471 @property
2470 @property
2472 def _manifest(self):
2471 def _manifest(self):
2473 return self._originalctx.manifest()
2472 return self._originalctx.manifest()
2474
2473
2475 @propertycache
2474 @propertycache
2476 def _status(self):
2475 def _status(self):
2477 """Calculate exact status from ``files`` specified in the ``origctx``
2476 """Calculate exact status from ``files`` specified in the ``origctx``
2478 and parents manifests.
2477 and parents manifests.
2479 """
2478 """
2480 man1 = self.p1().manifest()
2479 man1 = self.p1().manifest()
2481 p2 = self._parents[1]
2480 p2 = self._parents[1]
2482 # "1 < len(self._parents)" can't be used for checking
2481 # "1 < len(self._parents)" can't be used for checking
2483 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2482 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2484 # explicitly initialized by the list, of which length is 2.
2483 # explicitly initialized by the list, of which length is 2.
2485 if p2.node() != nullid:
2484 if p2.node() != nullid:
2486 man2 = p2.manifest()
2485 man2 = p2.manifest()
2487 managing = lambda f: f in man1 or f in man2
2486 managing = lambda f: f in man1 or f in man2
2488 else:
2487 else:
2489 managing = lambda f: f in man1
2488 managing = lambda f: f in man1
2490
2489
2491 modified, added, removed = [], [], []
2490 modified, added, removed = [], [], []
2492 for f in self._files:
2491 for f in self._files:
2493 if not managing(f):
2492 if not managing(f):
2494 added.append(f)
2493 added.append(f)
2495 elif f in self:
2494 elif f in self:
2496 modified.append(f)
2495 modified.append(f)
2497 else:
2496 else:
2498 removed.append(f)
2497 removed.append(f)
2499
2498
2500 return scmutil.status(modified, added, removed, [], [], [], [])
2499 return scmutil.status(modified, added, removed, [], [], [], [])
2501
2500
class arbitraryfilectx(object):
    """Allows you to use filectx-like functions on a file in an arbitrary
    location on disk, possibly not in the working directory.
    """
    def __init__(self, path, repo=None):
        # Repo is optional because contrib/simplemerge uses this class.
        self._repo = repo
        self._path = path

    def cmp(self, fctx):
        """Return True if this file's content differs from ``fctx``'s."""
        # filecmp follows symlinks whereas `cmp` should not, so skip the fast
        # path if either side is a symlink.
        symlinks = ('l' in self.flags() or 'l' in fctx.flags())
        if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
            # Add a fast-path for merge if both sides are disk-backed.
            # Note that filecmp uses the opposite return values (True if same)
            # from our cmp functions (True if different).
            return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
        return self.data() != fctx.data()

    def path(self):
        return self._path

    def flags(self):
        # Arbitrary on-disk files carry no exec/symlink flags here.
        return ''

    def data(self):
        return util.readfile(self._path)

    def decodeddata(self):
        with open(self._path, "rb") as f:
            return f.read()

    def remove(self):
        util.unlink(self._path)

    def write(self, data, flags, **kwargs):
        assert not flags
        # Open in binary mode: ``data`` is bytes. Text mode ("w") would
        # raise a TypeError on Python 3 and translate newlines on Windows,
        # and would be inconsistent with the binary reads above.
        with open(self._path, "wb") as f:
            f.write(data)
General Comments 0
You need to be logged in to leave comments. Login now