##// END OF EJS Templates
context: raise ProgrammingError on repo['my-tag']...
Martin von Zweigbergk -
r38845:91618801 default
parent child Browse files
Show More
@@ -1,2540 +1,2544 b''
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import filecmp
11 import filecmp
12 import os
12 import os
13 import stat
13 import stat
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 addednodeid,
17 addednodeid,
18 bin,
18 bin,
19 hex,
19 hex,
20 modifiednodeid,
20 modifiednodeid,
21 nullid,
21 nullid,
22 nullrev,
22 nullrev,
23 short,
23 short,
24 wdirfilenodeids,
24 wdirfilenodeids,
25 wdirid,
25 wdirid,
26 )
26 )
27 from . import (
27 from . import (
28 dagop,
28 dagop,
29 encoding,
29 encoding,
30 error,
30 error,
31 fileset,
31 fileset,
32 match as matchmod,
32 match as matchmod,
33 obsolete as obsmod,
33 obsolete as obsmod,
34 patch,
34 patch,
35 pathutil,
35 pathutil,
36 phases,
36 phases,
37 pycompat,
37 pycompat,
38 repoview,
38 repoview,
39 revlog,
39 revlog,
40 scmutil,
40 scmutil,
41 sparse,
41 sparse,
42 subrepo,
42 subrepo,
43 subrepoutil,
43 subrepoutil,
44 util,
44 util,
45 )
45 )
46 from .utils import (
46 from .utils import (
47 dateutil,
47 dateutil,
48 stringutil,
48 stringutil,
49 )
49 )
50
50
51 propertycache = util.propertycache
51 propertycache = util.propertycache
52
52
class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
                be committed,
    memctx: a context that represents changes in-memory and can also
            be committed."""

    def __init__(self, repo):
        self._repo = repo

    def __bytes__(self):
        # short hex form of the node, e.g. b'91618801' (12 hex digits)
        return short(self.node())

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return r"<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        try:
            # same concrete class AND same revision; subclasses without a
            # _rev attribute fall through to the AttributeError branch
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        # membership means "file tracked in this revision's manifest"
        return key in self._manifest

    def __getitem__(self, key):
        # ctx[path] -> file context for that path at this revision
        return self.filectx(key)

    def __iter__(self):
        # iterate over tracked file names
        return iter(self._manifest)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results, if this is
        a working copy context. For non-working copy contexts, it just returns
        the normal manifest."""
        return self.manifest()

    def _matchstatus(self, other, match):
        """This internal method provides a way for child objects to override the
        match operator.
        """
        return match

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in d.iteritems():
            if fn in deletedset:
                continue
            if value is None:
                # present and identical in both manifests
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in wdirfilenodeids:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [fn for fn in unknown if fn not in mf1 and
                       (not match or match(fn))]
            ignored = [fn for fn in ignored if fn not in mf1 and
                       (not match or match(fn))]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(modified, added, removed, deleted, unknown,
                              ignored, clean)

    @propertycache
    def substate(self):
        # parsed .hgsub/.hgsubstate information for this revision
        return subrepoutil.state(self, self._repo.ui)

    def subrev(self, subpath):
        return self.substate[subpath][1]

    def rev(self):
        return self._rev
    def node(self):
        return self._node
    def hex(self):
        return hex(self.node())
    def manifest(self):
        return self._manifest
    def manifestctx(self):
        return self._manifestctx
    def repo(self):
        return self._repo
    def phasestr(self):
        # human-readable phase name ('public', 'draft', 'secret', ...)
        return phases.phasenames[self.phase()]
    def mutable(self):
        # anything above the public phase may still be rewritten
        return self.phase() > phases.public

    def getfileset(self, expr):
        return fileset.getfileset(self, expr)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, 'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, 'extinct')

    def orphan(self):
        """True if the changeset is not obsolete but it's ancestor are"""
        return self.rev() in obsmod.getrevs(self._repo, 'orphan')

    def phasedivergent(self):
        """True if the changeset try to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be bumped.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')

    def contentdivergent(self):
        """Is a successors of a changeset with multiple possible successors set

        Only non-public and non-obsolete changesets may be divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')

    def isunstable(self):
        """True if the changeset is either unstable, bumped or divergent"""
        return self.orphan() or self.phasedivergent() or self.contentdivergent()

    def instabilities(self):
        """return the list of instabilities affecting this changeset.

        Instabilities are returned as strings. possible values are:
        - orphan,
        - phase-divergent,
        - content-divergent.
        """
        instabilities = []
        if self.orphan():
            instabilities.append('orphan')
        if self.phasedivergent():
            instabilities.append('phase-divergent')
        if self.contentdivergent():
            instabilities.append('content-divergent')
        return instabilities

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        # single-parent changeset: second parent is the null revision
        return changectx(self._repo, nullrev)

    def _fileinfo(self, path):
        # Return (filenode, flags) for path, preferring whichever manifest
        # representation is already loaded to avoid extra I/O.
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(self._node, path,
                                                _('not found in manifest'))
        if r'_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (self._manifestdelta[path],
                        self._manifestdelta.flags(path))
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(self._node, path,
                                            _('not found in manifest'))

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            # untracked path: no flags
            return ''

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        '''return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        '''
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        # build a matcher scoped to this context (so filesets resolve here)
        r = self._repo
        return matchmod.match(r.root, r.getcwd(), pats,
                              include, exclude, default,
                              auditor=r.nofsauditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn)

    def diff(self, ctx2=None, match=None, changes=None, opts=None,
             losedatafn=None, prefix='', relroot='', copy=None,
             hunksfilterfn=None):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            # default: diff against the first parent
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        return patch.diff(self._repo, ctx2, self, match=match, changes=changes,
                          opts=opts, losedatafn=losedatafn, prefix=prefix,
                          relroot=relroot, copy=copy,
                          hunksfilterfn=hunksfilterfn)

    def dirs(self):
        return self._manifest.dirs()

    def hasdir(self, dir):
        return self._manifest.hasdir(dir)

    def status(self, other=None, match=None, listignored=False,
               listclean=False, listunknown=False, listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        reversed = False
        if (not isinstance(ctx1, changectx)
            and isinstance(ctx2, changectx)):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
                              listunknown)

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
                               r.clean)

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(rev2, match=submatch, ignored=listignored,
                               clean=listclean, unknown=listunknown,
                               listsubrepos=True)
                for rfiles, sfiles in zip(r, s):
                    rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)

        for l in r:
            l.sort()

        return r
379
379
380 class changectx(basectx):
380 class changectx(basectx):
381 """A changecontext object makes access to data related to a particular
381 """A changecontext object makes access to data related to a particular
382 changeset convenient. It represents a read-only context already present in
382 changeset convenient. It represents a read-only context already present in
383 the repo."""
383 the repo."""
    def __init__(self, repo, changeid='.'):
        """changeid is a revision number, node, or tag"""
        super(changectx, self).__init__(repo)

        try:
            if isinstance(changeid, int):
                # plain revision number
                self._node = repo.changelog.node(changeid)
                self._rev = changeid
                return
            elif changeid == 'null':
                self._node = nullid
                self._rev = nullrev
                return
            elif changeid == 'tip':
                self._node = repo.changelog.tip()
                self._rev = repo.changelog.rev(self._node)
                return
            elif (changeid == '.'
                  or repo.local() and changeid == repo.dirstate.p1()):
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                self._node = repo.dirstate.p1()
                self._rev = repo.unfiltered().changelog.rev(self._node)
                return
            elif len(changeid) == 20:
                # 20 bytes: a binary node id
                try:
                    self._node = changeid
                    self._rev = repo.changelog.rev(changeid)
                    return
                except error.FilteredLookupError:
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (repo.local()
                        and changeid in repo.unfiltered().dirstate.parents()):
                        msg = _("working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid) # for the error message

            elif len(changeid) == 40:
                # 40 characters: a full hex node id
                try:
                    self._node = bin(changeid)
                    self._rev = repo.changelog.rev(self._node)
                    return
                except error.FilteredLookupError:
                    raise
                except (TypeError, LookupError):
                    pass
            else:
                # Anything else (tag/bookmark/branch names, short hashes,
                # revset syntax, ...) must be resolved by the caller before
                # constructing a changectx; reaching here is a caller bug.
                raise error.ProgrammingError(
                    "unsupported changeid '%s' of type %s" %
                    (changeid, type(changeid)))

            # lookup failed
        except (error.FilteredIndexError, error.FilteredLookupError):
            # revision exists but is hidden by the current repo filter
            raise error.FilteredRepoLookupError(_("filtered revision '%s'")
                                                % pycompat.bytestr(changeid))
        except error.FilteredRepoLookupError:
            raise
        except IndexError:
            pass
        raise error.RepoLookupError(
            _("unknown revision '%s'") % changeid)
446
450
    def __hash__(self):
        try:
            # hash by revision number, consistent with __eq__ on basectx
            return hash(self._rev)
        except AttributeError:
            # _rev not set (partially constructed object): fall back to
            # identity so the object stays hashable
            return id(self)

    def __nonzero__(self):
        # false only for the null revision
        return self._rev != nullrev

    # Python 3 truthiness protocol
    __bool__ = __nonzero__
457
461
    @propertycache
    def _changeset(self):
        # structured view of the raw changelog entry (manifest node, user,
        # date, files, description, extra); computed once and cached
        return self._repo.changelog.changelogrevision(self.rev())

    @propertycache
    def _manifest(self):
        return self._manifestctx.read()

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._changeset.manifest]

    @propertycache
    def _manifestdelta(self):
        # delta form of the manifest; avoids a full manifest read when only
        # a few entries are needed (see basectx._fileinfo)
        return self._manifestctx.readdelta()

    @propertycache
    def _parents(self):
        repo = self._repo
        p1, p2 = repo.changelog.parentrevs(self._rev)
        if p2 == nullrev:
            # single-parent changeset: omit the null second parent
            return [changectx(repo, p1)]
        return [changectx(repo, p1), changectx(repo, p2)]
481
485
    def changeset(self):
        """Return the changelog entry as a 6-tuple:
        (manifest node, user, date, files, description, extra)."""
        c = self._changeset
        return (
            c.manifest,
            c.user,
            c.date,
            c.files,
            c.description,
            c.extra,
        )
    def manifestnode(self):
        # node id of this revision's manifest
        return self._changeset.manifest
494
498
    # Thin accessors over the cached changelog entry (_changeset) and repo.

    def user(self):
        return self._changeset.user
    def date(self):
        return self._changeset.date
    def files(self):
        return self._changeset.files
    def description(self):
        return self._changeset.description
    def branch(self):
        # branch name is stored in the 'extra' dict, UTF-8; convert to the
        # local encoding for display
        return encoding.tolocal(self._changeset.extra.get("branch"))
    def closesbranch(self):
        # commits made with 'hg commit --close-branch' carry a 'close' key
        return 'close' in self._changeset.extra
    def extra(self):
        """Return a dict of extra information."""
        return self._changeset.extra
    def tags(self):
        """Return a list of byte tag names"""
        return self._repo.nodetags(self._node)
    def bookmarks(self):
        """Return a list of byte bookmark names."""
        return self._repo.nodebookmarks(self._node)
    def phase(self):
        return self._repo._phasecache.phase(self._repo, self._rev)
    def hidden(self):
        # True when this revision is filtered out of the 'visible' view
        return self._rev in repoview.filterrevs(self._repo, 'visible')
520
524
    def isinmemory(self):
        # committed changesets are always backed by storage, never in-memory
        return False

    def children(self):
        """return list of changectx contexts for each child changeset.

        This returns only the immediate child changesets. Use descendants() to
        recursively walk children.
        """
        c = self._repo.changelog.children(self._node)
        return [changectx(self._repo, x) for x in c]

    def ancestors(self):
        # lazily yield a changectx for every ancestor revision
        for a in self._repo.changelog.ancestors([self._rev]):
            yield changectx(self._repo, a)

    def descendants(self):
        """Recursively yield all children of the changeset.

        For just the immediate children, use children()
        """
        for d in self._repo.changelog.descendants([self._rev]):
            yield changectx(self._repo, d)

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        if fileid is None:
            # resolve the file node from this revision's manifest
            fileid = self.filenode(path)
        return filectx(self._repo, path, fileid=fileid,
                       changectx=self, filelog=filelog)
551
555
552 def ancestor(self, c2, warn=False):
556 def ancestor(self, c2, warn=False):
553 """return the "best" ancestor context of self and c2
557 """return the "best" ancestor context of self and c2
554
558
555 If there are multiple candidates, it will show a message and check
559 If there are multiple candidates, it will show a message and check
556 merge.preferancestor configuration before falling back to the
560 merge.preferancestor configuration before falling back to the
557 revlog ancestor."""
561 revlog ancestor."""
558 # deal with workingctxs
562 # deal with workingctxs
559 n2 = c2._node
563 n2 = c2._node
560 if n2 is None:
564 if n2 is None:
561 n2 = c2._parents[0]._node
565 n2 = c2._parents[0]._node
562 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
566 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
563 if not cahs:
567 if not cahs:
564 anc = nullid
568 anc = nullid
565 elif len(cahs) == 1:
569 elif len(cahs) == 1:
566 anc = cahs[0]
570 anc = cahs[0]
567 else:
571 else:
568 # experimental config: merge.preferancestor
572 # experimental config: merge.preferancestor
569 for r in self._repo.ui.configlist('merge', 'preferancestor'):
573 for r in self._repo.ui.configlist('merge', 'preferancestor'):
570 try:
574 try:
571 ctx = scmutil.revsymbol(self._repo, r)
575 ctx = scmutil.revsymbol(self._repo, r)
572 except error.RepoLookupError:
576 except error.RepoLookupError:
573 continue
577 continue
574 anc = ctx.node()
578 anc = ctx.node()
575 if anc in cahs:
579 if anc in cahs:
576 break
580 break
577 else:
581 else:
578 anc = self._repo.changelog.ancestor(self._node, n2)
582 anc = self._repo.changelog.ancestor(self._node, n2)
579 if warn:
583 if warn:
580 self._repo.ui.status(
584 self._repo.ui.status(
581 (_("note: using %s as ancestor of %s and %s\n") %
585 (_("note: using %s as ancestor of %s and %s\n") %
582 (short(anc), short(self._node), short(n2))) +
586 (short(anc), short(self._node), short(n2))) +
583 ''.join(_(" alternatively, use --config "
587 ''.join(_(" alternatively, use --config "
584 "merge.preferancestor=%s\n") %
588 "merge.preferancestor=%s\n") %
585 short(n) for n in sorted(cahs) if n != anc))
589 short(n) for n in sorted(cahs) if n != anc))
586 return changectx(self._repo, anc)
590 return changectx(self._repo, anc)
587
591
588 def descendant(self, other):
592 def descendant(self, other):
589 """True if other is descendant of this changeset"""
593 """True if other is descendant of this changeset"""
590 return self._repo.changelog.descendant(self._rev, other._rev)
594 return self._repo.changelog.descendant(self._rev, other._rev)
591
595
592 def walk(self, match):
596 def walk(self, match):
593 '''Generates matching file names.'''
597 '''Generates matching file names.'''
594
598
595 # Wrap match.bad method to have message with nodeid
599 # Wrap match.bad method to have message with nodeid
596 def bad(fn, msg):
600 def bad(fn, msg):
597 # The manifest doesn't know about subrepos, so don't complain about
601 # The manifest doesn't know about subrepos, so don't complain about
598 # paths into valid subrepos.
602 # paths into valid subrepos.
599 if any(fn == s or fn.startswith(s + '/')
603 if any(fn == s or fn.startswith(s + '/')
600 for s in self.substate):
604 for s in self.substate):
601 return
605 return
602 match.bad(fn, _('no such file in rev %s') % self)
606 match.bad(fn, _('no such file in rev %s') % self)
603
607
604 m = matchmod.badmatch(match, bad)
608 m = matchmod.badmatch(match, bad)
605 return self._manifest.walk(m)
609 return self._manifest.walk(m)
606
610
607 def matches(self, match):
611 def matches(self, match):
608 return self.walk(match)
612 return self.walk(match)
609
613
610 class basefilectx(object):
614 class basefilectx(object):
611 """A filecontext object represents the common logic for its children:
615 """A filecontext object represents the common logic for its children:
612 filectx: read-only access to a filerevision that is already present
616 filectx: read-only access to a filerevision that is already present
613 in the repo,
617 in the repo,
614 workingfilectx: a filecontext that represents files from the working
618 workingfilectx: a filecontext that represents files from the working
615 directory,
619 directory,
616 memfilectx: a filecontext that represents files in-memory,
620 memfilectx: a filecontext that represents files in-memory,
617 overlayfilectx: duplicate another filecontext with some fields overridden.
621 overlayfilectx: duplicate another filecontext with some fields overridden.
618 """
622 """
619 @propertycache
623 @propertycache
620 def _filelog(self):
624 def _filelog(self):
621 return self._repo.file(self._path)
625 return self._repo.file(self._path)
622
626
623 @propertycache
627 @propertycache
624 def _changeid(self):
628 def _changeid(self):
625 if r'_changeid' in self.__dict__:
629 if r'_changeid' in self.__dict__:
626 return self._changeid
630 return self._changeid
627 elif r'_changectx' in self.__dict__:
631 elif r'_changectx' in self.__dict__:
628 return self._changectx.rev()
632 return self._changectx.rev()
629 elif r'_descendantrev' in self.__dict__:
633 elif r'_descendantrev' in self.__dict__:
630 # this file context was created from a revision with a known
634 # this file context was created from a revision with a known
631 # descendant, we can (lazily) correct for linkrev aliases
635 # descendant, we can (lazily) correct for linkrev aliases
632 return self._adjustlinkrev(self._descendantrev)
636 return self._adjustlinkrev(self._descendantrev)
633 else:
637 else:
634 return self._filelog.linkrev(self._filerev)
638 return self._filelog.linkrev(self._filerev)
635
639
636 @propertycache
640 @propertycache
637 def _filenode(self):
641 def _filenode(self):
638 if r'_fileid' in self.__dict__:
642 if r'_fileid' in self.__dict__:
639 return self._filelog.lookup(self._fileid)
643 return self._filelog.lookup(self._fileid)
640 else:
644 else:
641 return self._changectx.filenode(self._path)
645 return self._changectx.filenode(self._path)
642
646
643 @propertycache
647 @propertycache
644 def _filerev(self):
648 def _filerev(self):
645 return self._filelog.rev(self._filenode)
649 return self._filelog.rev(self._filenode)
646
650
647 @propertycache
651 @propertycache
648 def _repopath(self):
652 def _repopath(self):
649 return self._path
653 return self._path
650
654
651 def __nonzero__(self):
655 def __nonzero__(self):
652 try:
656 try:
653 self._filenode
657 self._filenode
654 return True
658 return True
655 except error.LookupError:
659 except error.LookupError:
656 # file is missing
660 # file is missing
657 return False
661 return False
658
662
659 __bool__ = __nonzero__
663 __bool__ = __nonzero__
660
664
661 def __bytes__(self):
665 def __bytes__(self):
662 try:
666 try:
663 return "%s@%s" % (self.path(), self._changectx)
667 return "%s@%s" % (self.path(), self._changectx)
664 except error.LookupError:
668 except error.LookupError:
665 return "%s@???" % self.path()
669 return "%s@???" % self.path()
666
670
667 __str__ = encoding.strmethod(__bytes__)
671 __str__ = encoding.strmethod(__bytes__)
668
672
669 def __repr__(self):
673 def __repr__(self):
670 return r"<%s %s>" % (type(self).__name__, str(self))
674 return r"<%s %s>" % (type(self).__name__, str(self))
671
675
672 def __hash__(self):
676 def __hash__(self):
673 try:
677 try:
674 return hash((self._path, self._filenode))
678 return hash((self._path, self._filenode))
675 except AttributeError:
679 except AttributeError:
676 return id(self)
680 return id(self)
677
681
678 def __eq__(self, other):
682 def __eq__(self, other):
679 try:
683 try:
680 return (type(self) == type(other) and self._path == other._path
684 return (type(self) == type(other) and self._path == other._path
681 and self._filenode == other._filenode)
685 and self._filenode == other._filenode)
682 except AttributeError:
686 except AttributeError:
683 return False
687 return False
684
688
685 def __ne__(self, other):
689 def __ne__(self, other):
686 return not (self == other)
690 return not (self == other)
687
691
688 def filerev(self):
692 def filerev(self):
689 return self._filerev
693 return self._filerev
690 def filenode(self):
694 def filenode(self):
691 return self._filenode
695 return self._filenode
692 @propertycache
696 @propertycache
693 def _flags(self):
697 def _flags(self):
694 return self._changectx.flags(self._path)
698 return self._changectx.flags(self._path)
695 def flags(self):
699 def flags(self):
696 return self._flags
700 return self._flags
697 def filelog(self):
701 def filelog(self):
698 return self._filelog
702 return self._filelog
699 def rev(self):
703 def rev(self):
700 return self._changeid
704 return self._changeid
701 def linkrev(self):
705 def linkrev(self):
702 return self._filelog.linkrev(self._filerev)
706 return self._filelog.linkrev(self._filerev)
703 def node(self):
707 def node(self):
704 return self._changectx.node()
708 return self._changectx.node()
705 def hex(self):
709 def hex(self):
706 return self._changectx.hex()
710 return self._changectx.hex()
707 def user(self):
711 def user(self):
708 return self._changectx.user()
712 return self._changectx.user()
709 def date(self):
713 def date(self):
710 return self._changectx.date()
714 return self._changectx.date()
711 def files(self):
715 def files(self):
712 return self._changectx.files()
716 return self._changectx.files()
713 def description(self):
717 def description(self):
714 return self._changectx.description()
718 return self._changectx.description()
715 def branch(self):
719 def branch(self):
716 return self._changectx.branch()
720 return self._changectx.branch()
717 def extra(self):
721 def extra(self):
718 return self._changectx.extra()
722 return self._changectx.extra()
719 def phase(self):
723 def phase(self):
720 return self._changectx.phase()
724 return self._changectx.phase()
721 def phasestr(self):
725 def phasestr(self):
722 return self._changectx.phasestr()
726 return self._changectx.phasestr()
723 def obsolete(self):
727 def obsolete(self):
724 return self._changectx.obsolete()
728 return self._changectx.obsolete()
725 def instabilities(self):
729 def instabilities(self):
726 return self._changectx.instabilities()
730 return self._changectx.instabilities()
727 def manifest(self):
731 def manifest(self):
728 return self._changectx.manifest()
732 return self._changectx.manifest()
729 def changectx(self):
733 def changectx(self):
730 return self._changectx
734 return self._changectx
731 def renamed(self):
735 def renamed(self):
732 return self._copied
736 return self._copied
733 def repo(self):
737 def repo(self):
734 return self._repo
738 return self._repo
735 def size(self):
739 def size(self):
736 return len(self.data())
740 return len(self.data())
737
741
738 def path(self):
742 def path(self):
739 return self._path
743 return self._path
740
744
741 def isbinary(self):
745 def isbinary(self):
742 try:
746 try:
743 return stringutil.binary(self.data())
747 return stringutil.binary(self.data())
744 except IOError:
748 except IOError:
745 return False
749 return False
746 def isexec(self):
750 def isexec(self):
747 return 'x' in self.flags()
751 return 'x' in self.flags()
748 def islink(self):
752 def islink(self):
749 return 'l' in self.flags()
753 return 'l' in self.flags()
750
754
751 def isabsent(self):
755 def isabsent(self):
752 """whether this filectx represents a file not in self._changectx
756 """whether this filectx represents a file not in self._changectx
753
757
754 This is mainly for merge code to detect change/delete conflicts. This is
758 This is mainly for merge code to detect change/delete conflicts. This is
755 expected to be True for all subclasses of basectx."""
759 expected to be True for all subclasses of basectx."""
756 return False
760 return False
757
761
758 _customcmp = False
762 _customcmp = False
759 def cmp(self, fctx):
763 def cmp(self, fctx):
760 """compare with other file context
764 """compare with other file context
761
765
762 returns True if different than fctx.
766 returns True if different than fctx.
763 """
767 """
764 if fctx._customcmp:
768 if fctx._customcmp:
765 return fctx.cmp(self)
769 return fctx.cmp(self)
766
770
767 if (fctx._filenode is None
771 if (fctx._filenode is None
768 and (self._repo._encodefilterpats
772 and (self._repo._encodefilterpats
769 # if file data starts with '\1\n', empty metadata block is
773 # if file data starts with '\1\n', empty metadata block is
770 # prepended, which adds 4 bytes to filelog.size().
774 # prepended, which adds 4 bytes to filelog.size().
771 or self.size() - 4 == fctx.size())
775 or self.size() - 4 == fctx.size())
772 or self.size() == fctx.size()):
776 or self.size() == fctx.size()):
773 return self._filelog.cmp(self._filenode, fctx.data())
777 return self._filelog.cmp(self._filenode, fctx.data())
774
778
775 return True
779 return True
776
780
777 def _adjustlinkrev(self, srcrev, inclusive=False):
781 def _adjustlinkrev(self, srcrev, inclusive=False):
778 """return the first ancestor of <srcrev> introducing <fnode>
782 """return the first ancestor of <srcrev> introducing <fnode>
779
783
780 If the linkrev of the file revision does not point to an ancestor of
784 If the linkrev of the file revision does not point to an ancestor of
781 srcrev, we'll walk down the ancestors until we find one introducing
785 srcrev, we'll walk down the ancestors until we find one introducing
782 this file revision.
786 this file revision.
783
787
784 :srcrev: the changeset revision we search ancestors from
788 :srcrev: the changeset revision we search ancestors from
785 :inclusive: if true, the src revision will also be checked
789 :inclusive: if true, the src revision will also be checked
786 """
790 """
787 repo = self._repo
791 repo = self._repo
788 cl = repo.unfiltered().changelog
792 cl = repo.unfiltered().changelog
789 mfl = repo.manifestlog
793 mfl = repo.manifestlog
790 # fetch the linkrev
794 # fetch the linkrev
791 lkr = self.linkrev()
795 lkr = self.linkrev()
792 # hack to reuse ancestor computation when searching for renames
796 # hack to reuse ancestor computation when searching for renames
793 memberanc = getattr(self, '_ancestrycontext', None)
797 memberanc = getattr(self, '_ancestrycontext', None)
794 iteranc = None
798 iteranc = None
795 if srcrev is None:
799 if srcrev is None:
796 # wctx case, used by workingfilectx during mergecopy
800 # wctx case, used by workingfilectx during mergecopy
797 revs = [p.rev() for p in self._repo[None].parents()]
801 revs = [p.rev() for p in self._repo[None].parents()]
798 inclusive = True # we skipped the real (revless) source
802 inclusive = True # we skipped the real (revless) source
799 else:
803 else:
800 revs = [srcrev]
804 revs = [srcrev]
801 if memberanc is None:
805 if memberanc is None:
802 memberanc = iteranc = cl.ancestors(revs, lkr,
806 memberanc = iteranc = cl.ancestors(revs, lkr,
803 inclusive=inclusive)
807 inclusive=inclusive)
804 # check if this linkrev is an ancestor of srcrev
808 # check if this linkrev is an ancestor of srcrev
805 if lkr not in memberanc:
809 if lkr not in memberanc:
806 if iteranc is None:
810 if iteranc is None:
807 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
811 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
808 fnode = self._filenode
812 fnode = self._filenode
809 path = self._path
813 path = self._path
810 for a in iteranc:
814 for a in iteranc:
811 ac = cl.read(a) # get changeset data (we avoid object creation)
815 ac = cl.read(a) # get changeset data (we avoid object creation)
812 if path in ac[3]: # checking the 'files' field.
816 if path in ac[3]: # checking the 'files' field.
813 # The file has been touched, check if the content is
817 # The file has been touched, check if the content is
814 # similar to the one we search for.
818 # similar to the one we search for.
815 if fnode == mfl[ac[0]].readfast().get(path):
819 if fnode == mfl[ac[0]].readfast().get(path):
816 return a
820 return a
817 # In theory, we should never get out of that loop without a result.
821 # In theory, we should never get out of that loop without a result.
818 # But if manifest uses a buggy file revision (not children of the
822 # But if manifest uses a buggy file revision (not children of the
819 # one it replaces) we could. Such a buggy situation will likely
823 # one it replaces) we could. Such a buggy situation will likely
820 # result is crash somewhere else at to some point.
824 # result is crash somewhere else at to some point.
821 return lkr
825 return lkr
822
826
823 def introrev(self):
827 def introrev(self):
824 """return the rev of the changeset which introduced this file revision
828 """return the rev of the changeset which introduced this file revision
825
829
826 This method is different from linkrev because it take into account the
830 This method is different from linkrev because it take into account the
827 changeset the filectx was created from. It ensures the returned
831 changeset the filectx was created from. It ensures the returned
828 revision is one of its ancestors. This prevents bugs from
832 revision is one of its ancestors. This prevents bugs from
829 'linkrev-shadowing' when a file revision is used by multiple
833 'linkrev-shadowing' when a file revision is used by multiple
830 changesets.
834 changesets.
831 """
835 """
832 lkr = self.linkrev()
836 lkr = self.linkrev()
833 attrs = vars(self)
837 attrs = vars(self)
834 noctx = not (r'_changeid' in attrs or r'_changectx' in attrs)
838 noctx = not (r'_changeid' in attrs or r'_changectx' in attrs)
835 if noctx or self.rev() == lkr:
839 if noctx or self.rev() == lkr:
836 return self.linkrev()
840 return self.linkrev()
837 return self._adjustlinkrev(self.rev(), inclusive=True)
841 return self._adjustlinkrev(self.rev(), inclusive=True)
838
842
839 def introfilectx(self):
843 def introfilectx(self):
840 """Return filectx having identical contents, but pointing to the
844 """Return filectx having identical contents, but pointing to the
841 changeset revision where this filectx was introduced"""
845 changeset revision where this filectx was introduced"""
842 introrev = self.introrev()
846 introrev = self.introrev()
843 if self.rev() == introrev:
847 if self.rev() == introrev:
844 return self
848 return self
845 return self.filectx(self.filenode(), changeid=introrev)
849 return self.filectx(self.filenode(), changeid=introrev)
846
850
847 def _parentfilectx(self, path, fileid, filelog):
851 def _parentfilectx(self, path, fileid, filelog):
848 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
852 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
849 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
853 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
850 if r'_changeid' in vars(self) or r'_changectx' in vars(self):
854 if r'_changeid' in vars(self) or r'_changectx' in vars(self):
851 # If self is associated with a changeset (probably explicitly
855 # If self is associated with a changeset (probably explicitly
852 # fed), ensure the created filectx is associated with a
856 # fed), ensure the created filectx is associated with a
853 # changeset that is an ancestor of self.changectx.
857 # changeset that is an ancestor of self.changectx.
854 # This lets us later use _adjustlinkrev to get a correct link.
858 # This lets us later use _adjustlinkrev to get a correct link.
855 fctx._descendantrev = self.rev()
859 fctx._descendantrev = self.rev()
856 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
860 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
857 elif r'_descendantrev' in vars(self):
861 elif r'_descendantrev' in vars(self):
858 # Otherwise propagate _descendantrev if we have one associated.
862 # Otherwise propagate _descendantrev if we have one associated.
859 fctx._descendantrev = self._descendantrev
863 fctx._descendantrev = self._descendantrev
860 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
864 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
861 return fctx
865 return fctx
862
866
863 def parents(self):
867 def parents(self):
864 _path = self._path
868 _path = self._path
865 fl = self._filelog
869 fl = self._filelog
866 parents = self._filelog.parents(self._filenode)
870 parents = self._filelog.parents(self._filenode)
867 pl = [(_path, node, fl) for node in parents if node != nullid]
871 pl = [(_path, node, fl) for node in parents if node != nullid]
868
872
869 r = fl.renamed(self._filenode)
873 r = fl.renamed(self._filenode)
870 if r:
874 if r:
871 # - In the simple rename case, both parent are nullid, pl is empty.
875 # - In the simple rename case, both parent are nullid, pl is empty.
872 # - In case of merge, only one of the parent is null id and should
876 # - In case of merge, only one of the parent is null id and should
873 # be replaced with the rename information. This parent is -always-
877 # be replaced with the rename information. This parent is -always-
874 # the first one.
878 # the first one.
875 #
879 #
876 # As null id have always been filtered out in the previous list
880 # As null id have always been filtered out in the previous list
877 # comprehension, inserting to 0 will always result in "replacing
881 # comprehension, inserting to 0 will always result in "replacing
878 # first nullid parent with rename information.
882 # first nullid parent with rename information.
879 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
883 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
880
884
881 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
885 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
882
886
883 def p1(self):
887 def p1(self):
884 return self.parents()[0]
888 return self.parents()[0]
885
889
886 def p2(self):
890 def p2(self):
887 p = self.parents()
891 p = self.parents()
888 if len(p) == 2:
892 if len(p) == 2:
889 return p[1]
893 return p[1]
890 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
894 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
891
895
892 def annotate(self, follow=False, skiprevs=None, diffopts=None):
896 def annotate(self, follow=False, skiprevs=None, diffopts=None):
893 """Returns a list of annotateline objects for each line in the file
897 """Returns a list of annotateline objects for each line in the file
894
898
895 - line.fctx is the filectx of the node where that line was last changed
899 - line.fctx is the filectx of the node where that line was last changed
896 - line.lineno is the line number at the first appearance in the managed
900 - line.lineno is the line number at the first appearance in the managed
897 file
901 file
898 - line.text is the data on that line (including newline character)
902 - line.text is the data on that line (including newline character)
899 """
903 """
900 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
904 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
901
905
902 def parents(f):
906 def parents(f):
903 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
907 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
904 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
908 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
905 # from the topmost introrev (= srcrev) down to p.linkrev() if it
909 # from the topmost introrev (= srcrev) down to p.linkrev() if it
906 # isn't an ancestor of the srcrev.
910 # isn't an ancestor of the srcrev.
907 f._changeid
911 f._changeid
908 pl = f.parents()
912 pl = f.parents()
909
913
910 # Don't return renamed parents if we aren't following.
914 # Don't return renamed parents if we aren't following.
911 if not follow:
915 if not follow:
912 pl = [p for p in pl if p.path() == f.path()]
916 pl = [p for p in pl if p.path() == f.path()]
913
917
914 # renamed filectx won't have a filelog yet, so set it
918 # renamed filectx won't have a filelog yet, so set it
915 # from the cache to save time
919 # from the cache to save time
916 for p in pl:
920 for p in pl:
917 if not r'_filelog' in p.__dict__:
921 if not r'_filelog' in p.__dict__:
918 p._filelog = getlog(p.path())
922 p._filelog = getlog(p.path())
919
923
920 return pl
924 return pl
921
925
922 # use linkrev to find the first changeset where self appeared
926 # use linkrev to find the first changeset where self appeared
923 base = self.introfilectx()
927 base = self.introfilectx()
924 if getattr(base, '_ancestrycontext', None) is None:
928 if getattr(base, '_ancestrycontext', None) is None:
925 cl = self._repo.changelog
929 cl = self._repo.changelog
926 if base.rev() is None:
930 if base.rev() is None:
927 # wctx is not inclusive, but works because _ancestrycontext
931 # wctx is not inclusive, but works because _ancestrycontext
928 # is used to test filelog revisions
932 # is used to test filelog revisions
929 ac = cl.ancestors([p.rev() for p in base.parents()],
933 ac = cl.ancestors([p.rev() for p in base.parents()],
930 inclusive=True)
934 inclusive=True)
931 else:
935 else:
932 ac = cl.ancestors([base.rev()], inclusive=True)
936 ac = cl.ancestors([base.rev()], inclusive=True)
933 base._ancestrycontext = ac
937 base._ancestrycontext = ac
934
938
935 return dagop.annotate(base, parents, skiprevs=skiprevs,
939 return dagop.annotate(base, parents, skiprevs=skiprevs,
936 diffopts=diffopts)
940 diffopts=diffopts)
937
941
938 def ancestors(self, followfirst=False):
942 def ancestors(self, followfirst=False):
939 visit = {}
943 visit = {}
940 c = self
944 c = self
941 if followfirst:
945 if followfirst:
942 cut = 1
946 cut = 1
943 else:
947 else:
944 cut = None
948 cut = None
945
949
946 while True:
950 while True:
947 for parent in c.parents()[:cut]:
951 for parent in c.parents()[:cut]:
948 visit[(parent.linkrev(), parent.filenode())] = parent
952 visit[(parent.linkrev(), parent.filenode())] = parent
949 if not visit:
953 if not visit:
950 break
954 break
951 c = visit.pop(max(visit))
955 c = visit.pop(max(visit))
952 yield c
956 yield c
953
957
954 def decodeddata(self):
958 def decodeddata(self):
955 """Returns `data()` after running repository decoding filters.
959 """Returns `data()` after running repository decoding filters.
956
960
957 This is often equivalent to how the data would be expressed on disk.
961 This is often equivalent to how the data would be expressed on disk.
958 """
962 """
959 return self._repo.wwritedata(self.path(), self.data())
963 return self._repo.wwritedata(self.path(), self.data())
960
964
961 class filectx(basefilectx):
965 class filectx(basefilectx):
962 """A filecontext object makes access to data related to a particular
966 """A filecontext object makes access to data related to a particular
963 filerevision convenient."""
967 filerevision convenient."""
964 def __init__(self, repo, path, changeid=None, fileid=None,
968 def __init__(self, repo, path, changeid=None, fileid=None,
965 filelog=None, changectx=None):
969 filelog=None, changectx=None):
966 """changeid can be a changeset revision, node, or tag.
970 """changeid can be a changeset revision, node, or tag.
967 fileid can be a file revision or node."""
971 fileid can be a file revision or node."""
968 self._repo = repo
972 self._repo = repo
969 self._path = path
973 self._path = path
970
974
971 assert (changeid is not None
975 assert (changeid is not None
972 or fileid is not None
976 or fileid is not None
973 or changectx is not None), \
977 or changectx is not None), \
974 ("bad args: changeid=%r, fileid=%r, changectx=%r"
978 ("bad args: changeid=%r, fileid=%r, changectx=%r"
975 % (changeid, fileid, changectx))
979 % (changeid, fileid, changectx))
976
980
977 if filelog is not None:
981 if filelog is not None:
978 self._filelog = filelog
982 self._filelog = filelog
979
983
980 if changeid is not None:
984 if changeid is not None:
981 self._changeid = changeid
985 self._changeid = changeid
982 if changectx is not None:
986 if changectx is not None:
983 self._changectx = changectx
987 self._changectx = changectx
984 if fileid is not None:
988 if fileid is not None:
985 self._fileid = fileid
989 self._fileid = fileid
986
990
987 @propertycache
991 @propertycache
988 def _changectx(self):
992 def _changectx(self):
989 try:
993 try:
990 return changectx(self._repo, self._changeid)
994 return changectx(self._repo, self._changeid)
991 except error.FilteredRepoLookupError:
995 except error.FilteredRepoLookupError:
992 # Linkrev may point to any revision in the repository. When the
996 # Linkrev may point to any revision in the repository. When the
993 # repository is filtered this may lead to `filectx` trying to build
997 # repository is filtered this may lead to `filectx` trying to build
994 # `changectx` for filtered revision. In such case we fallback to
998 # `changectx` for filtered revision. In such case we fallback to
995 # creating `changectx` on the unfiltered version of the reposition.
999 # creating `changectx` on the unfiltered version of the reposition.
996 # This fallback should not be an issue because `changectx` from
1000 # This fallback should not be an issue because `changectx` from
997 # `filectx` are not used in complex operations that care about
1001 # `filectx` are not used in complex operations that care about
998 # filtering.
1002 # filtering.
999 #
1003 #
1000 # This fallback is a cheap and dirty fix that prevent several
1004 # This fallback is a cheap and dirty fix that prevent several
1001 # crashes. It does not ensure the behavior is correct. However the
1005 # crashes. It does not ensure the behavior is correct. However the
1002 # behavior was not correct before filtering either and "incorrect
1006 # behavior was not correct before filtering either and "incorrect
1003 # behavior" is seen as better as "crash"
1007 # behavior" is seen as better as "crash"
1004 #
1008 #
1005 # Linkrevs have several serious troubles with filtering that are
1009 # Linkrevs have several serious troubles with filtering that are
1006 # complicated to solve. Proper handling of the issue here should be
1010 # complicated to solve. Proper handling of the issue here should be
1007 # considered when solving linkrev issue are on the table.
1011 # considered when solving linkrev issue are on the table.
1008 return changectx(self._repo.unfiltered(), self._changeid)
1012 return changectx(self._repo.unfiltered(), self._changeid)
1009
1013
1010 def filectx(self, fileid, changeid=None):
1014 def filectx(self, fileid, changeid=None):
1011 '''opens an arbitrary revision of the file without
1015 '''opens an arbitrary revision of the file without
1012 opening a new filelog'''
1016 opening a new filelog'''
1013 return filectx(self._repo, self._path, fileid=fileid,
1017 return filectx(self._repo, self._path, fileid=fileid,
1014 filelog=self._filelog, changeid=changeid)
1018 filelog=self._filelog, changeid=changeid)
1015
1019
1016 def rawdata(self):
1020 def rawdata(self):
1017 return self._filelog.revision(self._filenode, raw=True)
1021 return self._filelog.revision(self._filenode, raw=True)
1018
1022
1019 def rawflags(self):
1023 def rawflags(self):
1020 """low-level revlog flags"""
1024 """low-level revlog flags"""
1021 return self._filelog.flags(self._filerev)
1025 return self._filelog.flags(self._filerev)
1022
1026
1023 def data(self):
1027 def data(self):
1024 try:
1028 try:
1025 return self._filelog.read(self._filenode)
1029 return self._filelog.read(self._filenode)
1026 except error.CensoredNodeError:
1030 except error.CensoredNodeError:
1027 if self._repo.ui.config("censor", "policy") == "ignore":
1031 if self._repo.ui.config("censor", "policy") == "ignore":
1028 return ""
1032 return ""
1029 raise error.Abort(_("censored node: %s") % short(self._filenode),
1033 raise error.Abort(_("censored node: %s") % short(self._filenode),
1030 hint=_("set censor.policy to ignore errors"))
1034 hint=_("set censor.policy to ignore errors"))
1031
1035
1032 def size(self):
1036 def size(self):
1033 return self._filelog.size(self._filerev)
1037 return self._filelog.size(self._filerev)
1034
1038
1035 @propertycache
1039 @propertycache
1036 def _copied(self):
1040 def _copied(self):
1037 """check if file was actually renamed in this changeset revision
1041 """check if file was actually renamed in this changeset revision
1038
1042
1039 If rename logged in file revision, we report copy for changeset only
1043 If rename logged in file revision, we report copy for changeset only
1040 if file revisions linkrev points back to the changeset in question
1044 if file revisions linkrev points back to the changeset in question
1041 or both changeset parents contain different file revisions.
1045 or both changeset parents contain different file revisions.
1042 """
1046 """
1043
1047
1044 renamed = self._filelog.renamed(self._filenode)
1048 renamed = self._filelog.renamed(self._filenode)
1045 if not renamed:
1049 if not renamed:
1046 return renamed
1050 return renamed
1047
1051
1048 if self.rev() == self.linkrev():
1052 if self.rev() == self.linkrev():
1049 return renamed
1053 return renamed
1050
1054
1051 name = self.path()
1055 name = self.path()
1052 fnode = self._filenode
1056 fnode = self._filenode
1053 for p in self._changectx.parents():
1057 for p in self._changectx.parents():
1054 try:
1058 try:
1055 if fnode == p.filenode(name):
1059 if fnode == p.filenode(name):
1056 return None
1060 return None
1057 except error.LookupError:
1061 except error.LookupError:
1058 pass
1062 pass
1059 return renamed
1063 return renamed
1060
1064
1061 def children(self):
1065 def children(self):
1062 # hard for renames
1066 # hard for renames
1063 c = self._filelog.children(self._filenode)
1067 c = self._filelog.children(self._filenode)
1064 return [filectx(self._repo, self._path, fileid=x,
1068 return [filectx(self._repo, self._path, fileid=x,
1065 filelog=self._filelog) for x in c]
1069 filelog=self._filelog) for x in c]
1066
1070
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        super(committablectx, self).__init__(repo)
        self._rev = None
        self._node = None
        self._text = text
        if date:
            self._date = dateutil.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise error.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
        if self._extra['branch'] == '':
            self._extra['branch'] = 'default'

    def __bytes__(self):
        return bytes(self._parents[0]) + "+"

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func

    @propertycache
    def _flagfunc(self):
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    @propertycache
    def _status(self):
        # lazily computed when not supplied via the 'changes' ctor argument
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        ui = self._repo.ui
        # devel.default-date lets tests pin a deterministic date
        date = ui.configdate('devel', 'default-date')
        if date is None:
            date = dateutil.makedate()
        return date

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        return None
    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)

    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def isinmemory(self):
        return False

    def tags(self):
        return []

    def bookmarks(self):
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        phase = phases.draft # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def walk(self, match):
        '''Generates matching file names.'''
        return sorted(self._repo.dirstate.walk(match,
                                               subrepos=sorted(self.substate),
                                               unknown=True, ignored=False))

    def matches(self, match):
        ds = self._repo.dirstate
        # skip files marked removed ('r') in the dirstate
        return sorted(f for f in ds.matches(match) if ds[f] != 'r')

    def ancestors(self):
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]):
            yield changectx(self._repo, a)

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.normal(f)
            for f in self.removed():
                self._repo.dirstate.drop(f)
            self._repo.dirstate.setparents(node)

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())

    def dirty(self, missing=False, merge=True, branch=True):
        return False
1269 class workingctx(committablectx):
1273 class workingctx(committablectx):
1270 """A workingctx object makes access to data related to
1274 """A workingctx object makes access to data related to
1271 the current working directory convenient.
1275 the current working directory convenient.
1272 date - any valid date string or (unixtime, offset), or None.
1276 date - any valid date string or (unixtime, offset), or None.
1273 user - username string, or None.
1277 user - username string, or None.
1274 extra - a dictionary of extra values, or None.
1278 extra - a dictionary of extra values, or None.
1275 changes - a list of file lists as returned by localrepo.status()
1279 changes - a list of file lists as returned by localrepo.status()
1276 or None to use the repository status.
1280 or None to use the repository status.
1277 """
1281 """
1278 def __init__(self, repo, text="", user=None, date=None, extra=None,
1282 def __init__(self, repo, text="", user=None, date=None, extra=None,
1279 changes=None):
1283 changes=None):
1280 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1284 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1281
1285
1282 def __iter__(self):
1286 def __iter__(self):
1283 d = self._repo.dirstate
1287 d = self._repo.dirstate
1284 for f in d:
1288 for f in d:
1285 if d[f] != 'r':
1289 if d[f] != 'r':
1286 yield f
1290 yield f
1287
1291
1288 def __contains__(self, key):
1292 def __contains__(self, key):
1289 return self._repo.dirstate[key] not in "?r"
1293 return self._repo.dirstate[key] not in "?r"
1290
1294
1291 def hex(self):
1295 def hex(self):
1292 return hex(wdirid)
1296 return hex(wdirid)
1293
1297
1294 @propertycache
1298 @propertycache
1295 def _parents(self):
1299 def _parents(self):
1296 p = self._repo.dirstate.parents()
1300 p = self._repo.dirstate.parents()
1297 if p[1] == nullid:
1301 if p[1] == nullid:
1298 p = p[:-1]
1302 p = p[:-1]
1299 return [changectx(self._repo, x) for x in p]
1303 return [changectx(self._repo, x) for x in p]
1300
1304
1301 def _fileinfo(self, path):
1305 def _fileinfo(self, path):
1302 # populate __dict__['_manifest'] as workingctx has no _manifestdelta
1306 # populate __dict__['_manifest'] as workingctx has no _manifestdelta
1303 self._manifest
1307 self._manifest
1304 return super(workingctx, self)._fileinfo(path)
1308 return super(workingctx, self)._fileinfo(path)
1305
1309
1306 def filectx(self, path, filelog=None):
1310 def filectx(self, path, filelog=None):
1307 """get a file context from the working directory"""
1311 """get a file context from the working directory"""
1308 return workingfilectx(self._repo, path, workingctx=self,
1312 return workingfilectx(self._repo, path, workingctx=self,
1309 filelog=filelog)
1313 filelog=filelog)
1310
1314
1311 def dirty(self, missing=False, merge=True, branch=True):
1315 def dirty(self, missing=False, merge=True, branch=True):
1312 "check whether a working directory is modified"
1316 "check whether a working directory is modified"
1313 # check subrepos first
1317 # check subrepos first
1314 for s in sorted(self.substate):
1318 for s in sorted(self.substate):
1315 if self.sub(s).dirty(missing=missing):
1319 if self.sub(s).dirty(missing=missing):
1316 return True
1320 return True
1317 # check current working dir
1321 # check current working dir
1318 return ((merge and self.p2()) or
1322 return ((merge and self.p2()) or
1319 (branch and self.branch() != self.p1().branch()) or
1323 (branch and self.branch() != self.p1().branch()) or
1320 self.modified() or self.added() or self.removed() or
1324 self.modified() or self.added() or self.removed() or
1321 (missing and self.deleted()))
1325 (missing and self.deleted()))
1322
1326
1323 def add(self, list, prefix=""):
1327 def add(self, list, prefix=""):
1324 with self._repo.wlock():
1328 with self._repo.wlock():
1325 ui, ds = self._repo.ui, self._repo.dirstate
1329 ui, ds = self._repo.ui, self._repo.dirstate
1326 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1330 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1327 rejected = []
1331 rejected = []
1328 lstat = self._repo.wvfs.lstat
1332 lstat = self._repo.wvfs.lstat
1329 for f in list:
1333 for f in list:
1330 # ds.pathto() returns an absolute file when this is invoked from
1334 # ds.pathto() returns an absolute file when this is invoked from
1331 # the keyword extension. That gets flagged as non-portable on
1335 # the keyword extension. That gets flagged as non-portable on
1332 # Windows, since it contains the drive letter and colon.
1336 # Windows, since it contains the drive letter and colon.
1333 scmutil.checkportable(ui, os.path.join(prefix, f))
1337 scmutil.checkportable(ui, os.path.join(prefix, f))
1334 try:
1338 try:
1335 st = lstat(f)
1339 st = lstat(f)
1336 except OSError:
1340 except OSError:
1337 ui.warn(_("%s does not exist!\n") % uipath(f))
1341 ui.warn(_("%s does not exist!\n") % uipath(f))
1338 rejected.append(f)
1342 rejected.append(f)
1339 continue
1343 continue
1340 if st.st_size > 10000000:
1344 if st.st_size > 10000000:
1341 ui.warn(_("%s: up to %d MB of RAM may be required "
1345 ui.warn(_("%s: up to %d MB of RAM may be required "
1342 "to manage this file\n"
1346 "to manage this file\n"
1343 "(use 'hg revert %s' to cancel the "
1347 "(use 'hg revert %s' to cancel the "
1344 "pending addition)\n")
1348 "pending addition)\n")
1345 % (f, 3 * st.st_size // 1000000, uipath(f)))
1349 % (f, 3 * st.st_size // 1000000, uipath(f)))
1346 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1350 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1347 ui.warn(_("%s not added: only files and symlinks "
1351 ui.warn(_("%s not added: only files and symlinks "
1348 "supported currently\n") % uipath(f))
1352 "supported currently\n") % uipath(f))
1349 rejected.append(f)
1353 rejected.append(f)
1350 elif ds[f] in 'amn':
1354 elif ds[f] in 'amn':
1351 ui.warn(_("%s already tracked!\n") % uipath(f))
1355 ui.warn(_("%s already tracked!\n") % uipath(f))
1352 elif ds[f] == 'r':
1356 elif ds[f] == 'r':
1353 ds.normallookup(f)
1357 ds.normallookup(f)
1354 else:
1358 else:
1355 ds.add(f)
1359 ds.add(f)
1356 return rejected
1360 return rejected
1357
1361
1358 def forget(self, files, prefix=""):
1362 def forget(self, files, prefix=""):
1359 with self._repo.wlock():
1363 with self._repo.wlock():
1360 ds = self._repo.dirstate
1364 ds = self._repo.dirstate
1361 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1365 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1362 rejected = []
1366 rejected = []
1363 for f in files:
1367 for f in files:
1364 if f not in self._repo.dirstate:
1368 if f not in self._repo.dirstate:
1365 self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
1369 self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
1366 rejected.append(f)
1370 rejected.append(f)
1367 elif self._repo.dirstate[f] != 'a':
1371 elif self._repo.dirstate[f] != 'a':
1368 self._repo.dirstate.remove(f)
1372 self._repo.dirstate.remove(f)
1369 else:
1373 else:
1370 self._repo.dirstate.drop(f)
1374 self._repo.dirstate.drop(f)
1371 return rejected
1375 return rejected
1372
1376
1373 def undelete(self, list):
1377 def undelete(self, list):
1374 pctxs = self.parents()
1378 pctxs = self.parents()
1375 with self._repo.wlock():
1379 with self._repo.wlock():
1376 ds = self._repo.dirstate
1380 ds = self._repo.dirstate
1377 for f in list:
1381 for f in list:
1378 if self._repo.dirstate[f] != 'r':
1382 if self._repo.dirstate[f] != 'r':
1379 self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
1383 self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
1380 else:
1384 else:
1381 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1385 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1382 t = fctx.data()
1386 t = fctx.data()
1383 self._repo.wwrite(f, t, fctx.flags())
1387 self._repo.wwrite(f, t, fctx.flags())
1384 self._repo.dirstate.normal(f)
1388 self._repo.dirstate.normal(f)
1385
1389
1386 def copy(self, source, dest):
1390 def copy(self, source, dest):
1387 try:
1391 try:
1388 st = self._repo.wvfs.lstat(dest)
1392 st = self._repo.wvfs.lstat(dest)
1389 except OSError as err:
1393 except OSError as err:
1390 if err.errno != errno.ENOENT:
1394 if err.errno != errno.ENOENT:
1391 raise
1395 raise
1392 self._repo.ui.warn(_("%s does not exist!\n")
1396 self._repo.ui.warn(_("%s does not exist!\n")
1393 % self._repo.dirstate.pathto(dest))
1397 % self._repo.dirstate.pathto(dest))
1394 return
1398 return
1395 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1399 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1396 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1400 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1397 "symbolic link\n")
1401 "symbolic link\n")
1398 % self._repo.dirstate.pathto(dest))
1402 % self._repo.dirstate.pathto(dest))
1399 else:
1403 else:
1400 with self._repo.wlock():
1404 with self._repo.wlock():
1401 if self._repo.dirstate[dest] in '?':
1405 if self._repo.dirstate[dest] in '?':
1402 self._repo.dirstate.add(dest)
1406 self._repo.dirstate.add(dest)
1403 elif self._repo.dirstate[dest] in 'r':
1407 elif self._repo.dirstate[dest] in 'r':
1404 self._repo.dirstate.normallookup(dest)
1408 self._repo.dirstate.normallookup(dest)
1405 self._repo.dirstate.copy(source, dest)
1409 self._repo.dirstate.copy(source, dest)
1406
1410
1407 def match(self, pats=None, include=None, exclude=None, default='glob',
1411 def match(self, pats=None, include=None, exclude=None, default='glob',
1408 listsubrepos=False, badfn=None):
1412 listsubrepos=False, badfn=None):
1409 r = self._repo
1413 r = self._repo
1410
1414
1411 # Only a case insensitive filesystem needs magic to translate user input
1415 # Only a case insensitive filesystem needs magic to translate user input
1412 # to actual case in the filesystem.
1416 # to actual case in the filesystem.
1413 icasefs = not util.fscasesensitive(r.root)
1417 icasefs = not util.fscasesensitive(r.root)
1414 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1418 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1415 default, auditor=r.auditor, ctx=self,
1419 default, auditor=r.auditor, ctx=self,
1416 listsubrepos=listsubrepos, badfn=badfn,
1420 listsubrepos=listsubrepos, badfn=badfn,
1417 icasefs=icasefs)
1421 icasefs=icasefs)
1418
1422
1419 def _filtersuspectsymlink(self, files):
1423 def _filtersuspectsymlink(self, files):
1420 if not files or self._repo.dirstate._checklink:
1424 if not files or self._repo.dirstate._checklink:
1421 return files
1425 return files
1422
1426
1423 # Symlink placeholders may get non-symlink-like contents
1427 # Symlink placeholders may get non-symlink-like contents
1424 # via user error or dereferencing by NFS or Samba servers,
1428 # via user error or dereferencing by NFS or Samba servers,
1425 # so we filter out any placeholders that don't look like a
1429 # so we filter out any placeholders that don't look like a
1426 # symlink
1430 # symlink
1427 sane = []
1431 sane = []
1428 for f in files:
1432 for f in files:
1429 if self.flags(f) == 'l':
1433 if self.flags(f) == 'l':
1430 d = self[f].data()
1434 d = self[f].data()
1431 if (d == '' or len(d) >= 1024 or '\n' in d
1435 if (d == '' or len(d) >= 1024 or '\n' in d
1432 or stringutil.binary(d)):
1436 or stringutil.binary(d)):
1433 self._repo.ui.debug('ignoring suspect symlink placeholder'
1437 self._repo.ui.debug('ignoring suspect symlink placeholder'
1434 ' "%s"\n' % f)
1438 ' "%s"\n' % f)
1435 continue
1439 continue
1436 sane.append(f)
1440 sane.append(f)
1437 return sane
1441 return sane
1438
1442
1439 def _checklookup(self, files):
1443 def _checklookup(self, files):
1440 # check for any possibly clean files
1444 # check for any possibly clean files
1441 if not files:
1445 if not files:
1442 return [], [], []
1446 return [], [], []
1443
1447
1444 modified = []
1448 modified = []
1445 deleted = []
1449 deleted = []
1446 fixup = []
1450 fixup = []
1447 pctx = self._parents[0]
1451 pctx = self._parents[0]
1448 # do a full compare of any files that might have changed
1452 # do a full compare of any files that might have changed
1449 for f in sorted(files):
1453 for f in sorted(files):
1450 try:
1454 try:
1451 # This will return True for a file that got replaced by a
1455 # This will return True for a file that got replaced by a
1452 # directory in the interim, but fixing that is pretty hard.
1456 # directory in the interim, but fixing that is pretty hard.
1453 if (f not in pctx or self.flags(f) != pctx.flags(f)
1457 if (f not in pctx or self.flags(f) != pctx.flags(f)
1454 or pctx[f].cmp(self[f])):
1458 or pctx[f].cmp(self[f])):
1455 modified.append(f)
1459 modified.append(f)
1456 else:
1460 else:
1457 fixup.append(f)
1461 fixup.append(f)
1458 except (IOError, OSError):
1462 except (IOError, OSError):
1459 # A file become inaccessible in between? Mark it as deleted,
1463 # A file become inaccessible in between? Mark it as deleted,
1460 # matching dirstate behavior (issue5584).
1464 # matching dirstate behavior (issue5584).
1461 # The dirstate has more complex behavior around whether a
1465 # The dirstate has more complex behavior around whether a
1462 # missing file matches a directory, etc, but we don't need to
1466 # missing file matches a directory, etc, but we don't need to
1463 # bother with that: if f has made it to this point, we're sure
1467 # bother with that: if f has made it to this point, we're sure
1464 # it's in the dirstate.
1468 # it's in the dirstate.
1465 deleted.append(f)
1469 deleted.append(f)
1466
1470
1467 return modified, deleted, fixup
1471 return modified, deleted, fixup
1468
1472
1469 def _poststatusfixup(self, status, fixup):
1473 def _poststatusfixup(self, status, fixup):
1470 """update dirstate for files that are actually clean"""
1474 """update dirstate for files that are actually clean"""
1471 poststatus = self._repo.postdsstatus()
1475 poststatus = self._repo.postdsstatus()
1472 if fixup or poststatus:
1476 if fixup or poststatus:
1473 try:
1477 try:
1474 oldid = self._repo.dirstate.identity()
1478 oldid = self._repo.dirstate.identity()
1475
1479
1476 # updating the dirstate is optional
1480 # updating the dirstate is optional
1477 # so we don't wait on the lock
1481 # so we don't wait on the lock
1478 # wlock can invalidate the dirstate, so cache normal _after_
1482 # wlock can invalidate the dirstate, so cache normal _after_
1479 # taking the lock
1483 # taking the lock
1480 with self._repo.wlock(False):
1484 with self._repo.wlock(False):
1481 if self._repo.dirstate.identity() == oldid:
1485 if self._repo.dirstate.identity() == oldid:
1482 if fixup:
1486 if fixup:
1483 normal = self._repo.dirstate.normal
1487 normal = self._repo.dirstate.normal
1484 for f in fixup:
1488 for f in fixup:
1485 normal(f)
1489 normal(f)
1486 # write changes out explicitly, because nesting
1490 # write changes out explicitly, because nesting
1487 # wlock at runtime may prevent 'wlock.release()'
1491 # wlock at runtime may prevent 'wlock.release()'
1488 # after this block from doing so for subsequent
1492 # after this block from doing so for subsequent
1489 # changing files
1493 # changing files
1490 tr = self._repo.currenttransaction()
1494 tr = self._repo.currenttransaction()
1491 self._repo.dirstate.write(tr)
1495 self._repo.dirstate.write(tr)
1492
1496
1493 if poststatus:
1497 if poststatus:
1494 for ps in poststatus:
1498 for ps in poststatus:
1495 ps(self, status)
1499 ps(self, status)
1496 else:
1500 else:
1497 # in this case, writing changes out breaks
1501 # in this case, writing changes out breaks
1498 # consistency, because .hg/dirstate was
1502 # consistency, because .hg/dirstate was
1499 # already changed simultaneously after last
1503 # already changed simultaneously after last
1500 # caching (see also issue5584 for detail)
1504 # caching (see also issue5584 for detail)
1501 self._repo.ui.debug('skip updating dirstate: '
1505 self._repo.ui.debug('skip updating dirstate: '
1502 'identity mismatch\n')
1506 'identity mismatch\n')
1503 except error.LockError:
1507 except error.LockError:
1504 pass
1508 pass
1505 finally:
1509 finally:
1506 # Even if the wlock couldn't be grabbed, clear out the list.
1510 # Even if the wlock couldn't be grabbed, clear out the list.
1507 self._repo.clearpostdsstatus()
1511 self._repo.clearpostdsstatus()
1508
1512
1509 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1513 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1510 '''Gets the status from the dirstate -- internal use only.'''
1514 '''Gets the status from the dirstate -- internal use only.'''
1511 subrepos = []
1515 subrepos = []
1512 if '.hgsub' in self:
1516 if '.hgsub' in self:
1513 subrepos = sorted(self.substate)
1517 subrepos = sorted(self.substate)
1514 cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
1518 cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
1515 clean=clean, unknown=unknown)
1519 clean=clean, unknown=unknown)
1516
1520
1517 # check for any possibly clean files
1521 # check for any possibly clean files
1518 fixup = []
1522 fixup = []
1519 if cmp:
1523 if cmp:
1520 modified2, deleted2, fixup = self._checklookup(cmp)
1524 modified2, deleted2, fixup = self._checklookup(cmp)
1521 s.modified.extend(modified2)
1525 s.modified.extend(modified2)
1522 s.deleted.extend(deleted2)
1526 s.deleted.extend(deleted2)
1523
1527
1524 if fixup and clean:
1528 if fixup and clean:
1525 s.clean.extend(fixup)
1529 s.clean.extend(fixup)
1526
1530
1527 self._poststatusfixup(s, fixup)
1531 self._poststatusfixup(s, fixup)
1528
1532
1529 if match.always():
1533 if match.always():
1530 # cache for performance
1534 # cache for performance
1531 if s.unknown or s.ignored or s.clean:
1535 if s.unknown or s.ignored or s.clean:
1532 # "_status" is cached with list*=False in the normal route
1536 # "_status" is cached with list*=False in the normal route
1533 self._status = scmutil.status(s.modified, s.added, s.removed,
1537 self._status = scmutil.status(s.modified, s.added, s.removed,
1534 s.deleted, [], [], [])
1538 s.deleted, [], [], [])
1535 else:
1539 else:
1536 self._status = s
1540 self._status = s
1537
1541
1538 return s
1542 return s
1539
1543
1540 @propertycache
1544 @propertycache
1541 def _manifest(self):
1545 def _manifest(self):
1542 """generate a manifest corresponding to the values in self._status
1546 """generate a manifest corresponding to the values in self._status
1543
1547
1544 This reuse the file nodeid from parent, but we use special node
1548 This reuse the file nodeid from parent, but we use special node
1545 identifiers for added and modified files. This is used by manifests
1549 identifiers for added and modified files. This is used by manifests
1546 merge to see that files are different and by update logic to avoid
1550 merge to see that files are different and by update logic to avoid
1547 deleting newly added files.
1551 deleting newly added files.
1548 """
1552 """
1549 return self._buildstatusmanifest(self._status)
1553 return self._buildstatusmanifest(self._status)
1550
1554
1551 def _buildstatusmanifest(self, status):
1555 def _buildstatusmanifest(self, status):
1552 """Builds a manifest that includes the given status results."""
1556 """Builds a manifest that includes the given status results."""
1553 parents = self.parents()
1557 parents = self.parents()
1554
1558
1555 man = parents[0].manifest().copy()
1559 man = parents[0].manifest().copy()
1556
1560
1557 ff = self._flagfunc
1561 ff = self._flagfunc
1558 for i, l in ((addednodeid, status.added),
1562 for i, l in ((addednodeid, status.added),
1559 (modifiednodeid, status.modified)):
1563 (modifiednodeid, status.modified)):
1560 for f in l:
1564 for f in l:
1561 man[f] = i
1565 man[f] = i
1562 try:
1566 try:
1563 man.setflag(f, ff(f))
1567 man.setflag(f, ff(f))
1564 except OSError:
1568 except OSError:
1565 pass
1569 pass
1566
1570
1567 for f in status.deleted + status.removed:
1571 for f in status.deleted + status.removed:
1568 if f in man:
1572 if f in man:
1569 del man[f]
1573 del man[f]
1570
1574
1571 return man
1575 return man
1572
1576
1573 def _buildstatus(self, other, s, match, listignored, listclean,
1577 def _buildstatus(self, other, s, match, listignored, listclean,
1574 listunknown):
1578 listunknown):
1575 """build a status with respect to another context
1579 """build a status with respect to another context
1576
1580
1577 This includes logic for maintaining the fast path of status when
1581 This includes logic for maintaining the fast path of status when
1578 comparing the working directory against its parent, which is to skip
1582 comparing the working directory against its parent, which is to skip
1579 building a new manifest if self (working directory) is not comparing
1583 building a new manifest if self (working directory) is not comparing
1580 against its parent (repo['.']).
1584 against its parent (repo['.']).
1581 """
1585 """
1582 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1586 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1583 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1587 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1584 # might have accidentally ended up with the entire contents of the file
1588 # might have accidentally ended up with the entire contents of the file
1585 # they are supposed to be linking to.
1589 # they are supposed to be linking to.
1586 s.modified[:] = self._filtersuspectsymlink(s.modified)
1590 s.modified[:] = self._filtersuspectsymlink(s.modified)
1587 if other != self._repo['.']:
1591 if other != self._repo['.']:
1588 s = super(workingctx, self)._buildstatus(other, s, match,
1592 s = super(workingctx, self)._buildstatus(other, s, match,
1589 listignored, listclean,
1593 listignored, listclean,
1590 listunknown)
1594 listunknown)
1591 return s
1595 return s
1592
1596
1593 def _matchstatus(self, other, match):
1597 def _matchstatus(self, other, match):
1594 """override the match method with a filter for directory patterns
1598 """override the match method with a filter for directory patterns
1595
1599
1596 We use inheritance to customize the match.bad method only in cases of
1600 We use inheritance to customize the match.bad method only in cases of
1597 workingctx since it belongs only to the working directory when
1601 workingctx since it belongs only to the working directory when
1598 comparing against the parent changeset.
1602 comparing against the parent changeset.
1599
1603
1600 If we aren't comparing against the working directory's parent, then we
1604 If we aren't comparing against the working directory's parent, then we
1601 just use the default match object sent to us.
1605 just use the default match object sent to us.
1602 """
1606 """
1603 if other != self._repo['.']:
1607 if other != self._repo['.']:
1604 def bad(f, msg):
1608 def bad(f, msg):
1605 # 'f' may be a directory pattern from 'match.files()',
1609 # 'f' may be a directory pattern from 'match.files()',
1606 # so 'f not in ctx1' is not enough
1610 # so 'f not in ctx1' is not enough
1607 if f not in other and not other.hasdir(f):
1611 if f not in other and not other.hasdir(f):
1608 self._repo.ui.warn('%s: %s\n' %
1612 self._repo.ui.warn('%s: %s\n' %
1609 (self._repo.dirstate.pathto(f), msg))
1613 (self._repo.dirstate.pathto(f), msg))
1610 match.bad = bad
1614 match.bad = bad
1611 return match
1615 return match
1612
1616
1613 def markcommitted(self, node):
1617 def markcommitted(self, node):
1614 super(workingctx, self).markcommitted(node)
1618 super(workingctx, self).markcommitted(node)
1615
1619
1616 sparse.aftercommit(self._repo, node)
1620 sparse.aftercommit(self._repo, node)
1617
1621
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""
    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        # no changeset/filelog revision yet -- the content is uncommitted
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        # an uncommitted file context always "exists"
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def filenode(ctx, path):
            return ctx._manifest.get(path, nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            # copy/rename: the sole parent is the copy source (filelog
            # unknown, hence None)
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        # drop parents where the file does not exist (nullid)
        return [self._parentfilectx(p, fileid=n, filelog=l)
                for p, n, l in pl if n != nullid]

    def children(self):
        # uncommitted content can have no committed descendants
        return []
1664
1668
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""
    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        return workingctx(self._repo)

    def data(self):
        """return the file's content as stored in the working directory"""
        return self._repo.wread(self._path)

    def renamed(self):
        """return (source path, source filenode) if this file was copied,
        else None"""
        rp = self._repo.dirstate.copied(self._path)
        if not rp:
            return None
        return rp, self._changectx._parents[0]._manifest.get(rp, nullid)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size

    def date(self):
        """return (mtime, tzoffset); falls back to the changectx date when
        the file is missing from disk"""
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def exists(self):
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        rmdir = self._repo.ui.configbool('experimental', 'removeemptydirs')
        self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing,
                                   rmdir=rmdir)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        """wraps repo.wwrite"""
        self._repo.wwrite(self._path, data, flags,
                          backgroundclose=backgroundclose,
                          **kwargs)

    def markcopied(self, src):
        """marks this file a copy of `src`"""
        # only meaningful when dirstate tracks the file (normal/merged/added)
        if self._repo.dirstate[self._path] in "nma":
            self._repo.dirstate.copy(src, self._path)

    def clearunknown(self):
        """Removes conflicting items in the working directory so that
        ``write()`` can be called successfully.
        """
        wvfs = self._repo.wvfs
        f = self._path
        wvfs.audit(f)
        if wvfs.isdir(f) and not wvfs.islink(f):
            # a real directory is squatting on our path; remove it
            wvfs.rmtree(f, forcibly=True)
        if self._repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
            # remove any file/symlink squatting on one of our ancestor dirs
            for p in reversed(list(util.finddirs(f))):
                if wvfs.isfileorlink(p):
                    wvfs.unlink(p)
                    break

    def setflags(self, l, x):
        self._repo.wvfs.setflags(self._path, l, x)
1746
1750
class overlayworkingctx(committablectx):
    """Wraps another mutable context with a write-back cache that can be
    converted into a commit context.

    self._cache[path] maps to a dict with keys: {
      'exists': bool?
      'date': date?
      'data': str?
      'flags': str?
      'copied': str? (path or None)
    }
    If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
    is `False`, the file was deleted.
    """

    def __init__(self, repo):
        super(overlayworkingctx, self).__init__(repo)
        self.clean()

    def setbase(self, wrappedctx):
        self._wrappedctx = wrappedctx
        self._parents = [wrappedctx]
        # Drop old manifest cache as it is now out of date.
        # This is necessary when, e.g., rebasing several nodes with one
        # ``overlayworkingctx`` (e.g. with --collapse).
        util.clearcachedproperty(self, '_manifest')

    def data(self, path):
        if self.isdirty(path):
            if self._cache[path]['exists']:
                if self._cache[path]['data']:
                    return self._cache[path]['data']
                else:
                    # Must fallback here, too, because we only set flags.
                    return self._wrappedctx[path].data()
            else:
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        else:
            return self._wrappedctx[path].data()

    @propertycache
    def _manifest(self):
        parents = self.parents()
        man = parents[0].manifest().copy()

        flag = self._flagfunc
        for path in self.added():
            man[path] = addednodeid
            man.setflag(path, flag(path))
        for path in self.modified():
            man[path] = modifiednodeid
            man.setflag(path, flag(path))
        for path in self.removed():
            del man[path]
        return man

    @propertycache
    def _flagfunc(self):
        def f(path):
            return self._cache[path]['flags']
        return f

    def files(self):
        return sorted(self.added() + self.modified() + self.removed())

    def modified(self):
        # dirty, still exists, and existed in the parent: modified
        return [f for f in self._cache.keys() if self._cache[f]['exists'] and
                self._existsinparent(f)]

    def added(self):
        # dirty, exists now, but not in the parent: added
        return [f for f in self._cache.keys() if self._cache[f]['exists'] and
                not self._existsinparent(f)]

    def removed(self):
        # dirty, gone now, but existed in the parent: removed
        return [f for f in self._cache.keys() if
                not self._cache[f]['exists'] and self._existsinparent(f)]

    def isinmemory(self):
        return True

    def filedate(self, path):
        if self.isdirty(path):
            return self._cache[path]['date']
        else:
            return self._wrappedctx[path].date()

    def markcopied(self, path, origin):
        if self.isdirty(path):
            self._cache[path]['copied'] = origin
        else:
            raise error.ProgrammingError('markcopied() called on clean context')

    def copydata(self, path):
        if self.isdirty(path):
            return self._cache[path]['copied']
        else:
            raise error.ProgrammingError('copydata() called on clean context')

    def flags(self, path):
        if self.isdirty(path):
            if self._cache[path]['exists']:
                return self._cache[path]['flags']
            else:
                # BUG FIX: previously interpolated ``self._path``, which does
                # not exist on a changectx-level object (it's a filectx
                # attribute) and raised AttributeError instead of the intended
                # error; use the ``path`` argument like ``data()`` does.
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        else:
            return self._wrappedctx[path].flags()

    def _existsinparent(self, path):
        try:
            # ``commitctx` raises a ``ManifestLookupError`` if a path does not
            # exist, unlike ``workingctx``, which returns a ``workingfilectx``
            # with an ``exists()`` function.
            self._wrappedctx[path]
            return True
        except error.ManifestLookupError:
            return False

    def _auditconflicts(self, path):
        """Replicates conflict checks done by wvfs.write().

        Since we never write to the filesystem and never call `applyupdates` in
        IMM, we'll never check that a path is actually writable -- e.g., because
        it adds `a/foo`, but `a` is actually a file in the other commit.
        """
        def fail(path, component):
            # p1() is the base and we're receiving "writes" for p2()'s
            # files.
            if 'l' in self.p1()[component].flags():
                raise error.Abort("error: %s conflicts with symlink %s "
                                  "in %s." % (path, component,
                                              self.p1().rev()))
            else:
                raise error.Abort("error: '%s' conflicts with file '%s' in "
                                  "%s." % (path, component,
                                           self.p1().rev()))

        # Test that each new directory to be created to write this path from p2
        # is not a file in p1.
        components = path.split('/')
        for i in xrange(len(components)):
            component = "/".join(components[0:i])
            if component in self.p1():
                fail(path, component)

        # Test the other direction -- that this path from p2 isn't a directory
        # in p1 (test that p1 doesn't any paths matching `path/*`).
        match = matchmod.match('/', '', [path + '/'], default=b'relpath')
        matches = self.p1().manifest().matches(match)
        if len(matches) > 0:
            if len(matches) == 1 and matches.keys()[0] == path:
                return
            raise error.Abort("error: file '%s' cannot be written because "
                              " '%s/' is a folder in %s (containing %d "
                              "entries: %s)"
                              % (path, path, self.p1(), len(matches),
                                 ', '.join(matches.keys())))

    def write(self, path, data, flags='', **kwargs):
        if data is None:
            raise error.ProgrammingError("data must be non-None")
        self._auditconflicts(path)
        self._markdirty(path, exists=True, data=data, date=dateutil.makedate(),
                        flags=flags)

    def setflags(self, path, l, x):
        self._markdirty(path, exists=True, date=dateutil.makedate(),
                        flags=(l and 'l' or '') + (x and 'x' or ''))

    def remove(self, path):
        self._markdirty(path, exists=False)

    def exists(self, path):
        """exists behaves like `lexists`, but needs to follow symlinks and
        return False if they are broken.
        """
        if self.isdirty(path):
            # If this path exists and is a symlink, "follow" it by calling
            # exists on the destination path.
            if (self._cache[path]['exists'] and
                        'l' in self._cache[path]['flags']):
                return self.exists(self._cache[path]['data'].strip())
            else:
                return self._cache[path]['exists']

        return self._existsinparent(path)

    def lexists(self, path):
        """lexists returns True if the path exists"""
        if self.isdirty(path):
            return self._cache[path]['exists']

        return self._existsinparent(path)

    def size(self, path):
        if self.isdirty(path):
            if self._cache[path]['exists']:
                return len(self._cache[path]['data'])
            else:
                # BUG FIX: same ``self._path`` AttributeError as in
                # ``flags()``; use the ``path`` argument instead.
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        return self._wrappedctx[path].size()

    def tomemctx(self, text, branch=None, extra=None, date=None, parents=None,
                 user=None, editor=None):
        """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
        committed.

        ``text`` is the commit message.
        ``parents`` (optional) are rev numbers.
        """
        # Default parents to the wrapped contexts' if not passed.
        if parents is None:
            parents = self._wrappedctx.parents()
            if len(parents) == 1:
                parents = (parents[0], None)

        # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
        if parents[1] is None:
            parents = (self._repo[parents[0]], None)
        else:
            parents = (self._repo[parents[0]], self._repo[parents[1]])

        files = self._cache.keys()
        def getfile(repo, memctx, path):
            if self._cache[path]['exists']:
                return memfilectx(repo, memctx, path,
                                  self._cache[path]['data'],
                                  'l' in self._cache[path]['flags'],
                                  'x' in self._cache[path]['flags'],
                                  self._cache[path]['copied'])
            else:
                # Returning None, but including the path in `files`, is
                # necessary for memctx to register a deletion.
                return None
        return memctx(self._repo, parents, text, files, getfile, date=date,
                      extra=extra, user=user, branch=branch, editor=editor)

    def isdirty(self, path):
        return path in self._cache

    def isempty(self):
        # We need to discard any keys that are actually clean before the empty
        # commit check.
        self._compact()
        return len(self._cache) == 0

    def clean(self):
        self._cache = {}

    def _compact(self):
        """Removes keys from the cache that are actually clean, by comparing
        them with the underlying context.

        This can occur during the merge process, e.g. by passing --tool :local
        to resolve a conflict.
        """
        keys = []
        for path in self._cache.keys():
            cache = self._cache[path]
            try:
                underlying = self._wrappedctx[path]
                if (underlying.data() == cache['data'] and
                            underlying.flags() == cache['flags']):
                    keys.append(path)
            except error.ManifestLookupError:
                # Path not in the underlying manifest (created).
                continue

        for path in keys:
            del self._cache[path]
        return keys

    def _markdirty(self, path, exists, data=None, date=None, flags=''):
        self._cache[path] = {
            'exists': exists,
            'data': data,
            'date': date,
            'flags': flags,
            'copied': None,
        }

    def filectx(self, path, filelog=None):
        return overlayworkingfilectx(self._repo, path, parent=self,
                                     filelog=filelog)
2033
2037
class overlayworkingfilectx(committablefilectx):
    """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
    cache, which can be flushed through later by calling ``flush()``.

    Every read and write is delegated to the parent (an overlayworkingctx),
    keyed by this file's path; no filesystem access happens here.
    """

    def __init__(self, repo, path, filelog=None, parent=None):
        super(overlayworkingfilectx, self).__init__(repo, path, filelog,
                                                    parent)
        self._repo = repo
        self._parent = parent
        self._path = path

    def cmp(self, fctx):
        # Compare content only; flag differences are handled elsewhere.
        return self.data() != fctx.data()

    def changectx(self):
        return self._parent

    def data(self):
        return self._parent.data(self._path)

    def date(self):
        return self._parent.filedate(self._path)

    def exists(self):
        # In-memory files are never dangling symlinks, so exists == lexists.
        return self.lexists()

    def lexists(self):
        return self._parent.exists(self._path)

    def renamed(self):
        path = self._parent.copydata(self._path)
        if not path:
            return None
        return path, self._changectx._parents[0]._manifest.get(path, nullid)

    def size(self):
        return self._parent.size(self._path)

    def markcopied(self, origin):
        self._parent.markcopied(self._path, origin)

    def audit(self):
        # No path auditing needed: nothing is written to the filesystem.
        pass

    def flags(self):
        return self._parent.flags(self._path)

    def setflags(self, islink, isexec):
        return self._parent.setflags(self._path, islink, isexec)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        # ``backgroundclose`` is accepted for interface compatibility but is
        # meaningless for an in-memory write and is intentionally dropped.
        return self._parent.write(self._path, data, flags, **kwargs)

    def remove(self, ignoremissing=False):
        return self._parent.remove(self._path)

    def clearunknown(self):
        # Nothing on disk can conflict with an in-memory file.
        pass
2092
2096
class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """
    def __init__(self, repo, changes,
                 text="", user=None, date=None, extra=None):
        # NOTE: the super() target is workingctx, not workingcommitctx, on
        # purpose: workingctx.__init__ is skipped so that the precomputed
        # ``changes`` status is handed straight to committablectx.
        super(workingctx, self).__init__(repo, text, user, date, extra,
                                         changes)

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        if clean:
            # Everything tracked that this commit does not touch counts
            # as clean from this context's point of view.
            clean = [f for f in self._manifest if f not in self._changedset]
        else:
            clean = []
        return scmutil.status([f for f in self._status.modified if match(f)],
                              [f for f in self._status.added if match(f)],
                              [f for f in self._status.removed if match(f)],
                              [], [], [], clean)

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        changed = set(self._status.modified)
        changed.update(self._status.added)
        changed.update(self._status.removed)
        return changed
2128
2132
def makecachingfilectxfn(func):
    """Create a filectxfn that memoizes ``func`` keyed on path only.

    util.cachefunc cannot be used here because it keys on all arguments,
    which would create a reference cycle through the repo and memctx
    arguments.
    """
    cache = {}

    def getfilectx(repo, memctx, path):
        try:
            return cache[path]
        except KeyError:
            result = cache[path] = func(repo, memctx, path)
            return result

    return getfilectx
2144
2148
def memfilefromctx(ctx):
    """Given a context return a memfilectx for ctx[path]

    This is a convenience method for building a memctx based on another
    context.
    """
    def getfilectx(repo, memctx, path):
        fctx = ctx[path]
        # renamed() yields (source, node) but only the source path is kept;
        # memfilectx records copies as (path, nullid) itself.
        copied = fctx.renamed()
        if copied:
            copied = copied[0]
        return memfilectx(repo, memctx, path, fctx.data(),
                          islink=fctx.islink(), isexec=fctx.isexec(),
                          copied=copied)

    return getfilectx
2163
2167
def memfilefrompatch(patchstore):
    """Given a patch (e.g. patchstore object) return a memfilectx

    This is a convenience method for building a memctx based on a
    patchstore.
    """
    def getfilectx(repo, memctx, path):
        data, mode, copied = patchstore.getfile(path)
        if data is None:
            # The patch removed this file; None signals deletion to memctx.
            return None
        islink, isexec = mode
        return memfilectx(repo, memctx, path, data, islink=islink,
                          isexec=isexec, copied=copied)

    return getfilectx
2178
2182
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time while
    related files data and is made available through a callback
    mechanism.  'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn return None for recent
    Mercurial. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by dateutil.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, branch=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        # Normalize missing parents (None entries) to the null revision.
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [self._repo[p] for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        if branch is not None:
            self._extra['branch'] = encoding.fromlocal(branch)
        self.substate = {}

        if isinstance(filectxfn, patch.filestore):
            filectxfn = memfilefrompatch(filectxfn)
        elif not callable(filectxfn):
            # if store is not callable, wrap it in a function
            filectxfn = memfilefromctx(filectxfn)

        # memoizing increases performance for e.g. vcs convert scenarios.
        self._filectxfn = makecachingfilectxfn(filectxfn)

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            p1node = nullid
            p2node = nullid
            p = pctx[f].parents() # if file isn't in pctx, check p2?
            if len(p) > 0:
                p1node = p[0].filenode()
                if len(p) > 1:
                    p2node = p[1].filenode()
            man[f] = revlog.hash(self[f].data(), p1node, p2node)

        for f in self._status.added:
            # Added files have no filelog parents.
            man[f] = revlog.hash(self[f].data(), nullid, nullid)

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
2301
2305
class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, changectx, path, data, islink=False,
                 isexec=False, copied=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, changectx)
        self._data = data
        # Encode the flags the same way manifests do: 'l' for symlink,
        # 'x' for executable, concatenated.
        flags = ''
        if islink:
            flags += 'l'
        if isexec:
            flags += 'x'
        self._flags = flags
        # Copy sources are recorded against nullid; the real copy revision
        # is recalculated at commit time.
        self._copied = (copied, nullid) if copied else None

    def data(self):
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags, **kwargs):
        """wraps repo.wwrite"""
        self._data = data
2334
2338
class overlayfilectx(committablefilectx):
    """Like memfilectx but take an original filectx and optional parameters to
    override parts of it. This is useful when fctx.data() is expensive (i.e.
    flag processor is expensive) and raw data, flags, and filenode could be
    reused (ex. rebase or mode-only amend a REVIDX_EXTSTORED file).
    """

    def __init__(self, originalfctx, datafunc=None, path=None, flags=None,
                 copied=None, ctx=None):
        """originalfctx: filecontext to duplicate

        datafunc: None or a function to override data (file content). It is a
        function to be lazy. path, flags, copied, ctx: None or overridden value

        copied could be (path, rev), or False. copied could also be just path,
        and will be converted to (path, nullid). This simplifies some callers.
        """

        if path is None:
            path = originalfctx.path()
        if ctx is None:
            ctx = originalfctx.changectx()
            ctxmatch = lambda: True
        else:
            ctxmatch = lambda: ctx == originalfctx.changectx()

        repo = originalfctx.repo()
        flog = originalfctx.filelog()
        super(overlayfilectx, self).__init__(repo, path, flog, ctx)

        if copied is None:
            copied = originalfctx.renamed()
            copiedmatch = lambda: True
        else:
            if copied and not isinstance(copied, tuple):
                # repo._filecommit will recalculate copyrev so nullid is okay
                copied = (copied, nullid)
            copiedmatch = lambda: copied == originalfctx.renamed()

        # When data, copied (could affect data), ctx (could affect filelog
        # parents) are not overridden, rawdata, rawflags, and filenode may be
        # reused (repo._filecommit should double check filelog parents).
        #
        # path, flags are not hashed in filelog (but in manifestlog) so they do
        # not affect reusable here.
        #
        # If ctx or copied is overridden to a same value with originalfctx,
        # still consider it's reusable. originalfctx.renamed() may be a bit
        # expensive so it's not called unless necessary. Assuming datafunc is
        # always expensive, do not call it for this "reusable" test.
        reusable = datafunc is None and ctxmatch() and copiedmatch()

        if datafunc is None:
            datafunc = originalfctx.data
        if flags is None:
            flags = originalfctx.flags()

        self._datafunc = datafunc
        self._flags = flags
        self._copied = copied

        if reusable:
            # copy extra fields from originalfctx
            attrs = ['rawdata', 'rawflags', '_filenode', '_filerev']
            for attr_ in attrs:
                if util.safehasattr(originalfctx, attr_):
                    setattr(self, attr_, getattr(originalfctx, attr_))

    def data(self):
        # Evaluated lazily; may be the original fctx's data() or an override.
        return self._datafunc()
2405
2409
2406 class metadataonlyctx(committablectx):
2410 class metadataonlyctx(committablectx):
2407 """Like memctx but it's reusing the manifest of different commit.
2411 """Like memctx but it's reusing the manifest of different commit.
2408 Intended to be used by lightweight operations that are creating
2412 Intended to be used by lightweight operations that are creating
2409 metadata-only changes.
2413 metadata-only changes.
2410
2414
2411 Revision information is supplied at initialization time. 'repo' is the
2415 Revision information is supplied at initialization time. 'repo' is the
2412 current localrepo, 'ctx' is original revision which manifest we're reuisng
2416 current localrepo, 'ctx' is original revision which manifest we're reuisng
2413 'parents' is a sequence of two parent revisions identifiers (pass None for
2417 'parents' is a sequence of two parent revisions identifiers (pass None for
2414 every missing parent), 'text' is the commit.
2418 every missing parent), 'text' is the commit.
2415
2419
2416 user receives the committer name and defaults to current repository
2420 user receives the committer name and defaults to current repository
2417 username, date is the commit date in any format supported by
2421 username, date is the commit date in any format supported by
2418 dateutil.parsedate() and defaults to current date, extra is a dictionary of
2422 dateutil.parsedate() and defaults to current date, extra is a dictionary of
2419 metadata or is left empty.
2423 metadata or is left empty.
2420 """
2424 """
2421 def __init__(self, repo, originalctx, parents=None, text=None, user=None,
2425 def __init__(self, repo, originalctx, parents=None, text=None, user=None,
2422 date=None, extra=None, editor=False):
2426 date=None, extra=None, editor=False):
2423 if text is None:
2427 if text is None:
2424 text = originalctx.description()
2428 text = originalctx.description()
2425 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2429 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2426 self._rev = None
2430 self._rev = None
2427 self._node = None
2431 self._node = None
2428 self._originalctx = originalctx
2432 self._originalctx = originalctx
2429 self._manifestnode = originalctx.manifestnode()
2433 self._manifestnode = originalctx.manifestnode()
2430 if parents is None:
2434 if parents is None:
2431 parents = originalctx.parents()
2435 parents = originalctx.parents()
2432 else:
2436 else:
2433 parents = [repo[p] for p in parents if p is not None]
2437 parents = [repo[p] for p in parents if p is not None]
2434 parents = parents[:]
2438 parents = parents[:]
2435 while len(parents) < 2:
2439 while len(parents) < 2:
2436 parents.append(repo[nullid])
2440 parents.append(repo[nullid])
2437 p1, p2 = self._parents = parents
2441 p1, p2 = self._parents = parents
2438
2442
2439 # sanity check to ensure that the reused manifest parents are
2443 # sanity check to ensure that the reused manifest parents are
2440 # manifests of our commit parents
2444 # manifests of our commit parents
2441 mp1, mp2 = self.manifestctx().parents
2445 mp1, mp2 = self.manifestctx().parents
2442 if p1 != nullid and p1.manifestnode() != mp1:
2446 if p1 != nullid and p1.manifestnode() != mp1:
2443 raise RuntimeError('can\'t reuse the manifest: '
2447 raise RuntimeError('can\'t reuse the manifest: '
2444 'its p1 doesn\'t match the new ctx p1')
2448 'its p1 doesn\'t match the new ctx p1')
2445 if p2 != nullid and p2.manifestnode() != mp2:
2449 if p2 != nullid and p2.manifestnode() != mp2:
2446 raise RuntimeError('can\'t reuse the manifest: '
2450 raise RuntimeError('can\'t reuse the manifest: '
2447 'its p2 doesn\'t match the new ctx p2')
2451 'its p2 doesn\'t match the new ctx p2')
2448
2452
2449 self._files = originalctx.files()
2453 self._files = originalctx.files()
2450 self.substate = {}
2454 self.substate = {}
2451
2455
2452 if editor:
2456 if editor:
2453 self._text = editor(self._repo, self, [])
2457 self._text = editor(self._repo, self, [])
2454 self._repo.savecommitmessage(self._text)
2458 self._repo.savecommitmessage(self._text)
2455
2459
2456 def manifestnode(self):
2460 def manifestnode(self):
2457 return self._manifestnode
2461 return self._manifestnode
2458
2462
2459 @property
2463 @property
2460 def _manifestctx(self):
2464 def _manifestctx(self):
2461 return self._repo.manifestlog[self._manifestnode]
2465 return self._repo.manifestlog[self._manifestnode]
2462
2466
2463 def filectx(self, path, filelog=None):
2467 def filectx(self, path, filelog=None):
2464 return self._originalctx.filectx(path, filelog=filelog)
2468 return self._originalctx.filectx(path, filelog=filelog)
2465
2469
def commit(self):
    """Commit this context to the repository and return the result."""
    return self._repo.commitctx(self)
2469
2473
@property
def _manifest(self):
    """The manifest of the original context, reused unchanged."""
    return self._originalctx.manifest()
2473
2477
@propertycache
def _status(self):
    """Calculate exact status from ``files`` specified in the ``origctx``
    and parents manifests.
    """
    man1 = self.p1().manifest()
    second = self._parents[1]
    # "1 < len(self._parents)" can't be used for checking existence of
    # the 2nd parent: "metadataonlyctx._parents" is explicitly built as
    # a list whose length is always 2.
    if second.node() == nullid:
        def managing(f):
            return f in man1
    else:
        man2 = second.manifest()

        def managing(f):
            return f in man1 or f in man2

    modified, added, removed = [], [], []
    for f in self._files:
        if not managing(f):
            # not in either parent manifest: the file is being added
            added.append(f)
        else:
            # tracked by a parent: modified if present here, else removed
            (modified if f in self else removed).append(f)

    return scmutil.status(modified, added, removed, [], [], [], [])
2500
2504
class arbitraryfilectx(object):
    """Expose filectx-like operations for a file at an arbitrary location
    on disk, which need not live inside the working directory.
    """
    def __init__(self, path, repo=None):
        # Repo is optional because contrib/simplemerge uses this class.
        self._repo = repo
        self._path = path

    def cmp(self, fctx):
        """Return True if this file's content differs from ``fctx``'s."""
        # filecmp follows symlinks whereas `cmp` should not, so skip the fast
        # path if either side is a symlink.
        has_symlink = 'l' in self.flags() or 'l' in fctx.flags()
        if self._repo and isinstance(fctx, workingfilectx) and not has_symlink:
            # Fast path for merges where both sides are disk-backed. Note
            # that filecmp returns True when the files match — the opposite
            # of our cmp convention (True means different).
            return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
        return self.data() != fctx.data()

    def path(self):
        """Path of the file on disk."""
        return self._path

    def flags(self):
        # Arbitrary on-disk files carry no exec/symlink flags here.
        return ''

    def data(self):
        return util.readfile(self._path)

    def decodeddata(self):
        # Raw bytes straight from disk; no filter/decode step applies.
        with open(self._path, "rb") as f:
            return f.read()

    def remove(self):
        util.unlink(self._path)

    def write(self, data, flags, **kwargs):
        # Flags (exec/symlink) are not supported for arbitrary files.
        assert not flags
        # NOTE(review): opens in text mode ("w") while decodeddata() reads
        # binary — presumably callers pass native str data; confirm.
        with open(self._path, "w") as f:
            f.write(data)
General Comments 0
You need to be logged in to leave comments. Login now