##// END OF EJS Templates
context: remove unwanted assignments in basectx.__new__() (API)...
Martin von Zweigbergk -
r37188:d7f3fdab default
parent child Browse files
Show More
@@ -1,2617 +1,2611 b''
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import filecmp
11 import filecmp
12 import os
12 import os
13 import re
13 import re
14 import stat
14 import stat
15
15
16 from .i18n import _
16 from .i18n import _
17 from .node import (
17 from .node import (
18 addednodeid,
18 addednodeid,
19 bin,
19 bin,
20 hex,
20 hex,
21 modifiednodeid,
21 modifiednodeid,
22 nullid,
22 nullid,
23 nullrev,
23 nullrev,
24 short,
24 short,
25 wdirid,
25 wdirid,
26 wdirnodes,
26 wdirnodes,
27 wdirrev,
27 wdirrev,
28 )
28 )
29 from . import (
29 from . import (
30 dagop,
30 dagop,
31 encoding,
31 encoding,
32 error,
32 error,
33 fileset,
33 fileset,
34 match as matchmod,
34 match as matchmod,
35 obsolete as obsmod,
35 obsolete as obsmod,
36 obsutil,
36 obsutil,
37 patch,
37 patch,
38 pathutil,
38 pathutil,
39 phases,
39 phases,
40 pycompat,
40 pycompat,
41 repoview,
41 repoview,
42 revlog,
42 revlog,
43 scmutil,
43 scmutil,
44 sparse,
44 sparse,
45 subrepo,
45 subrepo,
46 subrepoutil,
46 subrepoutil,
47 util,
47 util,
48 )
48 )
49 from .utils import (
49 from .utils import (
50 dateutil,
50 dateutil,
51 stringutil,
51 stringutil,
52 )
52 )
53
53
54 propertycache = util.propertycache
54 propertycache = util.propertycache
55
55
56 nonascii = re.compile(br'[^\x21-\x7f]').search
56 nonascii = re.compile(br'[^\x21-\x7f]').search
57
57
class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
    be committed,
    memctx: a context that represents changes in-memory and can also
    be committed."""
    def __new__(cls, repo, changeid='', *args, **kwargs):
        # Passing an existing context through returns it unchanged; the
        # _repo/_rev/_node assignments that used to live here were removed
        # (r37188) — subclasses are responsible for initializing them.
        if isinstance(changeid, basectx):
            return changeid

        return super(basectx, cls).__new__(cls)

    def __bytes__(self):
        return short(self.node())

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return r"<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        return key in self._manifest

    def __getitem__(self, key):
        return self.filectx(key)

    def __iter__(self):
        return iter(self._manifest)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results, if this is
        a working copy context. For non-working copy contexts, it just returns
        the normal manifest."""
        return self.manifest()

    def _matchstatus(self, other, match):
        """This internal method provides a way for child objects to override the
        match operator.
        """
        return match

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in d.iteritems():
            if fn in deletedset:
                continue
            if value is None:
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in wdirnodes:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [fn for fn in unknown if fn not in mf1 and
                       (not match or match(fn))]
            ignored = [fn for fn in ignored if fn not in mf1 and
                       (not match or match(fn))]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(modified, added, removed, deleted, unknown,
                              ignored, clean)

    @propertycache
    def substate(self):
        return subrepoutil.state(self, self._repo.ui)

    def subrev(self, subpath):
        return self.substate[subpath][1]

    def rev(self):
        return self._rev
    def node(self):
        return self._node
    def hex(self):
        return hex(self.node())
    def manifest(self):
        return self._manifest
    def manifestctx(self):
        return self._manifestctx
    def repo(self):
        return self._repo
    def phasestr(self):
        return phases.phasenames[self.phase()]
    def mutable(self):
        return self.phase() > phases.public

    def getfileset(self, expr):
        return fileset.getfileset(self, expr)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, 'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, 'extinct')

    def orphan(self):
        """True if the changeset is not obsolete but it's ancestor are"""
        return self.rev() in obsmod.getrevs(self._repo, 'orphan')

    def phasedivergent(self):
        """True if the changeset try to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be bumped.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')

    def contentdivergent(self):
        """Is a successors of a changeset with multiple possible successors set

        Only non-public and non-obsolete changesets may be divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')

    def isunstable(self):
        """True if the changeset is either unstable, bumped or divergent"""
        return self.orphan() or self.phasedivergent() or self.contentdivergent()

    def instabilities(self):
        """return the list of instabilities affecting this changeset.

        Instabilities are returned as strings. possible values are:
        - orphan,
        - phase-divergent,
        - content-divergent.
        """
        instabilities = []
        if self.orphan():
            instabilities.append('orphan')
        if self.phasedivergent():
            instabilities.append('phase-divergent')
        if self.contentdivergent():
            instabilities.append('content-divergent')
        return instabilities

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        return changectx(self._repo, nullrev)

    def _fileinfo(self, path):
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(self._node, path,
                                                _('not found in manifest'))
        if r'_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (self._manifestdelta[path],
                        self._manifestdelta.flags(path))
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(self._node, path,
                                            _('not found in manifest'))

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return ''

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        '''return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        '''
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        r = self._repo
        return matchmod.match(r.root, r.getcwd(), pats,
                              include, exclude, default,
                              auditor=r.nofsauditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn)

    def diff(self, ctx2=None, match=None, **opts):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        diffopts = patch.diffopts(self._repo.ui, pycompat.byteskwargs(opts))
        return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)

    def dirs(self):
        return self._manifest.dirs()

    def hasdir(self, dir):
        return self._manifest.hasdir(dir)

    def status(self, other=None, match=None, listignored=False,
               listclean=False, listunknown=False, listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        reversed = False
        if (not isinstance(ctx1, changectx)
            and isinstance(ctx2, changectx)):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
                              listunknown)

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
                               r.clean)

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(rev2, match=submatch, ignored=listignored,
                               clean=listclean, unknown=listunknown,
                               listsubrepos=True)
                for rfiles, sfiles in zip(r, s):
                    rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)

        for l in r:
            l.sort()

        return r
388
382
def _filterederror(repo, changeid):
    """build an exception to be raised about a filtered changeid

    This is extracted in a function to help extensions (eg: evolve) to
    experiment with various message variants."""
    if repo.filtername.startswith('visible'):

        # Check if the changeset is obsolete
        unfilteredrepo = repo.unfiltered()
        ctx = unfilteredrepo[changeid]

        # If the changeset is obsolete, enrich the message with the reason
        # that made this changeset not visible
        if ctx.obsolete():
            msg = obsutil._getfilteredreason(repo, changeid, ctx)
        else:
            msg = _("hidden revision '%s'") % changeid

        hint = _('use --hidden to access hidden revisions')

        return error.FilteredRepoLookupError(msg, hint=hint)
    msg = _("filtered revision '%s' (not in '%s' subset)")
    msg %= (changeid, repo.filtername)
    return error.FilteredRepoLookupError(msg)
413
407
414 class changectx(basectx):
408 class changectx(basectx):
415 """A changecontext object makes access to data related to a particular
409 """A changecontext object makes access to data related to a particular
416 changeset convenient. It represents a read-only context already present in
410 changeset convenient. It represents a read-only context already present in
417 the repo."""
411 the repo."""
418 def __init__(self, repo, changeid='.'):
412 def __init__(self, repo, changeid='.'):
419 """changeid is a revision number, node, or tag"""
413 """changeid is a revision number, node, or tag"""
420
414
421 # since basectx.__new__ already took care of copying the object, we
415 # since basectx.__new__ already took care of copying the object, we
422 # don't need to do anything in __init__, so we just exit here
416 # don't need to do anything in __init__, so we just exit here
423 if isinstance(changeid, basectx):
417 if isinstance(changeid, basectx):
424 return
418 return
425
419
426 if changeid == '':
420 if changeid == '':
427 changeid = '.'
421 changeid = '.'
428 self._repo = repo
422 self._repo = repo
429
423
430 try:
424 try:
431 if isinstance(changeid, int):
425 if isinstance(changeid, int):
432 self._node = repo.changelog.node(changeid)
426 self._node = repo.changelog.node(changeid)
433 self._rev = changeid
427 self._rev = changeid
434 return
428 return
435 if not pycompat.ispy3 and isinstance(changeid, long):
429 if not pycompat.ispy3 and isinstance(changeid, long):
436 changeid = "%d" % changeid
430 changeid = "%d" % changeid
437 if changeid == 'null':
431 if changeid == 'null':
438 self._node = nullid
432 self._node = nullid
439 self._rev = nullrev
433 self._rev = nullrev
440 return
434 return
441 if changeid == 'tip':
435 if changeid == 'tip':
442 self._node = repo.changelog.tip()
436 self._node = repo.changelog.tip()
443 self._rev = repo.changelog.rev(self._node)
437 self._rev = repo.changelog.rev(self._node)
444 return
438 return
445 if (changeid == '.'
439 if (changeid == '.'
446 or repo.local() and changeid == repo.dirstate.p1()):
440 or repo.local() and changeid == repo.dirstate.p1()):
447 # this is a hack to delay/avoid loading obsmarkers
441 # this is a hack to delay/avoid loading obsmarkers
448 # when we know that '.' won't be hidden
442 # when we know that '.' won't be hidden
449 self._node = repo.dirstate.p1()
443 self._node = repo.dirstate.p1()
450 self._rev = repo.unfiltered().changelog.rev(self._node)
444 self._rev = repo.unfiltered().changelog.rev(self._node)
451 return
445 return
452 if len(changeid) == 20:
446 if len(changeid) == 20:
453 try:
447 try:
454 self._node = changeid
448 self._node = changeid
455 self._rev = repo.changelog.rev(changeid)
449 self._rev = repo.changelog.rev(changeid)
456 return
450 return
457 except error.FilteredRepoLookupError:
451 except error.FilteredRepoLookupError:
458 raise
452 raise
459 except LookupError:
453 except LookupError:
460 pass
454 pass
461
455
462 try:
456 try:
463 r = int(changeid)
457 r = int(changeid)
464 if '%d' % r != changeid:
458 if '%d' % r != changeid:
465 raise ValueError
459 raise ValueError
466 l = len(repo.changelog)
460 l = len(repo.changelog)
467 if r < 0:
461 if r < 0:
468 r += l
462 r += l
469 if r < 0 or r >= l and r != wdirrev:
463 if r < 0 or r >= l and r != wdirrev:
470 raise ValueError
464 raise ValueError
471 self._rev = r
465 self._rev = r
472 self._node = repo.changelog.node(r)
466 self._node = repo.changelog.node(r)
473 return
467 return
474 except error.FilteredIndexError:
468 except error.FilteredIndexError:
475 raise
469 raise
476 except (ValueError, OverflowError, IndexError):
470 except (ValueError, OverflowError, IndexError):
477 pass
471 pass
478
472
479 if len(changeid) == 40:
473 if len(changeid) == 40:
480 try:
474 try:
481 self._node = bin(changeid)
475 self._node = bin(changeid)
482 self._rev = repo.changelog.rev(self._node)
476 self._rev = repo.changelog.rev(self._node)
483 return
477 return
484 except error.FilteredLookupError:
478 except error.FilteredLookupError:
485 raise
479 raise
486 except (TypeError, LookupError):
480 except (TypeError, LookupError):
487 pass
481 pass
488
482
489 # lookup bookmarks through the name interface
483 # lookup bookmarks through the name interface
490 try:
484 try:
491 self._node = repo.names.singlenode(repo, changeid)
485 self._node = repo.names.singlenode(repo, changeid)
492 self._rev = repo.changelog.rev(self._node)
486 self._rev = repo.changelog.rev(self._node)
493 return
487 return
494 except KeyError:
488 except KeyError:
495 pass
489 pass
496 except error.FilteredRepoLookupError:
490 except error.FilteredRepoLookupError:
497 raise
491 raise
498 except error.RepoLookupError:
492 except error.RepoLookupError:
499 pass
493 pass
500
494
501 self._node = repo.unfiltered().changelog._partialmatch(changeid)
495 self._node = repo.unfiltered().changelog._partialmatch(changeid)
502 if self._node is not None:
496 if self._node is not None:
503 self._rev = repo.changelog.rev(self._node)
497 self._rev = repo.changelog.rev(self._node)
504 return
498 return
505
499
506 # lookup failed
500 # lookup failed
507 # check if it might have come from damaged dirstate
501 # check if it might have come from damaged dirstate
508 #
502 #
509 # XXX we could avoid the unfiltered if we had a recognizable
503 # XXX we could avoid the unfiltered if we had a recognizable
510 # exception for filtered changeset access
504 # exception for filtered changeset access
511 if (repo.local()
505 if (repo.local()
512 and changeid in repo.unfiltered().dirstate.parents()):
506 and changeid in repo.unfiltered().dirstate.parents()):
513 msg = _("working directory has unknown parent '%s'!")
507 msg = _("working directory has unknown parent '%s'!")
514 raise error.Abort(msg % short(changeid))
508 raise error.Abort(msg % short(changeid))
515 try:
509 try:
516 if len(changeid) == 20 and nonascii(changeid):
510 if len(changeid) == 20 and nonascii(changeid):
517 changeid = hex(changeid)
511 changeid = hex(changeid)
518 except TypeError:
512 except TypeError:
519 pass
513 pass
520 except (error.FilteredIndexError, error.FilteredLookupError,
514 except (error.FilteredIndexError, error.FilteredLookupError,
521 error.FilteredRepoLookupError):
515 error.FilteredRepoLookupError):
522 raise _filterederror(repo, changeid)
516 raise _filterederror(repo, changeid)
523 except IndexError:
517 except IndexError:
524 pass
518 pass
525 raise error.RepoLookupError(
519 raise error.RepoLookupError(
526 _("unknown revision '%s'") % changeid)
520 _("unknown revision '%s'") % changeid)
527
521
    def __hash__(self):
        # Hash by revision number when it has been resolved; a context
        # whose lookup failed (no _rev attribute yet) falls back to
        # identity hashing.
        try:
            return hash(self._rev)
        except AttributeError:
            return id(self)

    def __nonzero__(self):
        # A changectx is falsy only for the null revision.
        return self._rev != nullrev

    __bool__ = __nonzero__

    @propertycache
    def _changeset(self):
        # Parsed changelog entry (manifest, user, date, files,
        # description, extra) for this revision; cached on first access.
        return self._repo.changelog.changelogrevision(self.rev())

    @propertycache
    def _manifest(self):
        # Fully materialized manifest for this changeset.
        return self._manifestctx.read()

    @property
    def _manifestctx(self):
        # NOTE: deliberately a plain property (not propertycache); the
        # manifestlog lookup is delegated so its own caching applies.
        return self._repo.manifestlog[self._changeset.manifest]

    @propertycache
    def _manifestdelta(self):
        # Delta form of the manifest (relative read), cached.
        return self._manifestctx.readdelta()
554
548
555 @propertycache
549 @propertycache
556 def _parents(self):
550 def _parents(self):
557 repo = self._repo
551 repo = self._repo
558 p1, p2 = repo.changelog.parentrevs(self._rev)
552 p1, p2 = repo.changelog.parentrevs(self._rev)
559 if p2 == nullrev:
553 if p2 == nullrev:
560 return [changectx(repo, p1)]
554 return [changectx(repo, p1)]
561 return [changectx(repo, p1), changectx(repo, p2)]
555 return [changectx(repo, p1), changectx(repo, p2)]
562
556
    def changeset(self):
        # Legacy tuple form of the changelog entry; field order is part
        # of the historical API.
        c = self._changeset
        return (
            c.manifest,
            c.user,
            c.date,
            c.files,
            c.description,
            c.extra,
        )
    def manifestnode(self):
        # Node id of this changeset's manifest.
        return self._changeset.manifest

    def user(self):
        return self._changeset.user
    def date(self):
        return self._changeset.date
    def files(self):
        return self._changeset.files
    def description(self):
        return self._changeset.description
    def branch(self):
        # Branch name lives in the 'extra' dict; convert to local encoding.
        return encoding.tolocal(self._changeset.extra.get("branch"))
    def closesbranch(self):
        # A branch-closing commit records a 'close' key in extra.
        return 'close' in self._changeset.extra
    def extra(self):
        """Return a dict of extra information."""
        return self._changeset.extra
    def tags(self):
        """Return a list of byte tag names"""
        return self._repo.nodetags(self._node)
    def bookmarks(self):
        """Return a list of byte bookmark names."""
        return self._repo.nodebookmarks(self._node)
    def phase(self):
        return self._repo._phasecache.phase(self._repo, self._rev)
    def hidden(self):
        # True when this revision is filtered out of the 'visible' view.
        return self._rev in repoview.filterrevs(self._repo, 'visible')

    def isinmemory(self):
        # Committed changesets are always backed by storage.
        return False
604
598
    def children(self):
        """return list of changectx contexts for each child changeset.

        This returns only the immediate child changesets. Use descendants() to
        recursively walk children.
        """
        c = self._repo.changelog.children(self._node)
        return [changectx(self._repo, x) for x in c]

    def ancestors(self):
        # Lazily yield a changectx for every ancestor revision.
        for a in self._repo.changelog.ancestors([self._rev]):
            yield changectx(self._repo, a)

    def descendants(self):
        """Recursively yield all children of the changeset.

        For just the immediate children, use children()
        """
        for d in self._repo.changelog.descendants([self._rev]):
            yield changectx(self._repo, d)

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        # Resolve the file node from this changeset's manifest unless the
        # caller pinned a specific file revision.
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(self._repo, path, fileid=fileid,
                       changectx=self, filelog=filelog)
632
626
    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            # working context has no node; use its first parent instead
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            # no common ancestor at all: the null revision
            anc = nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            # experimental config: merge.preferancestor
            # try each configured preference in order; the for/else falls
            # back to the plain revlog ancestor when none matches
            for r in self._repo.ui.configlist('merge', 'preferancestor'):
                try:
                    ctx = changectx(self._repo, r)
                except error.RepoLookupError:
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                # tell the user which candidate was chosen and how to
                # select one of the alternatives
                self._repo.ui.status(
                    (_("note: using %s as ancestor of %s and %s\n") %
                     (short(anc), short(self._node), short(n2))) +
                    ''.join(_(" alternatively, use --config "
                              "merge.preferancestor=%s\n") %
                            short(n) for n in sorted(cahs) if n != anc))
        return changectx(self._repo, anc)

    def descendant(self, other):
        """True if other is descendant of this changeset"""
        return self._repo.changelog.descendant(self._rev, other._rev)
672
666
    def walk(self, match):
        '''Generates matching file names.'''

        # Wrap match.bad method to have message with nodeid
        def bad(fn, msg):
            # The manifest doesn't know about subrepos, so don't complain about
            # paths into valid subrepos.
            if any(fn == s or fn.startswith(s + '/')
                   for s in self.substate):
                return
            match.bad(fn, _('no such file in rev %s') % self)

        m = matchmod.badmatch(match, bad)
        return self._manifest.walk(m)

    def matches(self, match):
        # Alias kept for API symmetry with other context classes.
        return self.walk(match)
690
684
class basefilectx(object):
    """A filecontext object represents the common logic for its children:
    filectx: read-only access to a filerevision that is already present
             in the repo,
    workingfilectx: a filecontext that represents files from the working
                    directory,
    memfilectx: a filecontext that represents files in-memory,
    overlayfilectx: duplicate another filecontext with some fields overridden.
    """
    @propertycache
    def _filelog(self):
        # Filelog (per-file revlog) for this path; cached.
        return self._repo.file(self._path)

    @propertycache
    def _changeid(self):
        # Resolve the changelog revision this file revision belongs to.
        # The lookup order matters: an explicit id wins, then an attached
        # changectx, then lazy linkrev adjustment, then the raw linkrev.
        if r'_changeid' in self.__dict__:
            return self._changeid
        elif r'_changectx' in self.__dict__:
            return self._changectx.rev()
        elif r'_descendantrev' in self.__dict__:
            # this file context was created from a revision with a known
            # descendant, we can (lazily) correct for linkrev aliases
            return self._adjustlinkrev(self._descendantrev)
        else:
            return self._filelog.linkrev(self._filerev)

    @propertycache
    def _filenode(self):
        # File node id: from an explicit fileid if one was given,
        # otherwise looked up in the attached changeset's manifest.
        if r'_fileid' in self.__dict__:
            return self._filelog.lookup(self._fileid)
        else:
            return self._changectx.filenode(self._path)

    @propertycache
    def _filerev(self):
        # Filelog revision number for this file node.
        return self._filelog.rev(self._filenode)

    @propertycache
    def _repopath(self):
        # Repository-relative path (same as _path for committed files).
        return self._path
731
725
    def __nonzero__(self):
        # Truthiness == "the file revision exists"; resolving _filenode
        # raises LookupError when the file is absent.
        try:
            self._filenode
            return True
        except error.LookupError:
            # file is missing
            return False

    __bool__ = __nonzero__

    def __bytes__(self):
        try:
            return "%s@%s" % (self.path(), self._changectx)
        except error.LookupError:
            # file not present in the changeset: unknown revision marker
            return "%s@???" % self.path()

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return r"<%s %s>" % (type(self).__name__, str(self))

    def __hash__(self):
        # Hash on (path, filenode) when resolvable; fall back to identity
        # (consistent with __eq__ falling back to False on AttributeError).
        try:
            return hash((self._path, self._filenode))
        except AttributeError:
            return id(self)

    def __eq__(self, other):
        # Equal only for the exact same class, path and file node.
        try:
            return (type(self) == type(other) and self._path == other._path
                    and self._filenode == other._filenode)
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)
768
762
    # Thin accessors: most delegate to the cached properties above or to
    # the associated changectx.
    def filerev(self):
        return self._filerev
    def filenode(self):
        return self._filenode
    @propertycache
    def _flags(self):
        return self._changectx.flags(self._path)
    def flags(self):
        return self._flags
    def filelog(self):
        return self._filelog
    def rev(self):
        # Changelog revision (possibly linkrev-adjusted, see _changeid).
        return self._changeid
    def linkrev(self):
        # Raw linkrev from the filelog; may be shadowed, see introrev().
        return self._filelog.linkrev(self._filerev)
    def node(self):
        return self._changectx.node()
    def hex(self):
        return self._changectx.hex()
    def user(self):
        return self._changectx.user()
    def date(self):
        return self._changectx.date()
    def files(self):
        return self._changectx.files()
    def description(self):
        return self._changectx.description()
    def branch(self):
        return self._changectx.branch()
    def extra(self):
        return self._changectx.extra()
    def phase(self):
        return self._changectx.phase()
    def phasestr(self):
        return self._changectx.phasestr()
    def obsolete(self):
        return self._changectx.obsolete()
    def instabilities(self):
        return self._changectx.instabilities()
    def manifest(self):
        return self._changectx.manifest()
    def changectx(self):
        return self._changectx
    def renamed(self):
        # Copy/rename source info; _copied is set by subclasses.
        return self._copied
    def repo(self):
        return self._repo
    def size(self):
        return len(self.data())

    def path(self):
        return self._path

    def isbinary(self):
        # Missing file data (IOError) is treated as "not binary".
        try:
            return stringutil.binary(self.data())
        except IOError:
            return False
    def isexec(self):
        return 'x' in self.flags()
    def islink(self):
        return 'l' in self.flags()

    def isabsent(self):
        """whether this filectx represents a file not in self._changectx

        This is mainly for merge code to detect change/delete conflicts. This is
        expected to be True for all subclasses of basectx."""
        return False
838
832
    # Subclasses that implement their own comparison set this to True so
    # cmp() below defers to them.
    _customcmp = False
    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        if fctx._customcmp:
            return fctx.cmp(self)

        # Only do the (potentially expensive) content comparison when the
        # sizes could plausibly match; the mixed and/or grouping below is
        # deliberate and order-sensitive.
        if (fctx._filenode is None
            and (self._repo._encodefilterpats
                 # if file data starts with '\1\n', empty metadata block is
                 # prepended, which adds 4 bytes to filelog.size().
                 or self.size() - 4 == fctx.size())
            or self.size() == fctx.size()):
            return self._filelog.cmp(self._filenode, fctx.data())

        return True
857
851
    def _adjustlinkrev(self, srcrev, inclusive=False):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked
        """
        repo = self._repo
        cl = repo.unfiltered().changelog
        mfl = repo.manifestlog
        # fetch the linkrev
        lkr = self.linkrev()
        # hack to reuse ancestor computation when searching for renames
        memberanc = getattr(self, '_ancestrycontext', None)
        iteranc = None
        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True # we skipped the real (revless) source
        else:
            revs = [srcrev]
        if memberanc is None:
            memberanc = iteranc = cl.ancestors(revs, lkr,
                                               inclusive=inclusive)
        # check if this linkrev is an ancestor of srcrev
        if lkr not in memberanc:
            if iteranc is None:
                iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
            fnode = self._filenode
            path = self._path
            for a in iteranc:
                ac = cl.read(a) # get changeset data (we avoid object creation)
                if path in ac[3]: # checking the 'files' field.
                    # The file has been touched, check if the content is
                    # similar to the one we search for.
                    if fnode == mfl[ac[0]].readfast().get(path):
                        return a
            # In theory, we should never get out of that loop without a result.
            # But if manifest uses a buggy file revision (not children of the
            # one it replaces) we could. Such a buggy situation will likely
            # result is crash somewhere else at to some point.
            return lkr
903
897
    def introrev(self):
        """return the rev of the changeset which introduced this file revision

        This method is different from linkrev because it take into account the
        changeset the filectx was created from. It ensures the returned
        revision is one of its ancestors. This prevents bugs from
        'linkrev-shadowing' when a file revision is used by multiple
        changesets.
        """
        lkr = self.linkrev()
        attrs = vars(self)
        # With no attached changeset there is nothing to adjust against.
        noctx = not (r'_changeid' in attrs or r'_changectx' in attrs)
        if noctx or self.rev() == lkr:
            return self.linkrev()
        return self._adjustlinkrev(self.rev(), inclusive=True)

    def introfilectx(self):
        """Return filectx having identical contents, but pointing to the
        changeset revision where this filectx was introduced"""
        introrev = self.introrev()
        if self.rev() == introrev:
            return self
        return self.filectx(self.filenode(), changeid=introrev)
927
921
    def _parentfilectx(self, path, fileid, filelog):
        """create parent filectx keeping ancestry info for _adjustlinkrev()"""
        fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
        if r'_changeid' in vars(self) or r'_changectx' in vars(self):
            # If self is associated with a changeset (probably explicitly
            # fed), ensure the created filectx is associated with a
            # changeset that is an ancestor of self.changectx.
            # This lets us later use _adjustlinkrev to get a correct link.
            fctx._descendantrev = self.rev()
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        elif r'_descendantrev' in vars(self):
            # Otherwise propagate _descendantrev if we have one associated.
            fctx._descendantrev = self._descendantrev
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        return fctx

    def parents(self):
        # Parent file contexts, with rename information folded in.
        _path = self._path
        fl = self._filelog
        parents = self._filelog.parents(self._filenode)
        pl = [(_path, node, fl) for node in parents if node != nullid]

        r = fl.renamed(self._filenode)
        if r:
            # - In the simple rename case, both parent are nullid, pl is empty.
            # - In case of merge, only one of the parent is null id and should
            # be replaced with the rename information. This parent is -always-
            # the first one.
            #
            # As null id have always been filtered out in the previous list
            # comprehension, inserting to 0 will always result in "replacing
            # first nullid parent with rename information.
            pl.insert(0, (r[0], r[1], self._repo.file(r[0])))

        return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
963
957
964 def p1(self):
958 def p1(self):
965 return self.parents()[0]
959 return self.parents()[0]
966
960
967 def p2(self):
961 def p2(self):
968 p = self.parents()
962 p = self.parents()
969 if len(p) == 2:
963 if len(p) == 2:
970 return p[1]
964 return p[1]
971 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
965 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
972
966
    def annotate(self, follow=False, skiprevs=None, diffopts=None):
        """Returns a list of annotateline objects for each line in the file

        - line.fctx is the filectx of the node where that line was last changed
        - line.lineno is the line number at the first appearance in the managed
          file
        - line.text is the data on that line (including newline character)
        """
        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            # Cut _descendantrev here to mitigate the penalty of lazy linkrev
            # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
            # from the topmost introrev (= srcrev) down to p.linkrev() if it
            # isn't an ancestor of the srcrev.
            f._changeid
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
            # from the cache to save time
            for p in pl:
                if not r'_filelog' in p.__dict__:
                    p._filelog = getlog(p.path())

            return pl

        # use linkrev to find the first changeset where self appeared
        base = self.introfilectx()
        if getattr(base, '_ancestrycontext', None) is None:
            cl = self._repo.changelog
            if base.rev() is None:
                # wctx is not inclusive, but works because _ancestrycontext
                # is used to test filelog revisions
                ac = cl.ancestors([p.rev() for p in base.parents()],
                                  inclusive=True)
            else:
                ac = cl.ancestors([base.rev()], inclusive=True)
            base._ancestrycontext = ac

        return dagop.annotate(base, parents, skiprevs=skiprevs,
                              diffopts=diffopts)
1018
1012
1019 def ancestors(self, followfirst=False):
1013 def ancestors(self, followfirst=False):
1020 visit = {}
1014 visit = {}
1021 c = self
1015 c = self
1022 if followfirst:
1016 if followfirst:
1023 cut = 1
1017 cut = 1
1024 else:
1018 else:
1025 cut = None
1019 cut = None
1026
1020
1027 while True:
1021 while True:
1028 for parent in c.parents()[:cut]:
1022 for parent in c.parents()[:cut]:
1029 visit[(parent.linkrev(), parent.filenode())] = parent
1023 visit[(parent.linkrev(), parent.filenode())] = parent
1030 if not visit:
1024 if not visit:
1031 break
1025 break
1032 c = visit.pop(max(visit))
1026 c = visit.pop(max(visit))
1033 yield c
1027 yield c
1034
1028
    def decodeddata(self):
        """Returns `data()` after running repository decoding filters.

        This is often equivalent to how the data would be expressed on disk.
        """
        return self._repo.wwritedata(self.path(), self.data())
1041
1035
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        # At least one way of locating the file revision must be given.
        assert (changeid is not None
                or fileid is not None
                or changectx is not None), \
                ("bad args: changeid=%r, fileid=%r, changectx=%r"
                 % (changeid, fileid, changectx))

        # Only assign what was provided; anything missing is computed
        # lazily by the corresponding @propertycache on first access.
        if filelog is not None:
            self._filelog = filelog

        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        try:
            return changectx(self._repo, self._changeid)
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository.  When the
            # repository is filtered this may lead to `filectx` trying to build
            # `changectx` for filtered revision. In such case we fallback to
            # creating `changectx` on the unfiltered version of the reposition.
            # This fallback should not be an issue because `changectx` from
            # `filectx` are not used in complex operations that care about
            # filtering.
            #
            # This fallback is a cheap and dirty fix that prevent several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better as "crash"
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when solving linkrev issue are on the table.
            return changectx(self._repo.unfiltered(), self._changeid)

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog, changeid=changeid)

    def rawdata(self):
        # raw=True bypasses revlog flag processors (e.g. censor/LFS).
        return self._filelog.revision(self._filenode, raw=True)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        """Return the (filtered) file content for this revision.

        Raises Abort on a censored node unless censor.policy is "ignore",
        in which case an empty string is returned.
        """
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config("censor", "policy") == "ignore":
                return ""
            raise error.Abort(_("censored node: %s") % short(self._filenode),
                              hint=_("set censor.policy to ignore errors"))

    def size(self):
        # Size as recorded by the filelog for this file revision.
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return renamed

        if self.rev() == self.linkrev():
            # this changeset introduced the file revision: report the copy
            return renamed

        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    # a parent already has this exact file revision, so the
                    # rename did not happen in this changeset
                    return None
            except error.LookupError:
                # file does not exist in this parent; keep looking
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]
1147
1141
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        # text is the commit message; user/date/changes, when given,
        # pre-populate values otherwise computed lazily by the
        # @propertycache definitions below.
        self._repo = repo
        self._rev = None   # not committed yet: no revision number
        self._node = None  # ...and no node hash
        self._text = text
        if date:
            self._date = dateutil.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise error.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
        if self._extra['branch'] == '':
            self._extra['branch'] = 'default'

    def __bytes__(self):
        # e.g. "abcdef012345+": first parent plus a "dirty" marker
        return bytes(self._parents[0]) + "+"

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func

    @propertycache
    def _flagfunc(self):
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    @propertycache
    def _status(self):
        # default when no explicit `changes` was passed to __init__
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        ui = self._repo.ui
        # devel.default-date allows tests to pin a deterministic date
        date = ui.configdate('devel', 'default-date')
        if date is None:
            date = dateutil.makedate()
        return date

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        return None
    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        # all files touched: modified + added + removed, sorted
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)

    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def isinmemory(self):
        return False

    def tags(self):
        return []

    def bookmarks(self):
        # an uncommitted context carries the bookmarks of all its parents
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        phase = phases.draft # default phase to draft
        for p in self.parents():
            # never a lower phase than any parent
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        """Return the flags ('l', 'x' or '') for path, '' if unknown."""
        if r'_manifest' in self.__dict__:
            # a manifest was already materialized; trust it
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def walk(self, match):
        '''Generates matching file names.'''
        return sorted(self._repo.dirstate.walk(match,
                                               subrepos=sorted(self.substate),
                                               unknown=True, ignored=False))

    def matches(self, match):
        return sorted(self._repo.dirstate.matches(match))

    def ancestors(self):
        # parents first, then the changelog ancestors of those parents
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]):
            yield changectx(self._repo, a)

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.normal(f)
            for f in self.removed():
                self._repo.dirstate.drop(f)
            self._repo.dirstate.setparents(node)

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())

    def dirty(self, missing=False, merge=True, branch=True):
        return False
1348
1342
1349 class workingctx(committablectx):
1343 class workingctx(committablectx):
1350 """A workingctx object makes access to data related to
1344 """A workingctx object makes access to data related to
1351 the current working directory convenient.
1345 the current working directory convenient.
1352 date - any valid date string or (unixtime, offset), or None.
1346 date - any valid date string or (unixtime, offset), or None.
1353 user - username string, or None.
1347 user - username string, or None.
1354 extra - a dictionary of extra values, or None.
1348 extra - a dictionary of extra values, or None.
1355 changes - a list of file lists as returned by localrepo.status()
1349 changes - a list of file lists as returned by localrepo.status()
1356 or None to use the repository status.
1350 or None to use the repository status.
1357 """
1351 """
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        # All state handling is inherited from committablectx; workingctx
        # only specializes behavior on top of it.
        super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1361
1355
1362 def __iter__(self):
1356 def __iter__(self):
1363 d = self._repo.dirstate
1357 d = self._repo.dirstate
1364 for f in d:
1358 for f in d:
1365 if d[f] != 'r':
1359 if d[f] != 'r':
1366 yield f
1360 yield f
1367
1361
1368 def __contains__(self, key):
1362 def __contains__(self, key):
1369 return self._repo.dirstate[key] not in "?r"
1363 return self._repo.dirstate[key] not in "?r"
1370
1364
    def hex(self):
        # the working directory is identified by the fixed pseudo-node wdirid
        return hex(wdirid)
1373
1367
1374 @propertycache
1368 @propertycache
1375 def _parents(self):
1369 def _parents(self):
1376 p = self._repo.dirstate.parents()
1370 p = self._repo.dirstate.parents()
1377 if p[1] == nullid:
1371 if p[1] == nullid:
1378 p = p[:-1]
1372 p = p[:-1]
1379 return [changectx(self._repo, x) for x in p]
1373 return [changectx(self._repo, x) for x in p]
1380
1374
    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        return workingfilectx(self._repo, path, workingctx=self,
                              filelog=filelog)
1385
1379
    def dirty(self, missing=False, merge=True, branch=True):
        "check whether a working directory is modified"
        # check subrepos first
        for s in sorted(self.substate):
            if self.sub(s).dirty(missing=missing):
                return True
        # check current working dir
        # NOTE: the or-chain may return a truthy non-boolean (e.g. the p2
        # context or a file list); callers must treat the result as truthy.
        return ((merge and self.p2()) or
                (branch and self.branch() != self.p1().branch()) or
                self.modified() or self.added() or self.removed() or
                (missing and self.deleted()))
1397
1391
    def add(self, list, prefix=""):
        """Schedule the given files for addition; return rejected names.

        Emits warnings (without rejecting) for very large files and for
        files already tracked.
        """
        with self._repo.wlock():
            ui, ds = self._repo.ui, self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                # ds.pathto() returns an absolute file when this is invoked from
                # the keyword extension. That gets flagged as non-portable on
                # Windows, since it contains the drive letter and colon.
                scmutil.checkportable(ui, os.path.join(prefix, f))
                try:
                    st = lstat(f)
                except OSError:
                    ui.warn(_("%s does not exist!\n") % uipath(f))
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    # warn but still allow the add
                    ui.warn(_("%s: up to %d MB of RAM may be required "
                              "to manage this file\n"
                              "(use 'hg revert %s' to cancel the "
                              "pending addition)\n")
                            % (f, 3 * st.st_size // 1000000, uipath(f)))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(_("%s not added: only files and symlinks "
                              "supported currently\n") % uipath(f))
                    rejected.append(f)
                elif ds[f] in 'amn':
                    # already added, merged, or normal: nothing to do
                    ui.warn(_("%s already tracked!\n") % uipath(f))
                elif ds[f] == 'r':
                    # previously removed: resurrect instead of re-adding
                    ds.normallookup(f)
                else:
                    ds.add(f)
            return rejected
1432
1426
1433 def forget(self, files, prefix=""):
1427 def forget(self, files, prefix=""):
1434 with self._repo.wlock():
1428 with self._repo.wlock():
1435 ds = self._repo.dirstate
1429 ds = self._repo.dirstate
1436 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1430 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1437 rejected = []
1431 rejected = []
1438 for f in files:
1432 for f in files:
1439 if f not in self._repo.dirstate:
1433 if f not in self._repo.dirstate:
1440 self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
1434 self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
1441 rejected.append(f)
1435 rejected.append(f)
1442 elif self._repo.dirstate[f] != 'a':
1436 elif self._repo.dirstate[f] != 'a':
1443 self._repo.dirstate.remove(f)
1437 self._repo.dirstate.remove(f)
1444 else:
1438 else:
1445 self._repo.dirstate.drop(f)
1439 self._repo.dirstate.drop(f)
1446 return rejected
1440 return rejected
1447
1441
1448 def undelete(self, list):
1442 def undelete(self, list):
1449 pctxs = self.parents()
1443 pctxs = self.parents()
1450 with self._repo.wlock():
1444 with self._repo.wlock():
1451 ds = self._repo.dirstate
1445 ds = self._repo.dirstate
1452 for f in list:
1446 for f in list:
1453 if self._repo.dirstate[f] != 'r':
1447 if self._repo.dirstate[f] != 'r':
1454 self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
1448 self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
1455 else:
1449 else:
1456 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1450 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1457 t = fctx.data()
1451 t = fctx.data()
1458 self._repo.wwrite(f, t, fctx.flags())
1452 self._repo.wwrite(f, t, fctx.flags())
1459 self._repo.dirstate.normal(f)
1453 self._repo.dirstate.normal(f)
1460
1454
    def copy(self, source, dest):
        """Record in the dirstate that `dest` was copied from `source`.

        Warns and does nothing if `dest` is missing or is not a regular
        file or symlink.
        """
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError as err:
            # only ENOENT is expected; anything else is a real error
            if err.errno != errno.ENOENT:
                raise
            self._repo.ui.warn(_("%s does not exist!\n")
                               % self._repo.dirstate.pathto(dest))
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(_("copy failed: %s is not a file or a "
                                 "symbolic link\n")
                               % self._repo.dirstate.pathto(dest))
        else:
            with self._repo.wlock():
                if self._repo.dirstate[dest] in '?':
                    # unknown file: start tracking it
                    self._repo.dirstate.add(dest)
                elif self._repo.dirstate[dest] in 'r':
                    # previously removed: resurrect the entry
                    self._repo.dirstate.normallookup(dest)
                self._repo.dirstate.copy(source, dest)
1481
1475
    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        """Build a matcher for this working context's files."""
        r = self._repo

        # Only a case insensitive filesystem needs magic to translate user input
        # to actual case in the filesystem.
        icasefs = not util.fscasesensitive(r.root)
        return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
                              default, auditor=r.auditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn,
                              icasefs=icasefs)
1493
1487
1494 def _filtersuspectsymlink(self, files):
1488 def _filtersuspectsymlink(self, files):
1495 if not files or self._repo.dirstate._checklink:
1489 if not files or self._repo.dirstate._checklink:
1496 return files
1490 return files
1497
1491
1498 # Symlink placeholders may get non-symlink-like contents
1492 # Symlink placeholders may get non-symlink-like contents
1499 # via user error or dereferencing by NFS or Samba servers,
1493 # via user error or dereferencing by NFS or Samba servers,
1500 # so we filter out any placeholders that don't look like a
1494 # so we filter out any placeholders that don't look like a
1501 # symlink
1495 # symlink
1502 sane = []
1496 sane = []
1503 for f in files:
1497 for f in files:
1504 if self.flags(f) == 'l':
1498 if self.flags(f) == 'l':
1505 d = self[f].data()
1499 d = self[f].data()
1506 if (d == '' or len(d) >= 1024 or '\n' in d
1500 if (d == '' or len(d) >= 1024 or '\n' in d
1507 or stringutil.binary(d)):
1501 or stringutil.binary(d)):
1508 self._repo.ui.debug('ignoring suspect symlink placeholder'
1502 self._repo.ui.debug('ignoring suspect symlink placeholder'
1509 ' "%s"\n' % f)
1503 ' "%s"\n' % f)
1510 continue
1504 continue
1511 sane.append(f)
1505 sane.append(f)
1512 return sane
1506 return sane
1513
1507
    def _checklookup(self, files):
        """Re-check possibly-clean files against the first parent.

        Returns a (modified, deleted, fixup) triple of file lists, where
        `fixup` holds files that turned out to be clean and whose dirstate
        entries can be refreshed.
        """
        # check for any possibly clean files
        if not files:
            return [], [], []

        modified = []
        deleted = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            try:
                # This will return True for a file that got replaced by a
                # directory in the interim, but fixing that is pretty hard.
                if (f not in pctx or self.flags(f) != pctx.flags(f)
                    or pctx[f].cmp(self[f])):
                    modified.append(f)
                else:
                    fixup.append(f)
            except (IOError, OSError):
                # A file become inaccessible in between? Mark it as deleted,
                # matching dirstate behavior (issue5584).
                # The dirstate has more complex behavior around whether a
                # missing file matches a directory, etc, but we don't need to
                # bother with that: if f has made it to this point, we're sure
                # it's in the dirstate.
                deleted.append(f)

        return modified, deleted, fixup
1543
1537
1544 def _poststatusfixup(self, status, fixup):
1538 def _poststatusfixup(self, status, fixup):
1545 """update dirstate for files that are actually clean"""
1539 """update dirstate for files that are actually clean"""
1546 poststatus = self._repo.postdsstatus()
1540 poststatus = self._repo.postdsstatus()
1547 if fixup or poststatus:
1541 if fixup or poststatus:
1548 try:
1542 try:
1549 oldid = self._repo.dirstate.identity()
1543 oldid = self._repo.dirstate.identity()
1550
1544
1551 # updating the dirstate is optional
1545 # updating the dirstate is optional
1552 # so we don't wait on the lock
1546 # so we don't wait on the lock
1553 # wlock can invalidate the dirstate, so cache normal _after_
1547 # wlock can invalidate the dirstate, so cache normal _after_
1554 # taking the lock
1548 # taking the lock
1555 with self._repo.wlock(False):
1549 with self._repo.wlock(False):
1556 if self._repo.dirstate.identity() == oldid:
1550 if self._repo.dirstate.identity() == oldid:
1557 if fixup:
1551 if fixup:
1558 normal = self._repo.dirstate.normal
1552 normal = self._repo.dirstate.normal
1559 for f in fixup:
1553 for f in fixup:
1560 normal(f)
1554 normal(f)
1561 # write changes out explicitly, because nesting
1555 # write changes out explicitly, because nesting
1562 # wlock at runtime may prevent 'wlock.release()'
1556 # wlock at runtime may prevent 'wlock.release()'
1563 # after this block from doing so for subsequent
1557 # after this block from doing so for subsequent
1564 # changing files
1558 # changing files
1565 tr = self._repo.currenttransaction()
1559 tr = self._repo.currenttransaction()
1566 self._repo.dirstate.write(tr)
1560 self._repo.dirstate.write(tr)
1567
1561
1568 if poststatus:
1562 if poststatus:
1569 for ps in poststatus:
1563 for ps in poststatus:
1570 ps(self, status)
1564 ps(self, status)
1571 else:
1565 else:
1572 # in this case, writing changes out breaks
1566 # in this case, writing changes out breaks
1573 # consistency, because .hg/dirstate was
1567 # consistency, because .hg/dirstate was
1574 # already changed simultaneously after last
1568 # already changed simultaneously after last
1575 # caching (see also issue5584 for detail)
1569 # caching (see also issue5584 for detail)
1576 self._repo.ui.debug('skip updating dirstate: '
1570 self._repo.ui.debug('skip updating dirstate: '
1577 'identity mismatch\n')
1571 'identity mismatch\n')
1578 except error.LockError:
1572 except error.LockError:
1579 pass
1573 pass
1580 finally:
1574 finally:
1581 # Even if the wlock couldn't be grabbed, clear out the list.
1575 # Even if the wlock couldn't be grabbed, clear out the list.
1582 self._repo.clearpostdsstatus()
1576 self._repo.clearpostdsstatus()
1583
1577
1584 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1578 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1585 '''Gets the status from the dirstate -- internal use only.'''
1579 '''Gets the status from the dirstate -- internal use only.'''
1586 subrepos = []
1580 subrepos = []
1587 if '.hgsub' in self:
1581 if '.hgsub' in self:
1588 subrepos = sorted(self.substate)
1582 subrepos = sorted(self.substate)
1589 cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
1583 cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
1590 clean=clean, unknown=unknown)
1584 clean=clean, unknown=unknown)
1591
1585
1592 # check for any possibly clean files
1586 # check for any possibly clean files
1593 fixup = []
1587 fixup = []
1594 if cmp:
1588 if cmp:
1595 modified2, deleted2, fixup = self._checklookup(cmp)
1589 modified2, deleted2, fixup = self._checklookup(cmp)
1596 s.modified.extend(modified2)
1590 s.modified.extend(modified2)
1597 s.deleted.extend(deleted2)
1591 s.deleted.extend(deleted2)
1598
1592
1599 if fixup and clean:
1593 if fixup and clean:
1600 s.clean.extend(fixup)
1594 s.clean.extend(fixup)
1601
1595
1602 self._poststatusfixup(s, fixup)
1596 self._poststatusfixup(s, fixup)
1603
1597
1604 if match.always():
1598 if match.always():
1605 # cache for performance
1599 # cache for performance
1606 if s.unknown or s.ignored or s.clean:
1600 if s.unknown or s.ignored or s.clean:
1607 # "_status" is cached with list*=False in the normal route
1601 # "_status" is cached with list*=False in the normal route
1608 self._status = scmutil.status(s.modified, s.added, s.removed,
1602 self._status = scmutil.status(s.modified, s.added, s.removed,
1609 s.deleted, [], [], [])
1603 s.deleted, [], [], [])
1610 else:
1604 else:
1611 self._status = s
1605 self._status = s
1612
1606
1613 return s
1607 return s
1614
1608
1615 @propertycache
1609 @propertycache
1616 def _manifest(self):
1610 def _manifest(self):
1617 """generate a manifest corresponding to the values in self._status
1611 """generate a manifest corresponding to the values in self._status
1618
1612
1619 This reuse the file nodeid from parent, but we use special node
1613 This reuse the file nodeid from parent, but we use special node
1620 identifiers for added and modified files. This is used by manifests
1614 identifiers for added and modified files. This is used by manifests
1621 merge to see that files are different and by update logic to avoid
1615 merge to see that files are different and by update logic to avoid
1622 deleting newly added files.
1616 deleting newly added files.
1623 """
1617 """
1624 return self._buildstatusmanifest(self._status)
1618 return self._buildstatusmanifest(self._status)
1625
1619
1626 def _buildstatusmanifest(self, status):
1620 def _buildstatusmanifest(self, status):
1627 """Builds a manifest that includes the given status results."""
1621 """Builds a manifest that includes the given status results."""
1628 parents = self.parents()
1622 parents = self.parents()
1629
1623
1630 man = parents[0].manifest().copy()
1624 man = parents[0].manifest().copy()
1631
1625
1632 ff = self._flagfunc
1626 ff = self._flagfunc
1633 for i, l in ((addednodeid, status.added),
1627 for i, l in ((addednodeid, status.added),
1634 (modifiednodeid, status.modified)):
1628 (modifiednodeid, status.modified)):
1635 for f in l:
1629 for f in l:
1636 man[f] = i
1630 man[f] = i
1637 try:
1631 try:
1638 man.setflag(f, ff(f))
1632 man.setflag(f, ff(f))
1639 except OSError:
1633 except OSError:
1640 pass
1634 pass
1641
1635
1642 for f in status.deleted + status.removed:
1636 for f in status.deleted + status.removed:
1643 if f in man:
1637 if f in man:
1644 del man[f]
1638 del man[f]
1645
1639
1646 return man
1640 return man
1647
1641
1648 def _buildstatus(self, other, s, match, listignored, listclean,
1642 def _buildstatus(self, other, s, match, listignored, listclean,
1649 listunknown):
1643 listunknown):
1650 """build a status with respect to another context
1644 """build a status with respect to another context
1651
1645
1652 This includes logic for maintaining the fast path of status when
1646 This includes logic for maintaining the fast path of status when
1653 comparing the working directory against its parent, which is to skip
1647 comparing the working directory against its parent, which is to skip
1654 building a new manifest if self (working directory) is not comparing
1648 building a new manifest if self (working directory) is not comparing
1655 against its parent (repo['.']).
1649 against its parent (repo['.']).
1656 """
1650 """
1657 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1651 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1658 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1652 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1659 # might have accidentally ended up with the entire contents of the file
1653 # might have accidentally ended up with the entire contents of the file
1660 # they are supposed to be linking to.
1654 # they are supposed to be linking to.
1661 s.modified[:] = self._filtersuspectsymlink(s.modified)
1655 s.modified[:] = self._filtersuspectsymlink(s.modified)
1662 if other != self._repo['.']:
1656 if other != self._repo['.']:
1663 s = super(workingctx, self)._buildstatus(other, s, match,
1657 s = super(workingctx, self)._buildstatus(other, s, match,
1664 listignored, listclean,
1658 listignored, listclean,
1665 listunknown)
1659 listunknown)
1666 return s
1660 return s
1667
1661
1668 def _matchstatus(self, other, match):
1662 def _matchstatus(self, other, match):
1669 """override the match method with a filter for directory patterns
1663 """override the match method with a filter for directory patterns
1670
1664
1671 We use inheritance to customize the match.bad method only in cases of
1665 We use inheritance to customize the match.bad method only in cases of
1672 workingctx since it belongs only to the working directory when
1666 workingctx since it belongs only to the working directory when
1673 comparing against the parent changeset.
1667 comparing against the parent changeset.
1674
1668
1675 If we aren't comparing against the working directory's parent, then we
1669 If we aren't comparing against the working directory's parent, then we
1676 just use the default match object sent to us.
1670 just use the default match object sent to us.
1677 """
1671 """
1678 if other != self._repo['.']:
1672 if other != self._repo['.']:
1679 def bad(f, msg):
1673 def bad(f, msg):
1680 # 'f' may be a directory pattern from 'match.files()',
1674 # 'f' may be a directory pattern from 'match.files()',
1681 # so 'f not in ctx1' is not enough
1675 # so 'f not in ctx1' is not enough
1682 if f not in other and not other.hasdir(f):
1676 if f not in other and not other.hasdir(f):
1683 self._repo.ui.warn('%s: %s\n' %
1677 self._repo.ui.warn('%s: %s\n' %
1684 (self._repo.dirstate.pathto(f), msg))
1678 (self._repo.dirstate.pathto(f), msg))
1685 match.bad = bad
1679 match.bad = bad
1686 return match
1680 return match
1687
1681
1688 def markcommitted(self, node):
1682 def markcommitted(self, node):
1689 super(workingctx, self).markcommitted(node)
1683 super(workingctx, self).markcommitted(node)
1690
1684
1691 sparse.aftercommit(self._repo, node)
1685 sparse.aftercommit(self._repo, node)
1692
1686
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""

    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        # a committable file context always "exists"
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def nodefor(ctx, path):
            # nullid when the parent manifest does not know the file
            return ctx._manifest.get(path, nullid)

        path = self._path
        flog = self._filelog
        parentctxs = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            candidates = [renamed + (None,)]
        else:
            candidates = [(path, nodefor(parentctxs[0], path), flog)]
        candidates.extend((path, nodefor(pctx, path), flog)
                          for pctx in parentctxs[1:])

        return [self._parentfilectx(p, fileid=n, filelog=l)
                for p, n, l in candidates if n != nullid]

    def children(self):
        # an uncommitted file has no children yet
        return []
1739
1733
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""

    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        return workingctx(self._repo)

    def data(self):
        # raw contents read from the working directory
        return self._repo.wread(self._path)

    def renamed(self):
        source = self._repo.dirstate.copied(self._path)
        if not source:
            return None
        p1manifest = self._changectx._parents[0]._manifest
        return source, p1manifest.get(source, nullid)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size

    def date(self):
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
        except OSError as inst:
            if inst.errno != errno.ENOENT:
                raise
            # file is gone: fall back to the changectx's date
            return (t, tz)

    def exists(self):
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        """wraps repo.wwrite"""
        self._repo.wwrite(self._path, data, flags,
                          backgroundclose=backgroundclose,
                          **kwargs)

    def markcopied(self, src):
        """marks this file a copy of `src`"""
        # only meaningful while the file is tracked (normal/merged/added)
        if self._repo.dirstate[self._path] in "nma":
            self._repo.dirstate.copy(src, self._path)

    def clearunknown(self):
        """Removes conflicting items in the working directory so that
        ``write()`` can be called successfully.
        """
        wvfs = self._repo.wvfs
        fn = self._path
        wvfs.audit(fn)
        if wvfs.isdir(fn) and not wvfs.islink(fn):
            # a real directory is squatting on our path
            wvfs.rmtree(fn, forcibly=True)
        if self._repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
            # remove the closest ancestor that is a file or symlink
            for ancestor in reversed(list(util.finddirs(fn))):
                if wvfs.isfileorlink(ancestor):
                    wvfs.unlink(ancestor)
                    break

    def setflags(self, l, x):
        self._repo.wvfs.setflags(self._path, l, x)
1819
1813
1820 class overlayworkingctx(committablectx):
1814 class overlayworkingctx(committablectx):
1821 """Wraps another mutable context with a write-back cache that can be
1815 """Wraps another mutable context with a write-back cache that can be
1822 converted into a commit context.
1816 converted into a commit context.
1823
1817
1824 self._cache[path] maps to a dict with keys: {
1818 self._cache[path] maps to a dict with keys: {
1825 'exists': bool?
1819 'exists': bool?
1826 'date': date?
1820 'date': date?
1827 'data': str?
1821 'data': str?
1828 'flags': str?
1822 'flags': str?
1829 'copied': str? (path or None)
1823 'copied': str? (path or None)
1830 }
1824 }
1831 If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
1825 If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
1832 is `False`, the file was deleted.
1826 is `False`, the file was deleted.
1833 """
1827 """
1834
1828
1835 def __init__(self, repo):
1829 def __init__(self, repo):
1836 super(overlayworkingctx, self).__init__(repo)
1830 super(overlayworkingctx, self).__init__(repo)
1837 self._repo = repo
1831 self._repo = repo
1838 self.clean()
1832 self.clean()
1839
1833
1840 def setbase(self, wrappedctx):
1834 def setbase(self, wrappedctx):
1841 self._wrappedctx = wrappedctx
1835 self._wrappedctx = wrappedctx
1842 self._parents = [wrappedctx]
1836 self._parents = [wrappedctx]
1843 # Drop old manifest cache as it is now out of date.
1837 # Drop old manifest cache as it is now out of date.
1844 # This is necessary when, e.g., rebasing several nodes with one
1838 # This is necessary when, e.g., rebasing several nodes with one
1845 # ``overlayworkingctx`` (e.g. with --collapse).
1839 # ``overlayworkingctx`` (e.g. with --collapse).
1846 util.clearcachedproperty(self, '_manifest')
1840 util.clearcachedproperty(self, '_manifest')
1847
1841
1848 def data(self, path):
1842 def data(self, path):
1849 if self.isdirty(path):
1843 if self.isdirty(path):
1850 if self._cache[path]['exists']:
1844 if self._cache[path]['exists']:
1851 if self._cache[path]['data']:
1845 if self._cache[path]['data']:
1852 return self._cache[path]['data']
1846 return self._cache[path]['data']
1853 else:
1847 else:
1854 # Must fallback here, too, because we only set flags.
1848 # Must fallback here, too, because we only set flags.
1855 return self._wrappedctx[path].data()
1849 return self._wrappedctx[path].data()
1856 else:
1850 else:
1857 raise error.ProgrammingError("No such file or directory: %s" %
1851 raise error.ProgrammingError("No such file or directory: %s" %
1858 path)
1852 path)
1859 else:
1853 else:
1860 return self._wrappedctx[path].data()
1854 return self._wrappedctx[path].data()
1861
1855
1862 @propertycache
1856 @propertycache
1863 def _manifest(self):
1857 def _manifest(self):
1864 parents = self.parents()
1858 parents = self.parents()
1865 man = parents[0].manifest().copy()
1859 man = parents[0].manifest().copy()
1866
1860
1867 flag = self._flagfunc
1861 flag = self._flagfunc
1868 for path in self.added():
1862 for path in self.added():
1869 man[path] = addednodeid
1863 man[path] = addednodeid
1870 man.setflag(path, flag(path))
1864 man.setflag(path, flag(path))
1871 for path in self.modified():
1865 for path in self.modified():
1872 man[path] = modifiednodeid
1866 man[path] = modifiednodeid
1873 man.setflag(path, flag(path))
1867 man.setflag(path, flag(path))
1874 for path in self.removed():
1868 for path in self.removed():
1875 del man[path]
1869 del man[path]
1876 return man
1870 return man
1877
1871
1878 @propertycache
1872 @propertycache
1879 def _flagfunc(self):
1873 def _flagfunc(self):
1880 def f(path):
1874 def f(path):
1881 return self._cache[path]['flags']
1875 return self._cache[path]['flags']
1882 return f
1876 return f
1883
1877
1884 def files(self):
1878 def files(self):
1885 return sorted(self.added() + self.modified() + self.removed())
1879 return sorted(self.added() + self.modified() + self.removed())
1886
1880
1887 def modified(self):
1881 def modified(self):
1888 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1882 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1889 self._existsinparent(f)]
1883 self._existsinparent(f)]
1890
1884
1891 def added(self):
1885 def added(self):
1892 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1886 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1893 not self._existsinparent(f)]
1887 not self._existsinparent(f)]
1894
1888
1895 def removed(self):
1889 def removed(self):
1896 return [f for f in self._cache.keys() if
1890 return [f for f in self._cache.keys() if
1897 not self._cache[f]['exists'] and self._existsinparent(f)]
1891 not self._cache[f]['exists'] and self._existsinparent(f)]
1898
1892
1899 def isinmemory(self):
1893 def isinmemory(self):
1900 return True
1894 return True
1901
1895
1902 def filedate(self, path):
1896 def filedate(self, path):
1903 if self.isdirty(path):
1897 if self.isdirty(path):
1904 return self._cache[path]['date']
1898 return self._cache[path]['date']
1905 else:
1899 else:
1906 return self._wrappedctx[path].date()
1900 return self._wrappedctx[path].date()
1907
1901
1908 def markcopied(self, path, origin):
1902 def markcopied(self, path, origin):
1909 if self.isdirty(path):
1903 if self.isdirty(path):
1910 self._cache[path]['copied'] = origin
1904 self._cache[path]['copied'] = origin
1911 else:
1905 else:
1912 raise error.ProgrammingError('markcopied() called on clean context')
1906 raise error.ProgrammingError('markcopied() called on clean context')
1913
1907
1914 def copydata(self, path):
1908 def copydata(self, path):
1915 if self.isdirty(path):
1909 if self.isdirty(path):
1916 return self._cache[path]['copied']
1910 return self._cache[path]['copied']
1917 else:
1911 else:
1918 raise error.ProgrammingError('copydata() called on clean context')
1912 raise error.ProgrammingError('copydata() called on clean context')
1919
1913
1920 def flags(self, path):
1914 def flags(self, path):
1921 if self.isdirty(path):
1915 if self.isdirty(path):
1922 if self._cache[path]['exists']:
1916 if self._cache[path]['exists']:
1923 return self._cache[path]['flags']
1917 return self._cache[path]['flags']
1924 else:
1918 else:
1925 raise error.ProgrammingError("No such file or directory: %s" %
1919 raise error.ProgrammingError("No such file or directory: %s" %
1926 self._path)
1920 self._path)
1927 else:
1921 else:
1928 return self._wrappedctx[path].flags()
1922 return self._wrappedctx[path].flags()
1929
1923
1930 def _existsinparent(self, path):
1924 def _existsinparent(self, path):
1931 try:
1925 try:
1932 # ``commitctx` raises a ``ManifestLookupError`` if a path does not
1926 # ``commitctx` raises a ``ManifestLookupError`` if a path does not
1933 # exist, unlike ``workingctx``, which returns a ``workingfilectx``
1927 # exist, unlike ``workingctx``, which returns a ``workingfilectx``
1934 # with an ``exists()`` function.
1928 # with an ``exists()`` function.
1935 self._wrappedctx[path]
1929 self._wrappedctx[path]
1936 return True
1930 return True
1937 except error.ManifestLookupError:
1931 except error.ManifestLookupError:
1938 return False
1932 return False
1939
1933
1940 def _auditconflicts(self, path):
1934 def _auditconflicts(self, path):
1941 """Replicates conflict checks done by wvfs.write().
1935 """Replicates conflict checks done by wvfs.write().
1942
1936
1943 Since we never write to the filesystem and never call `applyupdates` in
1937 Since we never write to the filesystem and never call `applyupdates` in
1944 IMM, we'll never check that a path is actually writable -- e.g., because
1938 IMM, we'll never check that a path is actually writable -- e.g., because
1945 it adds `a/foo`, but `a` is actually a file in the other commit.
1939 it adds `a/foo`, but `a` is actually a file in the other commit.
1946 """
1940 """
1947 def fail(path, component):
1941 def fail(path, component):
1948 # p1() is the base and we're receiving "writes" for p2()'s
1942 # p1() is the base and we're receiving "writes" for p2()'s
1949 # files.
1943 # files.
1950 if 'l' in self.p1()[component].flags():
1944 if 'l' in self.p1()[component].flags():
1951 raise error.Abort("error: %s conflicts with symlink %s "
1945 raise error.Abort("error: %s conflicts with symlink %s "
1952 "in %s." % (path, component,
1946 "in %s." % (path, component,
1953 self.p1().rev()))
1947 self.p1().rev()))
1954 else:
1948 else:
1955 raise error.Abort("error: '%s' conflicts with file '%s' in "
1949 raise error.Abort("error: '%s' conflicts with file '%s' in "
1956 "%s." % (path, component,
1950 "%s." % (path, component,
1957 self.p1().rev()))
1951 self.p1().rev()))
1958
1952
1959 # Test that each new directory to be created to write this path from p2
1953 # Test that each new directory to be created to write this path from p2
1960 # is not a file in p1.
1954 # is not a file in p1.
1961 components = path.split('/')
1955 components = path.split('/')
1962 for i in xrange(len(components)):
1956 for i in xrange(len(components)):
1963 component = "/".join(components[0:i])
1957 component = "/".join(components[0:i])
1964 if component in self.p1():
1958 if component in self.p1():
1965 fail(path, component)
1959 fail(path, component)
1966
1960
1967 # Test the other direction -- that this path from p2 isn't a directory
1961 # Test the other direction -- that this path from p2 isn't a directory
1968 # in p1 (test that p1 doesn't any paths matching `path/*`).
1962 # in p1 (test that p1 doesn't any paths matching `path/*`).
1969 match = matchmod.match('/', '', [path + '/'], default=b'relpath')
1963 match = matchmod.match('/', '', [path + '/'], default=b'relpath')
1970 matches = self.p1().manifest().matches(match)
1964 matches = self.p1().manifest().matches(match)
1971 if len(matches) > 0:
1965 if len(matches) > 0:
1972 if len(matches) == 1 and matches.keys()[0] == path:
1966 if len(matches) == 1 and matches.keys()[0] == path:
1973 return
1967 return
1974 raise error.Abort("error: file '%s' cannot be written because "
1968 raise error.Abort("error: file '%s' cannot be written because "
1975 " '%s/' is a folder in %s (containing %d "
1969 " '%s/' is a folder in %s (containing %d "
1976 "entries: %s)"
1970 "entries: %s)"
1977 % (path, path, self.p1(), len(matches),
1971 % (path, path, self.p1(), len(matches),
1978 ', '.join(matches.keys())))
1972 ', '.join(matches.keys())))
1979
1973
1980 def write(self, path, data, flags='', **kwargs):
1974 def write(self, path, data, flags='', **kwargs):
1981 if data is None:
1975 if data is None:
1982 raise error.ProgrammingError("data must be non-None")
1976 raise error.ProgrammingError("data must be non-None")
1983 self._auditconflicts(path)
1977 self._auditconflicts(path)
1984 self._markdirty(path, exists=True, data=data, date=dateutil.makedate(),
1978 self._markdirty(path, exists=True, data=data, date=dateutil.makedate(),
1985 flags=flags)
1979 flags=flags)
1986
1980
1987 def setflags(self, path, l, x):
1981 def setflags(self, path, l, x):
1988 self._markdirty(path, exists=True, date=dateutil.makedate(),
1982 self._markdirty(path, exists=True, date=dateutil.makedate(),
1989 flags=(l and 'l' or '') + (x and 'x' or ''))
1983 flags=(l and 'l' or '') + (x and 'x' or ''))
1990
1984
1991 def remove(self, path):
1985 def remove(self, path):
1992 self._markdirty(path, exists=False)
1986 self._markdirty(path, exists=False)
1993
1987
1994 def exists(self, path):
1988 def exists(self, path):
1995 """exists behaves like `lexists`, but needs to follow symlinks and
1989 """exists behaves like `lexists`, but needs to follow symlinks and
1996 return False if they are broken.
1990 return False if they are broken.
1997 """
1991 """
1998 if self.isdirty(path):
1992 if self.isdirty(path):
1999 # If this path exists and is a symlink, "follow" it by calling
1993 # If this path exists and is a symlink, "follow" it by calling
2000 # exists on the destination path.
1994 # exists on the destination path.
2001 if (self._cache[path]['exists'] and
1995 if (self._cache[path]['exists'] and
2002 'l' in self._cache[path]['flags']):
1996 'l' in self._cache[path]['flags']):
2003 return self.exists(self._cache[path]['data'].strip())
1997 return self.exists(self._cache[path]['data'].strip())
2004 else:
1998 else:
2005 return self._cache[path]['exists']
1999 return self._cache[path]['exists']
2006
2000
2007 return self._existsinparent(path)
2001 return self._existsinparent(path)
2008
2002
2009 def lexists(self, path):
2003 def lexists(self, path):
2010 """lexists returns True if the path exists"""
2004 """lexists returns True if the path exists"""
2011 if self.isdirty(path):
2005 if self.isdirty(path):
2012 return self._cache[path]['exists']
2006 return self._cache[path]['exists']
2013
2007
2014 return self._existsinparent(path)
2008 return self._existsinparent(path)
2015
2009
2016 def size(self, path):
2010 def size(self, path):
2017 if self.isdirty(path):
2011 if self.isdirty(path):
2018 if self._cache[path]['exists']:
2012 if self._cache[path]['exists']:
2019 return len(self._cache[path]['data'])
2013 return len(self._cache[path]['data'])
2020 else:
2014 else:
2021 raise error.ProgrammingError("No such file or directory: %s" %
2015 raise error.ProgrammingError("No such file or directory: %s" %
2022 self._path)
2016 self._path)
2023 return self._wrappedctx[path].size()
2017 return self._wrappedctx[path].size()
2024
2018
def tomemctx(self, text, branch=None, extra=None, date=None, parents=None,
             user=None, editor=None):
    """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
    committed.

    ``text`` is the commit message.
    ``parents`` (optional) are rev numbers.
    """
    # No parents supplied: inherit them from the wrapped context.
    if parents is None:
        parents = self._wrappedctx.parents()
        if len(parents) == 1:
            parents = (parents[0], None)

    # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
    if parents[1] is None:
        parents = (self._repo[parents[0]], None)
    else:
        parents = (self._repo[parents[0]], self._repo[parents[1]])

    files = self._cache.keys()

    def getfile(repo, memctx, path):
        entry = self._cache[path]
        if not entry['exists']:
            # Returning None, but including the path in `files`, is
            # necessary for memctx to register a deletion.
            return None
        return memfilectx(repo, memctx, path,
                          entry['data'],
                          'l' in entry['flags'],
                          'x' in entry['flags'],
                          entry['copied'])

    return memctx(self._repo, parents, text, files, getfile, date=date,
                  extra=extra, user=user, branch=branch, editor=editor)
2059
2053
def isdirty(self, path):
    """Report whether ``path`` has a pending (dirty) cache entry."""
    dirty = self._cache
    return path in dirty
2062
2056
def isempty(self):
    """Return True if no effective change remains in the overlay."""
    # Drop entries that are actually clean first, so an overlay whose
    # edits were all no-ops correctly reports empty.
    self._compact()
    return not self._cache
2068
2062
def clean(self):
    """Forget every pending change, reverting to the wrapped context."""
    self._cache = {}
2071
2065
2072 def _compact(self):
2066 def _compact(self):
2073 """Removes keys from the cache that are actually clean, by comparing
2067 """Removes keys from the cache that are actually clean, by comparing
2074 them with the underlying context.
2068 them with the underlying context.
2075
2069
2076 This can occur during the merge process, e.g. by passing --tool :local
2070 This can occur during the merge process, e.g. by passing --tool :local
2077 to resolve a conflict.
2071 to resolve a conflict.
2078 """
2072 """
2079 keys = []
2073 keys = []
2080 for path in self._cache.keys():
2074 for path in self._cache.keys():
2081 cache = self._cache[path]
2075 cache = self._cache[path]
2082 try:
2076 try:
2083 underlying = self._wrappedctx[path]
2077 underlying = self._wrappedctx[path]
2084 if (underlying.data() == cache['data'] and
2078 if (underlying.data() == cache['data'] and
2085 underlying.flags() == cache['flags']):
2079 underlying.flags() == cache['flags']):
2086 keys.append(path)
2080 keys.append(path)
2087 except error.ManifestLookupError:
2081 except error.ManifestLookupError:
2088 # Path not in the underlying manifest (created).
2082 # Path not in the underlying manifest (created).
2089 continue
2083 continue
2090
2084
2091 for path in keys:
2085 for path in keys:
2092 del self._cache[path]
2086 del self._cache[path]
2093 return keys
2087 return keys
2094
2088
2095 def _markdirty(self, path, exists, data=None, date=None, flags=''):
2089 def _markdirty(self, path, exists, data=None, date=None, flags=''):
2096 self._cache[path] = {
2090 self._cache[path] = {
2097 'exists': exists,
2091 'exists': exists,
2098 'data': data,
2092 'data': data,
2099 'date': date,
2093 'date': date,
2100 'flags': flags,
2094 'flags': flags,
2101 'copied': None,
2095 'copied': None,
2102 }
2096 }
2103
2097
def filectx(self, path, filelog=None):
    """Return an ``overlayworkingfilectx`` for ``path`` backed by this ctx."""
    return overlayworkingfilectx(self._repo, path, filelog=filelog,
                                 parent=self)
2107
2101
class overlayworkingfilectx(committablefilectx):
    """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
    cache, which can be flushed through later by calling ``flush()``."""

    def __init__(self, repo, path, filelog=None, parent=None):
        super(overlayworkingfilectx, self).__init__(repo, path, filelog,
                                                    parent)
        self._repo = repo
        self._parent = parent
        self._path = path

    def cmp(self, fctx):
        """Return True when contents differ from ``fctx``."""
        return self.data() != fctx.data()

    def changectx(self):
        return self._parent

    def data(self):
        return self._parent.data(self._path)

    def date(self):
        return self._parent.filedate(self._path)

    def exists(self):
        return self.lexists()

    def lexists(self):
        return self._parent.exists(self._path)

    def renamed(self):
        """Return (source path, filenode) when copied, else None."""
        source = self._parent.copydata(self._path)
        if not source:
            return None
        p1man = self._changectx._parents[0]._manifest
        return source, p1man.get(source, nullid)

    def size(self):
        return self._parent.size(self._path)

    def markcopied(self, origin):
        self._parent.markcopied(self._path, origin)

    def audit(self):
        # Nothing to audit for a purely in-memory file.
        pass

    def flags(self):
        return self._parent.flags(self._path)

    def setflags(self, islink, isexec):
        return self._parent.setflags(self._path, islink, isexec)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        return self._parent.write(self._path, data, flags, **kwargs)

    def remove(self, ignoremissing=False):
        return self._parent.remove(self._path)

    def clearunknown(self):
        pass
2166
2160
class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """
    def __init__(self, repo, changes,
                 text="", user=None, date=None, extra=None):
        # NOTE(review): ``super(workingctx, ...)`` deliberately skips
        # workingctx in the MRO so ``changes`` pins the status instead of
        # it being recomputed from the dirstate.
        super(workingctx, self).__init__(repo, text, user, date, extra,
                                         changes)

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        if clean:
            touched = self._changedset
            cleanfiles = [f for f in self._manifest if f not in touched]
        else:
            cleanfiles = []
        status = self._status
        return scmutil.status([f for f in status.modified if match(f)],
                              [f for f in status.added if match(f)],
                              [f for f in status.removed if match(f)],
                              [], [], [], cleanfiles)

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context"""
        files = set(self._status.modified)
        files.update(self._status.added, self._status.removed)
        return files
2202
2196
def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    cache = {}

    def getfilectx(repo, memctx, path):
        # EAFP: the common case after warm-up is a cache hit.
        try:
            return cache[path]
        except KeyError:
            fctx = func(repo, memctx, path)
            cache[path] = fctx
            return fctx

    return getfilectx
2218
2212
def memfilefromctx(ctx):
    """Given a context return a memfilectx for ctx[path]

    This is a convenience method for building a memctx based on another
    context.
    """
    def getfilectx(repo, memctx, path):
        fctx = ctx[path]
        # this is weird but apparently we only keep track of one parent
        # (why not only store that instead of a tuple?)
        renamed = fctx.renamed()
        copied = renamed[0] if renamed else renamed
        return memfilectx(repo, memctx, path, fctx.data(),
                          islink=fctx.islink(), isexec=fctx.isexec(),
                          copied=copied)

    return getfilectx
2237
2231
def memfilefrompatch(patchstore):
    """Given a patch (e.g. patchstore object) return a memfilectx

    This is a convenience method for building a memctx based on a patchstore.
    """
    def getfilectx(repo, memctx, path):
        entry = patchstore.getfile(path)
        data = entry[0]
        if data is None:
            # The patch removed this file.
            return None
        islink, isexec = entry[1]
        return memfilectx(repo, memctx, path, data, islink=islink,
                          isexec=isexec, copied=entry[2])

    return getfilectx
2252
2246
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time while
    related files data and is made available through a callback
    mechanism.  'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn return None for recent
    Mercurial. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by dateutil.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, branch=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra)
        # Not yet committed: no revision number or node assigned.
        self._rev = None
        self._node = None
        # Missing parents (None) are normalized to the null revision.
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [changectx(self._repo, p) for p in (p1, p2)]
        # Deduplicate and sort for deterministic status/manifest results.
        files = sorted(set(files))
        self._files = files
        if branch is not None:
            self._extra['branch'] = encoding.fromlocal(branch)
        self.substate = {}

        if isinstance(filectxfn, patch.filestore):
            filectxfn = memfilefrompatch(filectxfn)
        elif not callable(filectxfn):
            # if store is not callable, wrap it in a function
            filectxfn = memfilefromctx(filectxfn)

        # memoizing increases performance for e.g. vcs convert scenarios.
        self._filectxfn = makecachingfilectxfn(filectxfn)

        if editor:
            # Let the editor callback (re)write the commit message.
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            p1node = nullid
            p2node = nullid
            p = pctx[f].parents() # if file isn't in pctx, check p2?
            if len(p) > 0:
                p1node = p[0].filenode()
                if len(p) > 1:
                    p2node = p[1].filenode()
            man[f] = revlog.hash(self[f].data(), p1node, p2node)

        for f in self._status.added:
            # New files have no filelog parents.
            man[f] = revlog.hash(self[f].data(), nullid, nullid)

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                # filectxfn returned a file context: modified.
                modified.append(f)
            else:
                # filectxfn returned None: the file was removed.
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
2375
2369
class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, changectx, path, data, islink=False,
                 isexec=False, copied=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, changectx)
        self._data = data
        # Build the manifest flag string explicitly.
        flags = ''
        if islink:
            flags += 'l'
        if isexec:
            flags += 'x'
        self._flags = flags
        self._copied = (copied, nullid) if copied else None

    def data(self):
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags, **kwargs):
        """wraps repo.wwrite"""
        self._data = data
2408
2402
class overlayfilectx(committablefilectx):
    """Like memfilectx but take an original filectx and optional parameters to
    override parts of it. This is useful when fctx.data() is expensive (i.e.
    flag processor is expensive) and raw data, flags, and filenode could be
    reused (ex. rebase or mode-only amend a REVIDX_EXTSTORED file).
    """

    def __init__(self, originalfctx, datafunc=None, path=None, flags=None,
                 copied=None, ctx=None):
        """originalfctx: filecontext to duplicate

        datafunc: None or a function to override data (file content). It is a
        function to be lazy. path, flags, copied, ctx: None or overridden value

        copied could be (path, rev), or False. copied could also be just path,
        and will be converted to (path, nullid). This simplifies some callers.
        """

        # Fill in any non-overridden value from the original fctx. The
        # ``*match`` lambdas lazily record whether an override actually
        # differs from the original, for the reusability check below.
        if path is None:
            path = originalfctx.path()
        if ctx is None:
            ctx = originalfctx.changectx()
            ctxmatch = lambda: True
        else:
            ctxmatch = lambda: ctx == originalfctx.changectx()

        repo = originalfctx.repo()
        flog = originalfctx.filelog()
        super(overlayfilectx, self).__init__(repo, path, flog, ctx)

        if copied is None:
            copied = originalfctx.renamed()
            copiedmatch = lambda: True
        else:
            if copied and not isinstance(copied, tuple):
                # repo._filecommit will recalculate copyrev so nullid is okay
                copied = (copied, nullid)
            copiedmatch = lambda: copied == originalfctx.renamed()

        # When data, copied (could affect data), ctx (could affect filelog
        # parents) are not overridden, rawdata, rawflags, and filenode may be
        # reused (repo._filecommit should double check filelog parents).
        #
        # path, flags are not hashed in filelog (but in manifestlog) so they do
        # not affect reusable here.
        #
        # If ctx or copied is overridden to a same value with originalfctx,
        # still consider it's reusable. originalfctx.renamed() may be a bit
        # expensive so it's not called unless necessary. Assuming datafunc is
        # always expensive, do not call it for this "reusable" test.
        reusable = datafunc is None and ctxmatch() and copiedmatch()

        if datafunc is None:
            # Keep data lazy: store the bound method, not its result.
            datafunc = originalfctx.data
        if flags is None:
            flags = originalfctx.flags()

        self._datafunc = datafunc
        self._flags = flags
        self._copied = copied

        if reusable:
            # copy extra fields from originalfctx
            attrs = ['rawdata', 'rawflags', '_filenode', '_filerev']
            for attr_ in attrs:
                if util.safehasattr(originalfctx, attr_):
                    setattr(self, attr_, getattr(originalfctx, attr_))

    def data(self):
        """Return the file content (possibly overridden, computed lazily)."""
        return self._datafunc()
2479
2473
class metadataonlyctx(committablectx):
    """Like memctx but it's reusing the manifest of a different commit.
    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time. 'repo' is the
    current localrepo, 'originalctx' is the original revision whose manifest
    we're reusing, 'parents' is a sequence of two parent revisions identifiers
    (pass None for every missing parent), 'text' is the commit message.

    user receives the committer name and defaults to current repository
    username, date is the commit date in any format supported by
    dateutil.parsedate() and defaults to current date, extra is a dictionary of
    metadata or is left empty.
    """
    def __new__(cls, repo, originalctx, *args, **kwargs):
        # Only 'repo' is meaningful to the base __new__; the remaining
        # constructor arguments are consumed by __init__ below.
        return super(metadataonlyctx, cls).__new__(cls, repo)

    def __init__(self, repo, originalctx, parents=None, text=None, user=None,
                 date=None, extra=None, editor=False):
        if text is None:
            # default to reusing the original commit message
            text = originalctx.description()
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        self._manifestnode = originalctx.manifestnode()
        if parents is None:
            parents = originalctx.parents()
        else:
            # translate parent identifiers into changectxes, dropping the
            # placeholders for missing parents
            parents = [repo[p] for p in parents if p is not None]
        parents = parents[:]
        while len(parents) < 2:
            # pad with the null revision so _parents always has length 2
            # (see the comment in _status below, which relies on this)
            parents.append(repo[nullid])
        p1, p2 = self._parents = parents

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        mp1, mp2 = self.manifestctx().parents
        if p1 != nullid and p1.manifestnode() != mp1:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p1 doesn\'t match the new ctx p1')
        if p2 != nullid and p2.manifestnode() != mp2:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p2 doesn\'t match the new ctx p2')

        self._files = originalctx.files()
        self.substate = {}

        if editor:
            # let the caller-provided editor rewrite the commit message and
            # persist it for recovery (e.g. after an aborted commit)
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        """Return the node id of the manifest being reused."""
        return self._manifestnode

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        # file contents are unchanged, so delegate to the original revision
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the ``origctx``
        and parents manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "metadataonlyctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                # not in any parent manifest -> newly added
                added.append(f)
            elif f in self:
                # present in the reused manifest -> treated as modified
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
2577
2571
class arbitraryfilectx(object):
    """Wrap a file at an arbitrary filesystem location in a filectx-like
    interface, possibly outside the working directory.
    """
    def __init__(self, path, repo=None):
        # Repo is optional because contrib/simplemerge uses this class.
        self._path = path
        self._repo = repo

    def cmp(self, fctx):
        """Return True if this file's content differs from ``fctx``'s."""
        # filecmp follows symlinks whereas `cmp` should not, so skip the
        # fast path if either side is a symlink.
        has_symlink = 'l' in self.flags() or 'l' in fctx.flags()
        if not has_symlink and isinstance(fctx, workingfilectx) and self._repo:
            # Fast path for merges when both sides are disk-backed.  Note
            # that filecmp uses the opposite return convention (True if
            # same) from our cmp functions (True if different), hence the
            # negation.
            other = self._repo.wjoin(fctx.path())
            return not filecmp.cmp(self.path(), other)
        return self.data() != fctx.data()

    def path(self):
        """Return the filesystem path this context wraps."""
        return self._path

    def flags(self):
        # arbitrary files never report exec/symlink flags
        return ''

    def data(self):
        return util.readfile(self._path)

    def decodeddata(self):
        with open(self._path, "rb") as fp:
            return fp.read()

    def remove(self):
        util.unlink(self._path)

    def write(self, data, flags, **kwargs):
        # flags are not supported for arbitrary on-disk files
        assert not flags
        with open(self._path, "w") as fp:
            fp.write(data)
General Comments 0
You need to be logged in to leave comments. Login now