context: clarify deprecation warning message...
Martin von Zweigbergk
r37747:6e137da5 default
@@ -1,2597 +1,2598
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import filecmp
11 import filecmp
12 import os
12 import os
13 import re
13 import re
14 import stat
14 import stat
15
15
16 from .i18n import _
16 from .i18n import _
17 from .node import (
17 from .node import (
18 addednodeid,
18 addednodeid,
19 bin,
19 bin,
20 hex,
20 hex,
21 modifiednodeid,
21 modifiednodeid,
22 nullid,
22 nullid,
23 nullrev,
23 nullrev,
24 short,
24 short,
25 wdirfilenodeids,
25 wdirfilenodeids,
26 wdirid,
26 wdirid,
27 wdirrev,
27 wdirrev,
28 )
28 )
29 from . import (
29 from . import (
30 dagop,
30 dagop,
31 encoding,
31 encoding,
32 error,
32 error,
33 fileset,
33 fileset,
34 match as matchmod,
34 match as matchmod,
35 obsolete as obsmod,
35 obsolete as obsmod,
36 patch,
36 patch,
37 pathutil,
37 pathutil,
38 phases,
38 phases,
39 pycompat,
39 pycompat,
40 repoview,
40 repoview,
41 revlog,
41 revlog,
42 scmutil,
42 scmutil,
43 sparse,
43 sparse,
44 subrepo,
44 subrepo,
45 subrepoutil,
45 subrepoutil,
46 util,
46 util,
47 )
47 )
48 from .utils import (
48 from .utils import (
49 dateutil,
49 dateutil,
50 stringutil,
50 stringutil,
51 )
51 )
52
52
53 propertycache = util.propertycache
53 propertycache = util.propertycache
54
54
55 nonascii = re.compile(br'[^\x21-\x7f]').search
55 nonascii = re.compile(br'[^\x21-\x7f]').search
56
56
57 class basectx(object):
57 class basectx(object):
58 """A basectx object represents the common logic for its children:
58 """A basectx object represents the common logic for its children:
59 changectx: read-only context that is already present in the repo,
59 changectx: read-only context that is already present in the repo,
60 workingctx: a context that represents the working directory and can
60 workingctx: a context that represents the working directory and can
61 be committed,
61 be committed,
62 memctx: a context that represents changes in-memory and can also
62 memctx: a context that represents changes in-memory and can also
63 be committed."""
63 be committed."""
64
64
65 def __init__(self, repo):
65 def __init__(self, repo):
66 self._repo = repo
66 self._repo = repo
67
67
68 def __bytes__(self):
68 def __bytes__(self):
69 return short(self.node())
69 return short(self.node())
70
70
71 __str__ = encoding.strmethod(__bytes__)
71 __str__ = encoding.strmethod(__bytes__)
72
72
73 def __repr__(self):
73 def __repr__(self):
74 return r"<%s %s>" % (type(self).__name__, str(self))
74 return r"<%s %s>" % (type(self).__name__, str(self))
75
75
76 def __eq__(self, other):
76 def __eq__(self, other):
77 try:
77 try:
78 return type(self) == type(other) and self._rev == other._rev
78 return type(self) == type(other) and self._rev == other._rev
79 except AttributeError:
79 except AttributeError:
80 return False
80 return False
81
81
82 def __ne__(self, other):
82 def __ne__(self, other):
83 return not (self == other)
83 return not (self == other)
84
84
85 def __contains__(self, key):
85 def __contains__(self, key):
86 return key in self._manifest
86 return key in self._manifest
87
87
88 def __getitem__(self, key):
88 def __getitem__(self, key):
89 return self.filectx(key)
89 return self.filectx(key)
90
90
91 def __iter__(self):
91 def __iter__(self):
92 return iter(self._manifest)
92 return iter(self._manifest)
93
93
94 def _buildstatusmanifest(self, status):
94 def _buildstatusmanifest(self, status):
95 """Builds a manifest that includes the given status results, if this is
95 """Builds a manifest that includes the given status results, if this is
96 a working copy context. For non-working copy contexts, it just returns
96 a working copy context. For non-working copy contexts, it just returns
97 the normal manifest."""
97 the normal manifest."""
98 return self.manifest()
98 return self.manifest()
99
99
100 def _matchstatus(self, other, match):
100 def _matchstatus(self, other, match):
101 """This internal method provides a way for child objects to override the
101 """This internal method provides a way for child objects to override the
102 match operator.
102 match operator.
103 """
103 """
104 return match
104 return match
105
105
106 def _buildstatus(self, other, s, match, listignored, listclean,
106 def _buildstatus(self, other, s, match, listignored, listclean,
107 listunknown):
107 listunknown):
108 """build a status with respect to another context"""
108 """build a status with respect to another context"""
109 # Load earliest manifest first for caching reasons. More specifically,
109 # Load earliest manifest first for caching reasons. More specifically,
110 # if you have revisions 1000 and 1001, 1001 is probably stored as a
110 # if you have revisions 1000 and 1001, 1001 is probably stored as a
111 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
111 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
112 # 1000 and cache it so that when you read 1001, we just need to apply a
112 # 1000 and cache it so that when you read 1001, we just need to apply a
113 # delta to what's in the cache. So that's one full reconstruction + one
113 # delta to what's in the cache. So that's one full reconstruction + one
114 # delta application.
114 # delta application.
115 mf2 = None
115 mf2 = None
116 if self.rev() is not None and self.rev() < other.rev():
116 if self.rev() is not None and self.rev() < other.rev():
117 mf2 = self._buildstatusmanifest(s)
117 mf2 = self._buildstatusmanifest(s)
118 mf1 = other._buildstatusmanifest(s)
118 mf1 = other._buildstatusmanifest(s)
119 if mf2 is None:
119 if mf2 is None:
120 mf2 = self._buildstatusmanifest(s)
120 mf2 = self._buildstatusmanifest(s)
121
121
122 modified, added = [], []
122 modified, added = [], []
123 removed = []
123 removed = []
124 clean = []
124 clean = []
125 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
125 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
126 deletedset = set(deleted)
126 deletedset = set(deleted)
127 d = mf1.diff(mf2, match=match, clean=listclean)
127 d = mf1.diff(mf2, match=match, clean=listclean)
128 for fn, value in d.iteritems():
128 for fn, value in d.iteritems():
129 if fn in deletedset:
129 if fn in deletedset:
130 continue
130 continue
131 if value is None:
131 if value is None:
132 clean.append(fn)
132 clean.append(fn)
133 continue
133 continue
134 (node1, flag1), (node2, flag2) = value
134 (node1, flag1), (node2, flag2) = value
135 if node1 is None:
135 if node1 is None:
136 added.append(fn)
136 added.append(fn)
137 elif node2 is None:
137 elif node2 is None:
138 removed.append(fn)
138 removed.append(fn)
139 elif flag1 != flag2:
139 elif flag1 != flag2:
140 modified.append(fn)
140 modified.append(fn)
141 elif node2 not in wdirfilenodeids:
141 elif node2 not in wdirfilenodeids:
142 # When comparing files between two commits, we save time by
142 # When comparing files between two commits, we save time by
143 # not comparing the file contents when the nodeids differ.
143 # not comparing the file contents when the nodeids differ.
144 # Note that this means we incorrectly report a reverted change
144 # Note that this means we incorrectly report a reverted change
145 # to a file as a modification.
145 # to a file as a modification.
146 modified.append(fn)
146 modified.append(fn)
147 elif self[fn].cmp(other[fn]):
147 elif self[fn].cmp(other[fn]):
148 modified.append(fn)
148 modified.append(fn)
149 else:
149 else:
150 clean.append(fn)
150 clean.append(fn)
151
151
152 if removed:
152 if removed:
153 # need to filter files if they are already reported as removed
153 # need to filter files if they are already reported as removed
154 unknown = [fn for fn in unknown if fn not in mf1 and
154 unknown = [fn for fn in unknown if fn not in mf1 and
155 (not match or match(fn))]
155 (not match or match(fn))]
156 ignored = [fn for fn in ignored if fn not in mf1 and
156 ignored = [fn for fn in ignored if fn not in mf1 and
157 (not match or match(fn))]
157 (not match or match(fn))]
158 # if they're deleted, don't report them as removed
158 # if they're deleted, don't report them as removed
159 removed = [fn for fn in removed if fn not in deletedset]
159 removed = [fn for fn in removed if fn not in deletedset]
160
160
161 return scmutil.status(modified, added, removed, deleted, unknown,
161 return scmutil.status(modified, added, removed, deleted, unknown,
162 ignored, clean)
162 ignored, clean)
163
163
164 @propertycache
164 @propertycache
165 def substate(self):
165 def substate(self):
166 return subrepoutil.state(self, self._repo.ui)
166 return subrepoutil.state(self, self._repo.ui)
167
167
168 def subrev(self, subpath):
168 def subrev(self, subpath):
169 return self.substate[subpath][1]
169 return self.substate[subpath][1]
170
170
171 def rev(self):
171 def rev(self):
172 return self._rev
172 return self._rev
173 def node(self):
173 def node(self):
174 return self._node
174 return self._node
175 def hex(self):
175 def hex(self):
176 return hex(self.node())
176 return hex(self.node())
177 def manifest(self):
177 def manifest(self):
178 return self._manifest
178 return self._manifest
179 def manifestctx(self):
179 def manifestctx(self):
180 return self._manifestctx
180 return self._manifestctx
181 def repo(self):
181 def repo(self):
182 return self._repo
182 return self._repo
183 def phasestr(self):
183 def phasestr(self):
184 return phases.phasenames[self.phase()]
184 return phases.phasenames[self.phase()]
185 def mutable(self):
185 def mutable(self):
186 return self.phase() > phases.public
186 return self.phase() > phases.public
187
187
188 def getfileset(self, expr):
188 def getfileset(self, expr):
189 return fileset.getfileset(self, expr)
189 return fileset.getfileset(self, expr)
190
190
191 def obsolete(self):
191 def obsolete(self):
192 """True if the changeset is obsolete"""
192 """True if the changeset is obsolete"""
193 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
193 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
194
194
195 def extinct(self):
195 def extinct(self):
196 """True if the changeset is extinct"""
196 """True if the changeset is extinct"""
197 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
197 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
198
198
199 def orphan(self):
199 def orphan(self):
200 """True if the changeset is not obsolete but it's ancestor are"""
200 """True if the changeset is not obsolete but it's ancestor are"""
201 return self.rev() in obsmod.getrevs(self._repo, 'orphan')
201 return self.rev() in obsmod.getrevs(self._repo, 'orphan')
202
202
203 def phasedivergent(self):
203 def phasedivergent(self):
204 """True if the changeset try to be a successor of a public changeset
204 """True if the changeset try to be a successor of a public changeset
205
205
206 Only non-public and non-obsolete changesets may be bumped.
206 Only non-public and non-obsolete changesets may be bumped.
207 """
207 """
208 return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')
208 return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')
209
209
210 def contentdivergent(self):
210 def contentdivergent(self):
211 """Is a successors of a changeset with multiple possible successors set
211 """Is a successors of a changeset with multiple possible successors set
212
212
213 Only non-public and non-obsolete changesets may be divergent.
213 Only non-public and non-obsolete changesets may be divergent.
214 """
214 """
215 return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')
215 return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')
216
216
217 def isunstable(self):
217 def isunstable(self):
218 """True if the changeset is either unstable, bumped or divergent"""
218 """True if the changeset is either unstable, bumped or divergent"""
219 return self.orphan() or self.phasedivergent() or self.contentdivergent()
219 return self.orphan() or self.phasedivergent() or self.contentdivergent()
220
220
221 def instabilities(self):
221 def instabilities(self):
222 """return the list of instabilities affecting this changeset.
222 """return the list of instabilities affecting this changeset.
223
223
224 Instabilities are returned as strings. Possible values are:
224 Instabilities are returned as strings. Possible values are:
225 - orphan,
225 - orphan,
226 - phase-divergent,
226 - phase-divergent,
227 - content-divergent.
227 - content-divergent.
228 """
228 """
229 instabilities = []
229 instabilities = []
230 if self.orphan():
230 if self.orphan():
231 instabilities.append('orphan')
231 instabilities.append('orphan')
232 if self.phasedivergent():
232 if self.phasedivergent():
233 instabilities.append('phase-divergent')
233 instabilities.append('phase-divergent')
234 if self.contentdivergent():
234 if self.contentdivergent():
235 instabilities.append('content-divergent')
235 instabilities.append('content-divergent')
236 return instabilities
236 return instabilities
237
237
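# A minimal usage sketch of the instability helpers above (assuming an
# existing changectx ``ctx`` and ``ui = ctx.repo().ui``):
#
#   if ctx.isunstable():
#       for instability in ctx.instabilities():
#           ui.warn(b"instability: %s\n" % instability)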
238 def parents(self):
238 def parents(self):
239 """return contexts for each parent changeset"""
239 """return contexts for each parent changeset"""
240 return self._parents
240 return self._parents
241
241
242 def p1(self):
242 def p1(self):
243 return self._parents[0]
243 return self._parents[0]
244
244
245 def p2(self):
245 def p2(self):
246 parents = self._parents
246 parents = self._parents
247 if len(parents) == 2:
247 if len(parents) == 2:
248 return parents[1]
248 return parents[1]
249 return changectx(self._repo, nullrev)
249 return changectx(self._repo, nullrev)
250
250
251 def _fileinfo(self, path):
251 def _fileinfo(self, path):
252 if r'_manifest' in self.__dict__:
252 if r'_manifest' in self.__dict__:
253 try:
253 try:
254 return self._manifest[path], self._manifest.flags(path)
254 return self._manifest[path], self._manifest.flags(path)
255 except KeyError:
255 except KeyError:
256 raise error.ManifestLookupError(self._node, path,
256 raise error.ManifestLookupError(self._node, path,
257 _('not found in manifest'))
257 _('not found in manifest'))
258 if r'_manifestdelta' in self.__dict__ or path in self.files():
258 if r'_manifestdelta' in self.__dict__ or path in self.files():
259 if path in self._manifestdelta:
259 if path in self._manifestdelta:
260 return (self._manifestdelta[path],
260 return (self._manifestdelta[path],
261 self._manifestdelta.flags(path))
261 self._manifestdelta.flags(path))
262 mfl = self._repo.manifestlog
262 mfl = self._repo.manifestlog
263 try:
263 try:
264 node, flag = mfl[self._changeset.manifest].find(path)
264 node, flag = mfl[self._changeset.manifest].find(path)
265 except KeyError:
265 except KeyError:
266 raise error.ManifestLookupError(self._node, path,
266 raise error.ManifestLookupError(self._node, path,
267 _('not found in manifest'))
267 _('not found in manifest'))
268
268
269 return node, flag
269 return node, flag
270
270
271 def filenode(self, path):
271 def filenode(self, path):
272 return self._fileinfo(path)[0]
272 return self._fileinfo(path)[0]
273
273
274 def flags(self, path):
274 def flags(self, path):
275 try:
275 try:
276 return self._fileinfo(path)[1]
276 return self._fileinfo(path)[1]
277 except error.LookupError:
277 except error.LookupError:
278 return ''
278 return ''
279
279
280 def sub(self, path, allowcreate=True):
280 def sub(self, path, allowcreate=True):
281 '''return a subrepo for the stored revision of path, never wdir()'''
281 '''return a subrepo for the stored revision of path, never wdir()'''
282 return subrepo.subrepo(self, path, allowcreate=allowcreate)
282 return subrepo.subrepo(self, path, allowcreate=allowcreate)
283
283
284 def nullsub(self, path, pctx):
284 def nullsub(self, path, pctx):
285 return subrepo.nullsubrepo(self, path, pctx)
285 return subrepo.nullsubrepo(self, path, pctx)
286
286
287 def workingsub(self, path):
287 def workingsub(self, path):
288 '''return a subrepo for the stored revision, or wdir if this is a wdir
288 '''return a subrepo for the stored revision, or wdir if this is a wdir
289 context.
289 context.
290 '''
290 '''
291 return subrepo.subrepo(self, path, allowwdir=True)
291 return subrepo.subrepo(self, path, allowwdir=True)
292
292
293 def match(self, pats=None, include=None, exclude=None, default='glob',
293 def match(self, pats=None, include=None, exclude=None, default='glob',
294 listsubrepos=False, badfn=None):
294 listsubrepos=False, badfn=None):
295 r = self._repo
295 r = self._repo
296 return matchmod.match(r.root, r.getcwd(), pats,
296 return matchmod.match(r.root, r.getcwd(), pats,
297 include, exclude, default,
297 include, exclude, default,
298 auditor=r.nofsauditor, ctx=self,
298 auditor=r.nofsauditor, ctx=self,
299 listsubrepos=listsubrepos, badfn=badfn)
299 listsubrepos=listsubrepos, badfn=badfn)
300
300
301 def diff(self, ctx2=None, match=None, **opts):
301 def diff(self, ctx2=None, match=None, **opts):
302 """Returns a diff generator for the given contexts and matcher"""
302 """Returns a diff generator for the given contexts and matcher"""
303 if ctx2 is None:
303 if ctx2 is None:
304 ctx2 = self.p1()
304 ctx2 = self.p1()
305 if ctx2 is not None:
305 if ctx2 is not None:
306 ctx2 = self._repo[ctx2]
306 ctx2 = self._repo[ctx2]
307 diffopts = patch.diffopts(self._repo.ui, pycompat.byteskwargs(opts))
307 diffopts = patch.diffopts(self._repo.ui, pycompat.byteskwargs(opts))
308 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
308 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
309
309
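# A minimal usage sketch (assuming an existing changectx ``ctx`` and a
# writable binary stream ``out``; the ``git`` keyword is simply forwarded to
# patch.diffopts() as shown above):
#
#   for chunk in ctx.diff(git=True):
#       out.write(chunk)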
310 def dirs(self):
310 def dirs(self):
311 return self._manifest.dirs()
311 return self._manifest.dirs()
312
312
313 def hasdir(self, dir):
313 def hasdir(self, dir):
314 return self._manifest.hasdir(dir)
314 return self._manifest.hasdir(dir)
315
315
316 def status(self, other=None, match=None, listignored=False,
316 def status(self, other=None, match=None, listignored=False,
317 listclean=False, listunknown=False, listsubrepos=False):
317 listclean=False, listunknown=False, listsubrepos=False):
318 """return status of files between two nodes or node and working
318 """return status of files between two nodes or node and working
319 directory.
319 directory.
320
320
321 If other is None, compare this node with working directory.
321 If other is None, compare this node with working directory.
322
322
323 returns (modified, added, removed, deleted, unknown, ignored, clean)
323 returns (modified, added, removed, deleted, unknown, ignored, clean)
324 """
324 """
325
325
326 ctx1 = self
326 ctx1 = self
327 ctx2 = self._repo[other]
327 ctx2 = self._repo[other]
328
328
329 # This next code block is, admittedly, fragile logic that tests for
329 # This next code block is, admittedly, fragile logic that tests for
330 # reversing the contexts and wouldn't need to exist if it weren't for
330 # reversing the contexts and wouldn't need to exist if it weren't for
331 # the fast (and common) code path of comparing the working directory
331 # the fast (and common) code path of comparing the working directory
332 # with its first parent.
332 # with its first parent.
333 #
333 #
334 # What we're aiming for here is the ability to call:
334 # What we're aiming for here is the ability to call:
335 #
335 #
336 # workingctx.status(parentctx)
336 # workingctx.status(parentctx)
337 #
337 #
338 # If we always built the manifest for each context and compared those,
338 # If we always built the manifest for each context and compared those,
339 # then we'd be done. But the special case of the above call means we
339 # then we'd be done. But the special case of the above call means we
340 # just copy the manifest of the parent.
340 # just copy the manifest of the parent.
341 reversed = False
341 reversed = False
342 if (not isinstance(ctx1, changectx)
342 if (not isinstance(ctx1, changectx)
343 and isinstance(ctx2, changectx)):
343 and isinstance(ctx2, changectx)):
344 reversed = True
344 reversed = True
345 ctx1, ctx2 = ctx2, ctx1
345 ctx1, ctx2 = ctx2, ctx1
346
346
347 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
347 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
348 match = ctx2._matchstatus(ctx1, match)
348 match = ctx2._matchstatus(ctx1, match)
349 r = scmutil.status([], [], [], [], [], [], [])
349 r = scmutil.status([], [], [], [], [], [], [])
350 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
350 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
351 listunknown)
351 listunknown)
352
352
353 if reversed:
353 if reversed:
354 # Reverse added and removed. Clear deleted, unknown and ignored as
354 # Reverse added and removed. Clear deleted, unknown and ignored as
355 # these make no sense to reverse.
355 # these make no sense to reverse.
356 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
356 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
357 r.clean)
357 r.clean)
358
358
359 if listsubrepos:
359 if listsubrepos:
360 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
360 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
361 try:
361 try:
362 rev2 = ctx2.subrev(subpath)
362 rev2 = ctx2.subrev(subpath)
363 except KeyError:
363 except KeyError:
364 # A subrepo that existed in node1 was deleted between
364 # A subrepo that existed in node1 was deleted between
365 # node1 and node2 (inclusive). Thus, ctx2's substate
365 # node1 and node2 (inclusive). Thus, ctx2's substate
366 # won't contain that subpath. The best we can do is ignore it.
366 # won't contain that subpath. The best we can do is ignore it.
367 rev2 = None
367 rev2 = None
368 submatch = matchmod.subdirmatcher(subpath, match)
368 submatch = matchmod.subdirmatcher(subpath, match)
369 s = sub.status(rev2, match=submatch, ignored=listignored,
369 s = sub.status(rev2, match=submatch, ignored=listignored,
370 clean=listclean, unknown=listunknown,
370 clean=listclean, unknown=listunknown,
371 listsubrepos=True)
371 listsubrepos=True)
372 for rfiles, sfiles in zip(r, s):
372 for rfiles, sfiles in zip(r, s):
373 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
373 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
374
374
375 for l in r:
375 for l in r:
376 l.sort()
376 l.sort()
377
377
378 return r
378 return r
379
379
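# A minimal usage sketch of status() (assuming a repository object ``repo``
# and ``ui = repo.ui``); the named fields come from the scmutil.status tuple
# built above:
#
#   st = repo['.'].status()        # '.' compared with the working directory
#   for f in st.modified:
#       ui.write(b"M %s\n" % f)
#   for f in st.added:
#       ui.write(b"A %s\n" % f)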
380 def changectxdeprecwarn(repo):
380 def changectxdeprecwarn(repo):
381 # changectx's constructor will soon lose support for these forms of
381 # changectx's constructor will soon lose support for these forms of
382 # changeids:
382 # changeids:
383 # * stringified ints
383 # * stringified ints
384 # * bookmarks, tags, branches, and other namespace identifiers
384 # * bookmarks, tags, branches, and other namespace identifiers
385 # * hex nodeid prefixes
385 # * hex nodeid prefixes
386 #
386 #
387 # Depending on your use case, replace repo[x] by one of these:
387 # Depending on your use case, replace repo[x] by one of these:
388 # * If you want to support general revsets, use scmutil.revsingle(x)
388 # * If you want to support general revsets, use scmutil.revsingle(x)
389 # * If you know that "x" is a stringified int, use repo[int(x)]
389 # * If you know that "x" is a stringified int, use repo[int(x)]
390 # * If you know that "x" is a bookmark, use repo._bookmarks.changectx(x)
390 # * If you know that "x" is a bookmark, use repo._bookmarks.changectx(x)
391 # * If you know that "x" is a tag, use repo[repo.tags()[x]]
391 # * If you know that "x" is a tag, use repo[repo.tags()[x]]
392 # * If you know that "x" is a branch or in some other namespace,
392 # * If you know that "x" is a branch or in some other namespace,
393 # use the appropriate mechanism for that namespace
393 # use the appropriate mechanism for that namespace
394 # * If you know that "x" is a hex nodeid prefix, use
394 # * If you know that "x" is a hex nodeid prefix, use
395 # repo[scmutil.resolvehexnodeidprefix(repo, x)]
395 # repo[scmutil.resolvehexnodeidprefix(repo, x)]
396 # * If "x" is a string that can be any of the above, but you don't want
396 # * If "x" is a string that can be any of the above, but you don't want
397 # to allow general revsets (perhaps because "x" may come from a remote
397 # to allow general revsets (perhaps because "x" may come from a remote
398 # user and the revset may be too costly), use scmutil.revsymbol(repo, x)
398 # user and the revset may be too costly), use scmutil.revsymbol(repo, x)
399 # * If "x" can be a mix of the above, you'll have to figure it out
399 # * If "x" can be a mix of the above, you'll have to figure it out
400 # yourself
400 # yourself
401 repo.ui.deprecwarn("changectx.__init__ is getting more limited, see source "
401 repo.ui.deprecwarn("changectx.__init__ is getting more limited, see "
402 "for details", "4.6", stacklevel=4)
402 "context.changectxdeprecwarn() for details", "4.6",
403 stacklevel=4)
403
404
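# A minimal sketch of the replacements recommended in the comment above
# (assuming a repository object ``repo`` and a user-supplied byte string
# ``x``; which call applies depends on what ``x`` may contain):
#
#   from mercurial import scmutil
#
#   ctx = scmutil.revsingle(repo, x)      # "x" may be any revset expression
#   ctx = repo[int(x)]                    # "x" is a stringified int
#   ctx = scmutil.revsymbol(repo, x)      # "x" is a symbol, but general
#                                         # revsets must not be evaluated
#   ctx = repo[scmutil.resolvehexnodeidprefix(repo, x)]   # hex nodeid prefix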
404 class changectx(basectx):
405 class changectx(basectx):
405 """A changecontext object makes access to data related to a particular
406 """A changecontext object makes access to data related to a particular
406 changeset convenient. It represents a read-only context already present in
407 changeset convenient. It represents a read-only context already present in
407 the repo."""
408 the repo."""
408 def __init__(self, repo, changeid='.'):
409 def __init__(self, repo, changeid='.'):
409 """changeid is a revision number, node, or tag"""
410 """changeid is a revision number, node, or tag"""
410 super(changectx, self).__init__(repo)
411 super(changectx, self).__init__(repo)
411
412
412 try:
413 try:
413 if isinstance(changeid, int):
414 if isinstance(changeid, int):
414 self._node = repo.changelog.node(changeid)
415 self._node = repo.changelog.node(changeid)
415 self._rev = changeid
416 self._rev = changeid
416 return
417 return
417 if changeid == 'null':
418 if changeid == 'null':
418 self._node = nullid
419 self._node = nullid
419 self._rev = nullrev
420 self._rev = nullrev
420 return
421 return
421 if changeid == 'tip':
422 if changeid == 'tip':
422 self._node = repo.changelog.tip()
423 self._node = repo.changelog.tip()
423 self._rev = repo.changelog.rev(self._node)
424 self._rev = repo.changelog.rev(self._node)
424 return
425 return
425 if (changeid == '.'
426 if (changeid == '.'
426 or repo.local() and changeid == repo.dirstate.p1()):
427 or repo.local() and changeid == repo.dirstate.p1()):
427 # this is a hack to delay/avoid loading obsmarkers
428 # this is a hack to delay/avoid loading obsmarkers
428 # when we know that '.' won't be hidden
429 # when we know that '.' won't be hidden
429 self._node = repo.dirstate.p1()
430 self._node = repo.dirstate.p1()
430 self._rev = repo.unfiltered().changelog.rev(self._node)
431 self._rev = repo.unfiltered().changelog.rev(self._node)
431 return
432 return
432 if len(changeid) == 20:
433 if len(changeid) == 20:
433 try:
434 try:
434 self._node = changeid
435 self._node = changeid
435 self._rev = repo.changelog.rev(changeid)
436 self._rev = repo.changelog.rev(changeid)
436 return
437 return
437 except error.FilteredLookupError:
438 except error.FilteredLookupError:
438 raise
439 raise
439 except LookupError:
440 except LookupError:
440 pass
441 pass
441
442
442 try:
443 try:
443 r = int(changeid)
444 r = int(changeid)
444 if '%d' % r != changeid:
445 if '%d' % r != changeid:
445 raise ValueError
446 raise ValueError
446 l = len(repo.changelog)
447 l = len(repo.changelog)
447 if r < 0:
448 if r < 0:
448 r += l
449 r += l
449 if r < 0 or r >= l and r != wdirrev:
450 if r < 0 or r >= l and r != wdirrev:
450 raise ValueError
451 raise ValueError
451 self._rev = r
452 self._rev = r
452 self._node = repo.changelog.node(r)
453 self._node = repo.changelog.node(r)
453 changectxdeprecwarn(repo)
454 changectxdeprecwarn(repo)
454 return
455 return
455 except error.FilteredIndexError:
456 except error.FilteredIndexError:
456 raise
457 raise
457 except (ValueError, OverflowError, IndexError):
458 except (ValueError, OverflowError, IndexError):
458 pass
459 pass
459
460
460 if len(changeid) == 40:
461 if len(changeid) == 40:
461 try:
462 try:
462 self._node = bin(changeid)
463 self._node = bin(changeid)
463 self._rev = repo.changelog.rev(self._node)
464 self._rev = repo.changelog.rev(self._node)
464 return
465 return
465 except error.FilteredLookupError:
466 except error.FilteredLookupError:
466 raise
467 raise
467 except (TypeError, LookupError):
468 except (TypeError, LookupError):
468 pass
469 pass
469
470
470 # lookup bookmarks through the name interface
471 # lookup bookmarks through the name interface
471 try:
472 try:
472 self._node = repo.names.singlenode(repo, changeid)
473 self._node = repo.names.singlenode(repo, changeid)
473 self._rev = repo.changelog.rev(self._node)
474 self._rev = repo.changelog.rev(self._node)
474 changectxdeprecwarn(repo)
475 changectxdeprecwarn(repo)
475 return
476 return
476 except KeyError:
477 except KeyError:
477 pass
478 pass
478
479
479 self._node = scmutil.resolvehexnodeidprefix(repo, changeid)
480 self._node = scmutil.resolvehexnodeidprefix(repo, changeid)
480 if self._node is not None:
481 if self._node is not None:
481 self._rev = repo.changelog.rev(self._node)
482 self._rev = repo.changelog.rev(self._node)
482 changectxdeprecwarn(repo)
483 changectxdeprecwarn(repo)
483 return
484 return
484
485
485 # lookup failed
486 # lookup failed
486 # check if it might have come from damaged dirstate
487 # check if it might have come from damaged dirstate
487 #
488 #
488 # XXX we could avoid the unfiltered if we had a recognizable
489 # XXX we could avoid the unfiltered if we had a recognizable
489 # exception for filtered changeset access
490 # exception for filtered changeset access
490 if (repo.local()
491 if (repo.local()
491 and changeid in repo.unfiltered().dirstate.parents()):
492 and changeid in repo.unfiltered().dirstate.parents()):
492 msg = _("working directory has unknown parent '%s'!")
493 msg = _("working directory has unknown parent '%s'!")
493 raise error.Abort(msg % short(changeid))
494 raise error.Abort(msg % short(changeid))
494 try:
495 try:
495 if len(changeid) == 20 and nonascii(changeid):
496 if len(changeid) == 20 and nonascii(changeid):
496 changeid = hex(changeid)
497 changeid = hex(changeid)
497 except TypeError:
498 except TypeError:
498 pass
499 pass
499 except (error.FilteredIndexError, error.FilteredLookupError,
500 except (error.FilteredIndexError, error.FilteredLookupError,
500 error.FilteredRepoLookupError):
501 error.FilteredRepoLookupError):
501 raise
502 raise
502 except IndexError:
503 except IndexError:
503 pass
504 pass
504 raise error.RepoLookupError(
505 raise error.RepoLookupError(
505 _("unknown revision '%s'") % changeid)
506 _("unknown revision '%s'") % changeid)
506
507
507 def __hash__(self):
508 def __hash__(self):
508 try:
509 try:
509 return hash(self._rev)
510 return hash(self._rev)
510 except AttributeError:
511 except AttributeError:
511 return id(self)
512 return id(self)
512
513
513 def __nonzero__(self):
514 def __nonzero__(self):
514 return self._rev != nullrev
515 return self._rev != nullrev
515
516
516 __bool__ = __nonzero__
517 __bool__ = __nonzero__
517
518
518 @propertycache
519 @propertycache
519 def _changeset(self):
520 def _changeset(self):
520 return self._repo.changelog.changelogrevision(self.rev())
521 return self._repo.changelog.changelogrevision(self.rev())
521
522
522 @propertycache
523 @propertycache
523 def _manifest(self):
524 def _manifest(self):
524 return self._manifestctx.read()
525 return self._manifestctx.read()
525
526
526 @property
527 @property
527 def _manifestctx(self):
528 def _manifestctx(self):
528 return self._repo.manifestlog[self._changeset.manifest]
529 return self._repo.manifestlog[self._changeset.manifest]
529
530
530 @propertycache
531 @propertycache
531 def _manifestdelta(self):
532 def _manifestdelta(self):
532 return self._manifestctx.readdelta()
533 return self._manifestctx.readdelta()
533
534
534 @propertycache
535 @propertycache
535 def _parents(self):
536 def _parents(self):
536 repo = self._repo
537 repo = self._repo
537 p1, p2 = repo.changelog.parentrevs(self._rev)
538 p1, p2 = repo.changelog.parentrevs(self._rev)
538 if p2 == nullrev:
539 if p2 == nullrev:
539 return [changectx(repo, p1)]
540 return [changectx(repo, p1)]
540 return [changectx(repo, p1), changectx(repo, p2)]
541 return [changectx(repo, p1), changectx(repo, p2)]
541
542
542 def changeset(self):
543 def changeset(self):
543 c = self._changeset
544 c = self._changeset
544 return (
545 return (
545 c.manifest,
546 c.manifest,
546 c.user,
547 c.user,
547 c.date,
548 c.date,
548 c.files,
549 c.files,
549 c.description,
550 c.description,
550 c.extra,
551 c.extra,
551 )
552 )
552 def manifestnode(self):
553 def manifestnode(self):
553 return self._changeset.manifest
554 return self._changeset.manifest
554
555
555 def user(self):
556 def user(self):
556 return self._changeset.user
557 return self._changeset.user
557 def date(self):
558 def date(self):
558 return self._changeset.date
559 return self._changeset.date
559 def files(self):
560 def files(self):
560 return self._changeset.files
561 return self._changeset.files
561 def description(self):
562 def description(self):
562 return self._changeset.description
563 return self._changeset.description
563 def branch(self):
564 def branch(self):
564 return encoding.tolocal(self._changeset.extra.get("branch"))
565 return encoding.tolocal(self._changeset.extra.get("branch"))
565 def closesbranch(self):
566 def closesbranch(self):
566 return 'close' in self._changeset.extra
567 return 'close' in self._changeset.extra
567 def extra(self):
568 def extra(self):
568 """Return a dict of extra information."""
569 """Return a dict of extra information."""
569 return self._changeset.extra
570 return self._changeset.extra
570 def tags(self):
571 def tags(self):
571 """Return a list of byte tag names"""
572 """Return a list of byte tag names"""
572 return self._repo.nodetags(self._node)
573 return self._repo.nodetags(self._node)
573 def bookmarks(self):
574 def bookmarks(self):
574 """Return a list of byte bookmark names."""
575 """Return a list of byte bookmark names."""
575 return self._repo.nodebookmarks(self._node)
576 return self._repo.nodebookmarks(self._node)
576 def phase(self):
577 def phase(self):
577 return self._repo._phasecache.phase(self._repo, self._rev)
578 return self._repo._phasecache.phase(self._repo, self._rev)
578 def hidden(self):
579 def hidden(self):
579 return self._rev in repoview.filterrevs(self._repo, 'visible')
580 return self._rev in repoview.filterrevs(self._repo, 'visible')
580
581
581 def isinmemory(self):
582 def isinmemory(self):
582 return False
583 return False
583
584
584 def children(self):
585 def children(self):
585 """return list of changectx contexts for each child changeset.
586 """return list of changectx contexts for each child changeset.
586
587
587 This returns only the immediate child changesets. Use descendants() to
588 This returns only the immediate child changesets. Use descendants() to
588 recursively walk children.
589 recursively walk children.
589 """
590 """
590 c = self._repo.changelog.children(self._node)
591 c = self._repo.changelog.children(self._node)
591 return [changectx(self._repo, x) for x in c]
592 return [changectx(self._repo, x) for x in c]
592
593
593 def ancestors(self):
594 def ancestors(self):
594 for a in self._repo.changelog.ancestors([self._rev]):
595 for a in self._repo.changelog.ancestors([self._rev]):
595 yield changectx(self._repo, a)
596 yield changectx(self._repo, a)
596
597
597 def descendants(self):
598 def descendants(self):
598 """Recursively yield all children of the changeset.
599 """Recursively yield all children of the changeset.
599
600
600 For just the immediate children, use children()
601 For just the immediate children, use children()
601 """
602 """
602 for d in self._repo.changelog.descendants([self._rev]):
603 for d in self._repo.changelog.descendants([self._rev]):
603 yield changectx(self._repo, d)
604 yield changectx(self._repo, d)
604
605
605 def filectx(self, path, fileid=None, filelog=None):
606 def filectx(self, path, fileid=None, filelog=None):
606 """get a file context from this changeset"""
607 """get a file context from this changeset"""
607 if fileid is None:
608 if fileid is None:
608 fileid = self.filenode(path)
609 fileid = self.filenode(path)
609 return filectx(self._repo, path, fileid=fileid,
610 return filectx(self._repo, path, fileid=fileid,
610 changectx=self, filelog=filelog)
611 changectx=self, filelog=filelog)
611
612
612 def ancestor(self, c2, warn=False):
613 def ancestor(self, c2, warn=False):
613 """return the "best" ancestor context of self and c2
614 """return the "best" ancestor context of self and c2
614
615
615 If there are multiple candidates, it will show a message and check
616 If there are multiple candidates, it will show a message and check
616 merge.preferancestor configuration before falling back to the
617 merge.preferancestor configuration before falling back to the
617 revlog ancestor."""
618 revlog ancestor."""
618 # deal with workingctxs
619 # deal with workingctxs
619 n2 = c2._node
620 n2 = c2._node
620 if n2 is None:
621 if n2 is None:
621 n2 = c2._parents[0]._node
622 n2 = c2._parents[0]._node
622 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
623 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
623 if not cahs:
624 if not cahs:
624 anc = nullid
625 anc = nullid
625 elif len(cahs) == 1:
626 elif len(cahs) == 1:
626 anc = cahs[0]
627 anc = cahs[0]
627 else:
628 else:
628 # experimental config: merge.preferancestor
629 # experimental config: merge.preferancestor
629 for r in self._repo.ui.configlist('merge', 'preferancestor'):
630 for r in self._repo.ui.configlist('merge', 'preferancestor'):
630 try:
631 try:
631 ctx = scmutil.revsymbol(self._repo, r)
632 ctx = scmutil.revsymbol(self._repo, r)
632 except error.RepoLookupError:
633 except error.RepoLookupError:
633 continue
634 continue
634 anc = ctx.node()
635 anc = ctx.node()
635 if anc in cahs:
636 if anc in cahs:
636 break
637 break
637 else:
638 else:
638 anc = self._repo.changelog.ancestor(self._node, n2)
639 anc = self._repo.changelog.ancestor(self._node, n2)
639 if warn:
640 if warn:
640 self._repo.ui.status(
641 self._repo.ui.status(
641 (_("note: using %s as ancestor of %s and %s\n") %
642 (_("note: using %s as ancestor of %s and %s\n") %
642 (short(anc), short(self._node), short(n2))) +
643 (short(anc), short(self._node), short(n2))) +
643 ''.join(_(" alternatively, use --config "
644 ''.join(_(" alternatively, use --config "
644 "merge.preferancestor=%s\n") %
645 "merge.preferancestor=%s\n") %
645 short(n) for n in sorted(cahs) if n != anc))
646 short(n) for n in sorted(cahs) if n != anc))
646 return changectx(self._repo, anc)
647 return changectx(self._repo, anc)
647
648
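# The ancestor selection above can be pinned through the experimental setting
# it reads, either with ``--config merge.preferancestor=<rev>`` as suggested
# by the note printed above, or persistently in an hgrc:
#
#   [merge]
#   preferancestor = <rev>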
648 def descendant(self, other):
649 def descendant(self, other):
649 """True if other is descendant of this changeset"""
650 """True if other is descendant of this changeset"""
650 return self._repo.changelog.descendant(self._rev, other._rev)
651 return self._repo.changelog.descendant(self._rev, other._rev)
651
652
652 def walk(self, match):
653 def walk(self, match):
653 '''Generates matching file names.'''
654 '''Generates matching file names.'''
654
655
655 # Wrap match.bad method to have message with nodeid
656 # Wrap match.bad method to have message with nodeid
656 def bad(fn, msg):
657 def bad(fn, msg):
657 # The manifest doesn't know about subrepos, so don't complain about
658 # The manifest doesn't know about subrepos, so don't complain about
658 # paths into valid subrepos.
659 # paths into valid subrepos.
659 if any(fn == s or fn.startswith(s + '/')
660 if any(fn == s or fn.startswith(s + '/')
660 for s in self.substate):
661 for s in self.substate):
661 return
662 return
662 match.bad(fn, _('no such file in rev %s') % self)
663 match.bad(fn, _('no such file in rev %s') % self)
663
664
664 m = matchmod.badmatch(match, bad)
665 m = matchmod.badmatch(match, bad)
665 return self._manifest.walk(m)
666 return self._manifest.walk(m)
666
667
667 def matches(self, match):
668 def matches(self, match):
668 return self.walk(match)
669 return self.walk(match)
669
670
670 class basefilectx(object):
671 class basefilectx(object):
671 """A filecontext object represents the common logic for its children:
672 """A filecontext object represents the common logic for its children:
672 filectx: read-only access to a filerevision that is already present
673 filectx: read-only access to a filerevision that is already present
673 in the repo,
674 in the repo,
674 workingfilectx: a filecontext that represents files from the working
675 workingfilectx: a filecontext that represents files from the working
675 directory,
676 directory,
676 memfilectx: a filecontext that represents files in-memory,
677 memfilectx: a filecontext that represents files in-memory,
677 overlayfilectx: duplicate another filecontext with some fields overridden.
678 overlayfilectx: duplicate another filecontext with some fields overridden.
678 """
679 """
679 @propertycache
680 @propertycache
680 def _filelog(self):
681 def _filelog(self):
681 return self._repo.file(self._path)
682 return self._repo.file(self._path)
682
683
683 @propertycache
684 @propertycache
684 def _changeid(self):
685 def _changeid(self):
685 if r'_changeid' in self.__dict__:
686 if r'_changeid' in self.__dict__:
686 return self._changeid
687 return self._changeid
687 elif r'_changectx' in self.__dict__:
688 elif r'_changectx' in self.__dict__:
688 return self._changectx.rev()
689 return self._changectx.rev()
689 elif r'_descendantrev' in self.__dict__:
690 elif r'_descendantrev' in self.__dict__:
690 # this file context was created from a revision with a known
691 # this file context was created from a revision with a known
691 # descendant, we can (lazily) correct for linkrev aliases
692 # descendant, we can (lazily) correct for linkrev aliases
692 return self._adjustlinkrev(self._descendantrev)
693 return self._adjustlinkrev(self._descendantrev)
693 else:
694 else:
694 return self._filelog.linkrev(self._filerev)
695 return self._filelog.linkrev(self._filerev)
695
696
696 @propertycache
697 @propertycache
697 def _filenode(self):
698 def _filenode(self):
698 if r'_fileid' in self.__dict__:
699 if r'_fileid' in self.__dict__:
699 return self._filelog.lookup(self._fileid)
700 return self._filelog.lookup(self._fileid)
700 else:
701 else:
701 return self._changectx.filenode(self._path)
702 return self._changectx.filenode(self._path)
702
703
703 @propertycache
704 @propertycache
704 def _filerev(self):
705 def _filerev(self):
705 return self._filelog.rev(self._filenode)
706 return self._filelog.rev(self._filenode)
706
707
707 @propertycache
708 @propertycache
708 def _repopath(self):
709 def _repopath(self):
709 return self._path
710 return self._path
710
711
711 def __nonzero__(self):
712 def __nonzero__(self):
712 try:
713 try:
713 self._filenode
714 self._filenode
714 return True
715 return True
715 except error.LookupError:
716 except error.LookupError:
716 # file is missing
717 # file is missing
717 return False
718 return False
718
719
719 __bool__ = __nonzero__
720 __bool__ = __nonzero__
720
721
721 def __bytes__(self):
722 def __bytes__(self):
722 try:
723 try:
723 return "%s@%s" % (self.path(), self._changectx)
724 return "%s@%s" % (self.path(), self._changectx)
724 except error.LookupError:
725 except error.LookupError:
725 return "%s@???" % self.path()
726 return "%s@???" % self.path()
726
727
727 __str__ = encoding.strmethod(__bytes__)
728 __str__ = encoding.strmethod(__bytes__)
728
729
729 def __repr__(self):
730 def __repr__(self):
730 return r"<%s %s>" % (type(self).__name__, str(self))
731 return r"<%s %s>" % (type(self).__name__, str(self))
731
732
732 def __hash__(self):
733 def __hash__(self):
733 try:
734 try:
734 return hash((self._path, self._filenode))
735 return hash((self._path, self._filenode))
735 except AttributeError:
736 except AttributeError:
736 return id(self)
737 return id(self)
737
738
738 def __eq__(self, other):
739 def __eq__(self, other):
739 try:
740 try:
740 return (type(self) == type(other) and self._path == other._path
741 return (type(self) == type(other) and self._path == other._path
741 and self._filenode == other._filenode)
742 and self._filenode == other._filenode)
742 except AttributeError:
743 except AttributeError:
743 return False
744 return False
744
745
745 def __ne__(self, other):
746 def __ne__(self, other):
746 return not (self == other)
747 return not (self == other)
747
748
748 def filerev(self):
749 def filerev(self):
749 return self._filerev
750 return self._filerev
750 def filenode(self):
751 def filenode(self):
751 return self._filenode
752 return self._filenode
752 @propertycache
753 @propertycache
753 def _flags(self):
754 def _flags(self):
754 return self._changectx.flags(self._path)
755 return self._changectx.flags(self._path)
755 def flags(self):
756 def flags(self):
756 return self._flags
757 return self._flags
757 def filelog(self):
758 def filelog(self):
758 return self._filelog
759 return self._filelog
759 def rev(self):
760 def rev(self):
760 return self._changeid
761 return self._changeid
761 def linkrev(self):
762 def linkrev(self):
762 return self._filelog.linkrev(self._filerev)
763 return self._filelog.linkrev(self._filerev)
763 def node(self):
764 def node(self):
764 return self._changectx.node()
765 return self._changectx.node()
765 def hex(self):
766 def hex(self):
766 return self._changectx.hex()
767 return self._changectx.hex()
767 def user(self):
768 def user(self):
768 return self._changectx.user()
769 return self._changectx.user()
769 def date(self):
770 def date(self):
770 return self._changectx.date()
771 return self._changectx.date()
771 def files(self):
772 def files(self):
772 return self._changectx.files()
773 return self._changectx.files()
773 def description(self):
774 def description(self):
774 return self._changectx.description()
775 return self._changectx.description()
775 def branch(self):
776 def branch(self):
776 return self._changectx.branch()
777 return self._changectx.branch()
777 def extra(self):
778 def extra(self):
778 return self._changectx.extra()
779 return self._changectx.extra()
779 def phase(self):
780 def phase(self):
780 return self._changectx.phase()
781 return self._changectx.phase()
781 def phasestr(self):
782 def phasestr(self):
782 return self._changectx.phasestr()
783 return self._changectx.phasestr()
783 def obsolete(self):
784 def obsolete(self):
784 return self._changectx.obsolete()
785 return self._changectx.obsolete()
785 def instabilities(self):
786 def instabilities(self):
786 return self._changectx.instabilities()
787 return self._changectx.instabilities()
787 def manifest(self):
788 def manifest(self):
788 return self._changectx.manifest()
789 return self._changectx.manifest()
789 def changectx(self):
790 def changectx(self):
790 return self._changectx
791 return self._changectx
791 def renamed(self):
792 def renamed(self):
792 return self._copied
793 return self._copied
793 def repo(self):
794 def repo(self):
794 return self._repo
795 return self._repo
795 def size(self):
796 def size(self):
796 return len(self.data())
797 return len(self.data())
797
798
798 def path(self):
799 def path(self):
799 return self._path
800 return self._path
800
801
801 def isbinary(self):
802 def isbinary(self):
802 try:
803 try:
803 return stringutil.binary(self.data())
804 return stringutil.binary(self.data())
804 except IOError:
805 except IOError:
805 return False
806 return False
806 def isexec(self):
807 def isexec(self):
807 return 'x' in self.flags()
808 return 'x' in self.flags()
808 def islink(self):
809 def islink(self):
809 return 'l' in self.flags()
810 return 'l' in self.flags()
810
811
811 def isabsent(self):
812 def isabsent(self):
812 """whether this filectx represents a file not in self._changectx
813 """whether this filectx represents a file not in self._changectx
813
814
814 This is mainly for merge code to detect change/delete conflicts. This is
815 This is mainly for merge code to detect change/delete conflicts. This is
815 expected to be True for all subclasses of basectx."""
816 expected to be True for all subclasses of basectx."""
816 return False
817 return False
817
818
818 _customcmp = False
819 _customcmp = False
819 def cmp(self, fctx):
820 def cmp(self, fctx):
820 """compare with other file context
821 """compare with other file context
821
822
822 returns True if different than fctx.
823 returns True if different than fctx.
823 """
824 """
824 if fctx._customcmp:
825 if fctx._customcmp:
825 return fctx.cmp(self)
826 return fctx.cmp(self)
826
827
827 if (fctx._filenode is None
828 if (fctx._filenode is None
828 and (self._repo._encodefilterpats
829 and (self._repo._encodefilterpats
829 # if file data starts with '\1\n', empty metadata block is
830 # if file data starts with '\1\n', empty metadata block is
830 # prepended, which adds 4 bytes to filelog.size().
831 # prepended, which adds 4 bytes to filelog.size().
831 or self.size() - 4 == fctx.size())
832 or self.size() - 4 == fctx.size())
832 or self.size() == fctx.size()):
833 or self.size() == fctx.size()):
833 return self._filelog.cmp(self._filenode, fctx.data())
834 return self._filelog.cmp(self._filenode, fctx.data())
834
835
835 return True
836 return True
836
837
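# A minimal usage sketch (assuming two comparable file contexts ``fctx1`` and
# ``fctx2``); note the inverted sense: True means the contents differ:
#
#   if not fctx1.cmp(fctx2):
#       ui.status(b"%s is unchanged\n" % fctx1.path())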
837 def _adjustlinkrev(self, srcrev, inclusive=False):
838 def _adjustlinkrev(self, srcrev, inclusive=False):
838 """return the first ancestor of <srcrev> introducing <fnode>
839 """return the first ancestor of <srcrev> introducing <fnode>
839
840
840 If the linkrev of the file revision does not point to an ancestor of
841 If the linkrev of the file revision does not point to an ancestor of
841 srcrev, we'll walk down the ancestors until we find one introducing
842 srcrev, we'll walk down the ancestors until we find one introducing
842 this file revision.
843 this file revision.
843
844
844 :srcrev: the changeset revision we search ancestors from
845 :srcrev: the changeset revision we search ancestors from
845 :inclusive: if true, the src revision will also be checked
846 :inclusive: if true, the src revision will also be checked
846 """
847 """
847 repo = self._repo
848 repo = self._repo
848 cl = repo.unfiltered().changelog
849 cl = repo.unfiltered().changelog
849 mfl = repo.manifestlog
850 mfl = repo.manifestlog
850 # fetch the linkrev
851 # fetch the linkrev
851 lkr = self.linkrev()
852 lkr = self.linkrev()
852 # hack to reuse ancestor computation when searching for renames
853 # hack to reuse ancestor computation when searching for renames
853 memberanc = getattr(self, '_ancestrycontext', None)
854 memberanc = getattr(self, '_ancestrycontext', None)
854 iteranc = None
855 iteranc = None
855 if srcrev is None:
856 if srcrev is None:
856 # wctx case, used by workingfilectx during mergecopy
857 # wctx case, used by workingfilectx during mergecopy
857 revs = [p.rev() for p in self._repo[None].parents()]
858 revs = [p.rev() for p in self._repo[None].parents()]
858 inclusive = True # we skipped the real (revless) source
859 inclusive = True # we skipped the real (revless) source
859 else:
860 else:
860 revs = [srcrev]
861 revs = [srcrev]
861 if memberanc is None:
862 if memberanc is None:
862 memberanc = iteranc = cl.ancestors(revs, lkr,
863 memberanc = iteranc = cl.ancestors(revs, lkr,
863 inclusive=inclusive)
864 inclusive=inclusive)
864 # check if this linkrev is an ancestor of srcrev
865 # check if this linkrev is an ancestor of srcrev
865 if lkr not in memberanc:
866 if lkr not in memberanc:
866 if iteranc is None:
867 if iteranc is None:
867 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
868 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
868 fnode = self._filenode
869 fnode = self._filenode
869 path = self._path
870 path = self._path
870 for a in iteranc:
871 for a in iteranc:
871 ac = cl.read(a) # get changeset data (we avoid object creation)
872 ac = cl.read(a) # get changeset data (we avoid object creation)
872 if path in ac[3]: # checking the 'files' field.
873 if path in ac[3]: # checking the 'files' field.
873 # The file has been touched, check if the content is
874 # The file has been touched, check if the content is
874 # similar to the one we search for.
875 # similar to the one we search for.
875 if fnode == mfl[ac[0]].readfast().get(path):
876 if fnode == mfl[ac[0]].readfast().get(path):
876 return a
877 return a
877 # In theory, we should never get out of that loop without a result.
878 # In theory, we should never get out of that loop without a result.
878 # But if manifest uses a buggy file revision (not children of the
879 # But if manifest uses a buggy file revision (not children of the
879 # one it replaces) we could. Such a buggy situation will likely
880 # one it replaces) we could. Such a buggy situation will likely
880 # result in a crash somewhere else at some point.
881 # result in a crash somewhere else at some point.
881 return lkr
882 return lkr
882
883
883 def introrev(self):
884 def introrev(self):
884 """return the rev of the changeset which introduced this file revision
885 """return the rev of the changeset which introduced this file revision
885
886
886 This method is different from linkrev because it takes into account the
887 This method is different from linkrev because it takes into account the
887 changeset the filectx was created from. It ensures the returned
888 changeset the filectx was created from. It ensures the returned
888 revision is one of its ancestors. This prevents bugs from
889 revision is one of its ancestors. This prevents bugs from
889 'linkrev-shadowing' when a file revision is used by multiple
890 'linkrev-shadowing' when a file revision is used by multiple
890 changesets.
891 changesets.
891 """
892 """
892 lkr = self.linkrev()
893 lkr = self.linkrev()
893 attrs = vars(self)
894 attrs = vars(self)
894 noctx = not (r'_changeid' in attrs or r'_changectx' in attrs)
895 noctx = not (r'_changeid' in attrs or r'_changectx' in attrs)
895 if noctx or self.rev() == lkr:
896 if noctx or self.rev() == lkr:
896 return self.linkrev()
897 return self.linkrev()
897 return self._adjustlinkrev(self.rev(), inclusive=True)
898 return self._adjustlinkrev(self.rev(), inclusive=True)
898
899
    def introfilectx(self):
        """Return filectx having identical contents, but pointing to the
        changeset revision where this filectx was introduced"""
        introrev = self.introrev()
        if self.rev() == introrev:
            return self
        return self.filectx(self.filenode(), changeid=introrev)

    def _parentfilectx(self, path, fileid, filelog):
        """create parent filectx keeping ancestry info for _adjustlinkrev()"""
        fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
        if r'_changeid' in vars(self) or r'_changectx' in vars(self):
            # If self is associated with a changeset (probably explicitly
            # fed), ensure the created filectx is associated with a
            # changeset that is an ancestor of self.changectx.
            # This lets us later use _adjustlinkrev to get a correct link.
            fctx._descendantrev = self.rev()
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        elif r'_descendantrev' in vars(self):
            # Otherwise propagate _descendantrev if we have one associated.
            fctx._descendantrev = self._descendantrev
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        return fctx

    def parents(self):
        _path = self._path
        fl = self._filelog
        parents = self._filelog.parents(self._filenode)
        pl = [(_path, node, fl) for node in parents if node != nullid]

        r = fl.renamed(self._filenode)
        if r:
            # - In the simple rename case, both parents are nullid, pl is empty.
            # - In case of merge, only one of the parents is nullid and should
            #   be replaced with the rename information. This parent is -always-
            #   the first one.
            #
            # As nullid parents have always been filtered out in the previous
            # list comprehension, inserting at index 0 will always result in
            # "replacing the first nullid parent with the rename information".
            pl.insert(0, (r[0], r[1], self._repo.file(r[0])))

        return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]

    def p1(self):
        return self.parents()[0]

    def p2(self):
        p = self.parents()
        if len(p) == 2:
            return p[1]
        return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)

    def annotate(self, follow=False, skiprevs=None, diffopts=None):
        """Returns a list of annotateline objects for each line in the file

        - line.fctx is the filectx of the node where that line was last changed
        - line.lineno is the line number at the first appearance in the managed
          file
        - line.text is the data on that line (including newline character)
        """
        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            # Cut _descendantrev here to mitigate the penalty of lazy linkrev
            # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
            # from the topmost introrev (= srcrev) down to p.linkrev() if it
            # isn't an ancestor of the srcrev.
            f._changeid
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
            # from the cache to save time
            for p in pl:
                if not r'_filelog' in p.__dict__:
                    p._filelog = getlog(p.path())

            return pl

        # use linkrev to find the first changeset where self appeared
        base = self.introfilectx()
        if getattr(base, '_ancestrycontext', None) is None:
            cl = self._repo.changelog
            if base.rev() is None:
                # wctx is not inclusive, but works because _ancestrycontext
                # is used to test filelog revisions
                ac = cl.ancestors([p.rev() for p in base.parents()],
                                  inclusive=True)
            else:
                ac = cl.ancestors([base.rev()], inclusive=True)
            base._ancestrycontext = ac

        return dagop.annotate(base, parents, skiprevs=skiprevs,
                              diffopts=diffopts)

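    # Illustrative sketch for annotate() above (not part of the original
    # module). Assuming a hypothetical filectx `fctx`, the returned objects
    # expose the attributes documented in the docstring:
    #
    #   for line in fctx.annotate(follow=True):
    #       print(line.fctx.rev(), line.lineno, line.text)
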
    def ancestors(self, followfirst=False):
        visit = {}
        c = self
        if followfirst:
            cut = 1
        else:
            cut = None

        while True:
            for parent in c.parents()[:cut]:
                visit[(parent.linkrev(), parent.filenode())] = parent
            if not visit:
                break
            c = visit.pop(max(visit))
            yield c

    def decodeddata(self):
        """Returns `data()` after running repository decoding filters.

        This is often equivalent to how the data would be expressed on disk.
        """
        return self._repo.wwritedata(self.path(), self.data())

class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        assert (changeid is not None
                or fileid is not None
                or changectx is not None), \
                ("bad args: changeid=%r, fileid=%r, changectx=%r"
                 % (changeid, fileid, changectx))

        if filelog is not None:
            self._filelog = filelog

        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

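    # Illustrative sketch for the constructor above (not part of the original
    # module). Assuming a hypothetical repo handle, the same file revision can
    # typically be reached either directly or through a changectx:
    #
    #   fctx = filectx(repo, 'some/file.txt', changeid='tip')
    #   fctx = repo['tip']['some/file.txt']
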
    @propertycache
    def _changectx(self):
        try:
            return changectx(self._repo, self._changeid)
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to build
            # `changectx` for a filtered revision. In such a case we fall back
            # to creating `changectx` on the unfiltered version of the
            # repository. This fallback should not be an issue because
            # `changectx` from `filectx` are not used in complex operations
            # that care about filtering.
            #
            # This fallback is a cheap and dirty fix that prevents several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better than "crash".
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when solving the linkrev issues is on the table.
            return changectx(self._repo.unfiltered(), self._changeid)

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog, changeid=changeid)

    def rawdata(self):
        return self._filelog.revision(self._filenode, raw=True)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config("censor", "policy") == "ignore":
                return ""
            raise error.Abort(_("censored node: %s") % short(self._filenode),
                              hint=_("set censor.policy to ignore errors"))

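    # Illustrative note for data() above (not part of the original module):
    # reads of censored file revisions abort unless the user opts in to
    # ignoring them, e.g. with an hgrc snippet such as:
    #
    #   [censor]
    #   policy = ignore
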
    def size(self):
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If a rename is logged in the file revision, we report a copy for the
        changeset only if the file revision's linkrev points back to the
        changeset in question or both changeset parents contain different
        file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return renamed

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]

class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        super(committablectx, self).__init__(repo)
        self._rev = None
        self._node = None
        self._text = text
        if date:
            self._date = dateutil.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise error.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
        if self._extra['branch'] == '':
            self._extra['branch'] = 'default'

    def __bytes__(self):
        return bytes(self._parents[0]) + "+"

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func

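    # Illustrative note for _buildflagfunc() above (not part of the original
    # module): in the merge case the flag is taken from whichever parent
    # changed it relative to the common ancestor. For example, with flags
    # ('' = plain file, 'x' = executable, 'l' = symlink):
    #
    #   fla='' fl1='x' fl2=''  ->  'x'  (only p1 changed the flag)
    #   fla='' fl1=''  fl2='x' ->  'x'  (only p2 changed the flag)
    #   fla='' fl1='x' fl2='l' ->  ''   (conflict, punt)
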
    @propertycache
    def _flagfunc(self):
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    @propertycache
    def _status(self):
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        ui = self._repo.ui
        date = ui.configdate('devel', 'default-date')
        if date is None:
            date = dateutil.makedate()
        return date

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        return None
    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)

    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def isinmemory(self):
        return False

    def tags(self):
        return []

    def bookmarks(self):
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        phase = phases.draft # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def walk(self, match):
        '''Generates matching file names.'''
        return sorted(self._repo.dirstate.walk(match,
                                               subrepos=sorted(self.substate),
                                               unknown=True, ignored=False))

    def matches(self, match):
        return sorted(self._repo.dirstate.matches(match))

    def ancestors(self):
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]):
            yield changectx(self._repo, a)

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.normal(f)
            for f in self.removed():
                self._repo.dirstate.drop(f)
            self._repo.dirstate.setparents(node)

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())

    def dirty(self, missing=False, merge=True, branch=True):
        return False

class workingctx(committablectx):
    """A workingctx object makes access to data related to
    the current working directory convenient.
    date - any valid date string or (unixtime, offset), or None.
    user - username string, or None.
    extra - a dictionary of extra values, or None.
    changes - a list of file lists as returned by localrepo.status()
              or None to use the repository status.
    """
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        super(workingctx, self).__init__(repo, text, user, date, extra, changes)

    def __iter__(self):
        d = self._repo.dirstate
        for f in d:
            if d[f] != 'r':
                yield f

    def __contains__(self, key):
        return self._repo.dirstate[key] not in "?r"

    def hex(self):
        return hex(wdirid)

    @propertycache
    def _parents(self):
        p = self._repo.dirstate.parents()
        if p[1] == nullid:
            p = p[:-1]
        return [changectx(self._repo, x) for x in p]

    def _fileinfo(self, path):
        # populate __dict__['_manifest'] as workingctx has no _manifestdelta
        self._manifest
        return super(workingctx, self)._fileinfo(path)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        return workingfilectx(self._repo, path, workingctx=self,
                              filelog=filelog)

    def dirty(self, missing=False, merge=True, branch=True):
        "check whether a working directory is modified"
        # check subrepos first
        for s in sorted(self.substate):
            if self.sub(s).dirty(missing=missing):
                return True
        # check current working dir
        return ((merge and self.p2()) or
                (branch and self.branch() != self.p1().branch()) or
                self.modified() or self.added() or self.removed() or
                (missing and self.deleted()))

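    # Illustrative sketch for dirty() above (not part of the original module).
    # Assuming a hypothetical repo handle, the working directory context is
    # obtained with repo[None]:
    #
    #   wctx = repo[None]
    #   if wctx.dirty(missing=True):
    #       pass  # uncommitted (or missing) changes present
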
    def add(self, list, prefix=""):
        with self._repo.wlock():
            ui, ds = self._repo.ui, self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                # ds.pathto() returns an absolute file when this is invoked from
                # the keyword extension. That gets flagged as non-portable on
                # Windows, since it contains the drive letter and colon.
                scmutil.checkportable(ui, os.path.join(prefix, f))
                try:
                    st = lstat(f)
                except OSError:
                    ui.warn(_("%s does not exist!\n") % uipath(f))
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    ui.warn(_("%s: up to %d MB of RAM may be required "
                              "to manage this file\n"
                              "(use 'hg revert %s' to cancel the "
                              "pending addition)\n")
                            % (f, 3 * st.st_size // 1000000, uipath(f)))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(_("%s not added: only files and symlinks "
                              "supported currently\n") % uipath(f))
                    rejected.append(f)
                elif ds[f] in 'amn':
                    ui.warn(_("%s already tracked!\n") % uipath(f))
                elif ds[f] == 'r':
                    ds.normallookup(f)
                else:
                    ds.add(f)
            return rejected

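    # Illustrative note for add() above (not part of the original module):
    # the warning fires for files larger than 10,000,000 bytes and estimates
    # memory use at roughly three times the file size. For example, a
    # 50,000,000-byte file yields 3 * 50000000 // 1000000 == 150, i.e.
    # "up to 150 MB of RAM may be required".
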
    def forget(self, files, prefix=""):
        with self._repo.wlock():
            ds = self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            for f in files:
                if f not in self._repo.dirstate:
                    self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
                    rejected.append(f)
                elif self._repo.dirstate[f] != 'a':
                    self._repo.dirstate.remove(f)
                else:
                    self._repo.dirstate.drop(f)
            return rejected

    def undelete(self, list):
        pctxs = self.parents()
        with self._repo.wlock():
            ds = self._repo.dirstate
            for f in list:
                if self._repo.dirstate[f] != 'r':
                    self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
                else:
                    fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
                    t = fctx.data()
                    self._repo.wwrite(f, t, fctx.flags())
                    self._repo.dirstate.normal(f)

    def copy(self, source, dest):
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            self._repo.ui.warn(_("%s does not exist!\n")
                               % self._repo.dirstate.pathto(dest))
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(_("copy failed: %s is not a file or a "
                                 "symbolic link\n")
                               % self._repo.dirstate.pathto(dest))
        else:
            with self._repo.wlock():
                if self._repo.dirstate[dest] in '?':
                    self._repo.dirstate.add(dest)
                elif self._repo.dirstate[dest] in 'r':
                    self._repo.dirstate.normallookup(dest)
                self._repo.dirstate.copy(source, dest)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        r = self._repo

        # Only a case insensitive filesystem needs magic to translate user input
        # to actual case in the filesystem.
        icasefs = not util.fscasesensitive(r.root)
        return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
                              default, auditor=r.auditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn,
                              icasefs=icasefs)

    def _filtersuspectsymlink(self, files):
        if not files or self._repo.dirstate._checklink:
            return files

        # Symlink placeholders may get non-symlink-like contents
        # via user error or dereferencing by NFS or Samba servers,
        # so we filter out any placeholders that don't look like a
        # symlink
        sane = []
        for f in files:
            if self.flags(f) == 'l':
                d = self[f].data()
                if (d == '' or len(d) >= 1024 or '\n' in d
                    or stringutil.binary(d)):
                    self._repo.ui.debug('ignoring suspect symlink placeholder'
                                        ' "%s"\n' % f)
                    continue
            sane.append(f)
        return sane

    def _checklookup(self, files):
        # check for any possibly clean files
        if not files:
            return [], [], []

        modified = []
        deleted = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            try:
                # This will return True for a file that got replaced by a
                # directory in the interim, but fixing that is pretty hard.
                if (f not in pctx or self.flags(f) != pctx.flags(f)
                    or pctx[f].cmp(self[f])):
                    modified.append(f)
                else:
                    fixup.append(f)
            except (IOError, OSError):
                # A file became inaccessible in between? Mark it as deleted,
                # matching dirstate behavior (issue5584).
                # The dirstate has more complex behavior around whether a
                # missing file matches a directory, etc, but we don't need to
                # bother with that: if f has made it to this point, we're sure
                # it's in the dirstate.
                deleted.append(f)

        return modified, deleted, fixup

    def _poststatusfixup(self, status, fixup):
        """update dirstate for files that are actually clean"""
        poststatus = self._repo.postdsstatus()
        if fixup or poststatus:
            try:
                oldid = self._repo.dirstate.identity()

                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                with self._repo.wlock(False):
                    if self._repo.dirstate.identity() == oldid:
                        if fixup:
                            normal = self._repo.dirstate.normal
                            for f in fixup:
                                normal(f)
                            # write changes out explicitly, because nesting
                            # wlock at runtime may prevent 'wlock.release()'
                            # after this block from doing so for subsequent
                            # changing files
                            tr = self._repo.currenttransaction()
                            self._repo.dirstate.write(tr)

                        if poststatus:
                            for ps in poststatus:
                                ps(self, status)
                    else:
                        # in this case, writing changes out breaks
                        # consistency, because .hg/dirstate was
                        # already changed simultaneously after last
                        # caching (see also issue5584 for detail)
                        self._repo.ui.debug('skip updating dirstate: '
                                            'identity mismatch\n')
            except error.LockError:
                pass
            finally:
                # Even if the wlock couldn't be grabbed, clear out the list.
                self._repo.clearpostdsstatus()

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        '''Gets the status from the dirstate -- internal use only.'''
        subrepos = []
        if '.hgsub' in self:
            subrepos = sorted(self.substate)
        cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
                                            clean=clean, unknown=unknown)

        # check for any possibly clean files
        fixup = []
        if cmp:
            modified2, deleted2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)
            s.deleted.extend(deleted2)

        if fixup and clean:
            s.clean.extend(fixup)

        self._poststatusfixup(s, fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(s.modified, s.added, s.removed,
                                              s.deleted, [], [], [])
            else:
                self._status = s

        return s

    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status

        This reuses the file nodeids from the parent, but uses special node
        identifiers for added and modified files. This is used by manifest
        merge to see that files are different and by update logic to avoid
        deleting newly added files.
        """
        return self._buildstatusmanifest(self._status)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results."""
        parents = self.parents()

        man = parents[0].manifest().copy()

        ff = self._flagfunc
        for i, l in ((addednodeid, status.added),
                     (modifiednodeid, status.modified)):
            for f in l:
                man[f] = i
                try:
                    man.setflag(f, ff(f))
                except OSError:
                    pass

        for f in status.deleted + status.removed:
            if f in man:
                del man[f]

        return man

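    # Illustrative note for _buildstatusmanifest() above (not part of the
    # original module): added and modified files are mapped to sentinel node
    # ids, so a comparison against the parent manifest always sees them as
    # changed, e.g. (hypothetical paths):
    #
    #   man = wctx._buildstatusmanifest(wctx._status)
    #   man['newly-added-file'] == addednodeid
    #   man['locally-edited-file'] == modifiednodeid
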
1632 def _buildstatus(self, other, s, match, listignored, listclean,
1633 def _buildstatus(self, other, s, match, listignored, listclean,
1633 listunknown):
1634 listunknown):
1634 """build a status with respect to another context
1635 """build a status with respect to another context
1635
1636
1636 This includes logic for maintaining the fast path of status when
1637 This includes logic for maintaining the fast path of status when
1637 comparing the working directory against its parent, which is to skip
1638 comparing the working directory against its parent, which is to skip
1638 building a new manifest if self (working directory) is not comparing
1639 building a new manifest if self (working directory) is not comparing
1639 against its parent (repo['.']).
1640 against its parent (repo['.']).
1640 """
1641 """
1641 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1642 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1642 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1643 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1643 # might have accidentally ended up with the entire contents of the file
1644 # might have accidentally ended up with the entire contents of the file
1644 # they are supposed to be linking to.
1645 # they are supposed to be linking to.
1645 s.modified[:] = self._filtersuspectsymlink(s.modified)
1646 s.modified[:] = self._filtersuspectsymlink(s.modified)
1646 if other != self._repo['.']:
1647 if other != self._repo['.']:
1647 s = super(workingctx, self)._buildstatus(other, s, match,
1648 s = super(workingctx, self)._buildstatus(other, s, match,
1648 listignored, listclean,
1649 listignored, listclean,
1649 listunknown)
1650 listunknown)
1650 return s
1651 return s
1651
1652
1652 def _matchstatus(self, other, match):
1653 def _matchstatus(self, other, match):
1653 """override the match method with a filter for directory patterns
1654 """override the match method with a filter for directory patterns
1654
1655
1655 We use inheritance to customize the match.bad method only in cases of
1656 We use inheritance to customize the match.bad method only in cases of
1656 workingctx since it belongs only to the working directory when
1657 workingctx since it belongs only to the working directory when
1657 comparing against the parent changeset.
1658 comparing against the parent changeset.
1658
1659
1659 If we aren't comparing against the working directory's parent, then we
1660 If we aren't comparing against the working directory's parent, then we
1660 just use the default match object sent to us.
1661 just use the default match object sent to us.
1661 """
1662 """
1662 if other != self._repo['.']:
1663 if other != self._repo['.']:
1663 def bad(f, msg):
1664 def bad(f, msg):
1664 # 'f' may be a directory pattern from 'match.files()',
1665 # 'f' may be a directory pattern from 'match.files()',
1665 # so 'f not in ctx1' is not enough
1666 # so 'f not in ctx1' is not enough
1666 if f not in other and not other.hasdir(f):
1667 if f not in other and not other.hasdir(f):
1667 self._repo.ui.warn('%s: %s\n' %
1668 self._repo.ui.warn('%s: %s\n' %
1668 (self._repo.dirstate.pathto(f), msg))
1669 (self._repo.dirstate.pathto(f), msg))
1669 match.bad = bad
1670 match.bad = bad
1670 return match
1671 return match
1671
1672
1672 def markcommitted(self, node):
1673 def markcommitted(self, node):
1673 super(workingctx, self).markcommitted(node)
1674 super(workingctx, self).markcommitted(node)
1674
1675
1675 sparse.aftercommit(self._repo, node)
1676 sparse.aftercommit(self._repo, node)
1676
1677
1677 class committablefilectx(basefilectx):
1678 class committablefilectx(basefilectx):
1678 """A committablefilectx provides common functionality for a file context
1679 """A committablefilectx provides common functionality for a file context
1679 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1680 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1680 def __init__(self, repo, path, filelog=None, ctx=None):
1681 def __init__(self, repo, path, filelog=None, ctx=None):
1681 self._repo = repo
1682 self._repo = repo
1682 self._path = path
1683 self._path = path
1683 self._changeid = None
1684 self._changeid = None
1684 self._filerev = self._filenode = None
1685 self._filerev = self._filenode = None
1685
1686
1686 if filelog is not None:
1687 if filelog is not None:
1687 self._filelog = filelog
1688 self._filelog = filelog
1688 if ctx:
1689 if ctx:
1689 self._changectx = ctx
1690 self._changectx = ctx
1690
1691
1691 def __nonzero__(self):
1692 def __nonzero__(self):
1692 return True
1693 return True
1693
1694
1694 __bool__ = __nonzero__
1695 __bool__ = __nonzero__
1695
1696
1696 def linkrev(self):
1697 def linkrev(self):
1697 # linked to self._changectx no matter if file is modified or not
1698 # linked to self._changectx no matter if file is modified or not
1698 return self.rev()
1699 return self.rev()
1699
1700
1700 def parents(self):
1701 def parents(self):
1701 '''return parent filectxs, following copies if necessary'''
1702 '''return parent filectxs, following copies if necessary'''
1702 def filenode(ctx, path):
1703 def filenode(ctx, path):
1703 return ctx._manifest.get(path, nullid)
1704 return ctx._manifest.get(path, nullid)
1704
1705
1705 path = self._path
1706 path = self._path
1706 fl = self._filelog
1707 fl = self._filelog
1707 pcl = self._changectx._parents
1708 pcl = self._changectx._parents
1708 renamed = self.renamed()
1709 renamed = self.renamed()
1709
1710
1710 if renamed:
1711 if renamed:
1711 pl = [renamed + (None,)]
1712 pl = [renamed + (None,)]
1712 else:
1713 else:
1713 pl = [(path, filenode(pcl[0], path), fl)]
1714 pl = [(path, filenode(pcl[0], path), fl)]
1714
1715
1715 for pc in pcl[1:]:
1716 for pc in pcl[1:]:
1716 pl.append((path, filenode(pc, path), fl))
1717 pl.append((path, filenode(pc, path), fl))
1717
1718
1718 return [self._parentfilectx(p, fileid=n, filelog=l)
1719 return [self._parentfilectx(p, fileid=n, filelog=l)
1719 for p, n, l in pl if n != nullid]
1720 for p, n, l in pl if n != nullid]
1720
1721
1721 def children(self):
1722 def children(self):
1722 return []
1723 return []
1723
1724
1724 class workingfilectx(committablefilectx):
1725 class workingfilectx(committablefilectx):
1725 """A workingfilectx object makes access to data related to a particular
1726 """A workingfilectx object makes access to data related to a particular
1726 file in the working directory convenient."""
1727 file in the working directory convenient."""
1727 def __init__(self, repo, path, filelog=None, workingctx=None):
1728 def __init__(self, repo, path, filelog=None, workingctx=None):
1728 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1729 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1729
1730
1730 @propertycache
1731 @propertycache
1731 def _changectx(self):
1732 def _changectx(self):
1732 return workingctx(self._repo)
1733 return workingctx(self._repo)
1733
1734
1734 def data(self):
1735 def data(self):
1735 return self._repo.wread(self._path)
1736 return self._repo.wread(self._path)
1736 def renamed(self):
1737 def renamed(self):
1737 rp = self._repo.dirstate.copied(self._path)
1738 rp = self._repo.dirstate.copied(self._path)
1738 if not rp:
1739 if not rp:
1739 return None
1740 return None
1740 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1741 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1741
1742
1742 def size(self):
1743 def size(self):
1743 return self._repo.wvfs.lstat(self._path).st_size
1744 return self._repo.wvfs.lstat(self._path).st_size
1744 def date(self):
1745 def date(self):
1745 t, tz = self._changectx.date()
1746 t, tz = self._changectx.date()
1746 try:
1747 try:
1747 return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
1748 return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
1748 except OSError as err:
1749 except OSError as err:
1749 if err.errno != errno.ENOENT:
1750 if err.errno != errno.ENOENT:
1750 raise
1751 raise
1751 return (t, tz)
1752 return (t, tz)
1752
1753
1753 def exists(self):
1754 def exists(self):
1754 return self._repo.wvfs.exists(self._path)
1755 return self._repo.wvfs.exists(self._path)
1755
1756
1756 def lexists(self):
1757 def lexists(self):
1757 return self._repo.wvfs.lexists(self._path)
1758 return self._repo.wvfs.lexists(self._path)
1758
1759
1759 def audit(self):
1760 def audit(self):
1760 return self._repo.wvfs.audit(self._path)
1761 return self._repo.wvfs.audit(self._path)
1761
1762
1762 def cmp(self, fctx):
1763 def cmp(self, fctx):
1763 """compare with other file context
1764 """compare with other file context
1764
1765
1765 returns True if different than fctx.
1766 returns True if different than fctx.
1766 """
1767 """
1767 # fctx should be a filectx (not a workingfilectx)
1768 # fctx should be a filectx (not a workingfilectx)
1768 # invert comparison to reuse the same code path
1769 # invert comparison to reuse the same code path
1769 return fctx.cmp(self)
1770 return fctx.cmp(self)
1770
1771
1771 def remove(self, ignoremissing=False):
1772 def remove(self, ignoremissing=False):
1772 """wraps unlink for a repo's working directory"""
1773 """wraps unlink for a repo's working directory"""
1773 self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing)
1774 self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing)
1774
1775
1775 def write(self, data, flags, backgroundclose=False, **kwargs):
1776 def write(self, data, flags, backgroundclose=False, **kwargs):
1776 """wraps repo.wwrite"""
1777 """wraps repo.wwrite"""
1777 self._repo.wwrite(self._path, data, flags,
1778 self._repo.wwrite(self._path, data, flags,
1778 backgroundclose=backgroundclose,
1779 backgroundclose=backgroundclose,
1779 **kwargs)
1780 **kwargs)
1780
1781
1781 def markcopied(self, src):
1782 def markcopied(self, src):
1782 """marks this file a copy of `src`"""
1783 """marks this file a copy of `src`"""
1783 if self._repo.dirstate[self._path] in "nma":
1784 if self._repo.dirstate[self._path] in "nma":
1784 self._repo.dirstate.copy(src, self._path)
1785 self._repo.dirstate.copy(src, self._path)
1785
1786
1786 def clearunknown(self):
1787 def clearunknown(self):
1787 """Removes conflicting items in the working directory so that
1788 """Removes conflicting items in the working directory so that
1788 ``write()`` can be called successfully.
1789 ``write()`` can be called successfully.
1789 """
1790 """
1790 wvfs = self._repo.wvfs
1791 wvfs = self._repo.wvfs
1791 f = self._path
1792 f = self._path
1792 wvfs.audit(f)
1793 wvfs.audit(f)
1793 if wvfs.isdir(f) and not wvfs.islink(f):
1794 if wvfs.isdir(f) and not wvfs.islink(f):
1794 wvfs.rmtree(f, forcibly=True)
1795 wvfs.rmtree(f, forcibly=True)
1795 if self._repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
1796 if self._repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
1796 for p in reversed(list(util.finddirs(f))):
1797 for p in reversed(list(util.finddirs(f))):
1797 if wvfs.isfileorlink(p):
1798 if wvfs.isfileorlink(p):
1798 wvfs.unlink(p)
1799 wvfs.unlink(p)
1799 break
1800 break
1800
1801
1801 def setflags(self, l, x):
1802 def setflags(self, l, x):
1802 self._repo.wvfs.setflags(self._path, l, x)
1803 self._repo.wvfs.setflags(self._path, l, x)
1803
1804
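# --- Illustrative sketch (not part of the original module) ---
# A minimal example of driving the workingfilectx API above, assuming
# ``repo`` is an already-open localrepo; the path, data and flags used here
# are made up for illustration.
def _example_workingfilectx(repo):
    """Hypothetical helper showing typical workingfilectx calls."""
    wctx = repo[None]                # workingctx for the working directory
    fctx = wctx['hello.txt']         # workingfilectx for a tracked path
    fctx.write('hello\n', '')        # wraps repo.wwrite()
    if fctx.exists():
        fctx.setflags(False, True)   # not a symlink, mark executable
    fctx.markcopied('hello.orig')    # record the copy source in the dirstate
    return fctx.size(), fctx.date()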
1804 class overlayworkingctx(committablectx):
1805 class overlayworkingctx(committablectx):
1805 """Wraps another mutable context with a write-back cache that can be
1806 """Wraps another mutable context with a write-back cache that can be
1806 converted into a commit context.
1807 converted into a commit context.
1807
1808
1808 self._cache[path] maps to a dict with keys: {
1809 self._cache[path] maps to a dict with keys: {
1809 'exists': bool?
1810 'exists': bool?
1810 'date': date?
1811 'date': date?
1811 'data': str?
1812 'data': str?
1812 'flags': str?
1813 'flags': str?
1813 'copied': str? (path or None)
1814 'copied': str? (path or None)
1814 }
1815 }
1815 If `exists` is True, `flags` must be non-None and `date` is non-None. If it
1816 If `exists` is True, `flags` must be non-None and `date` is non-None. If it
1816 is `False`, the file was deleted.
1817 is `False`, the file was deleted.
1817 """
1818 """
1818
1819
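# Illustrative example of a single cache entry (hedged; the path and values
# are made up). After ``self.write('a/b.txt', 'data', flags='x')`` one would
# expect roughly:
#
#   self._cache['a/b.txt'] == {
#       'exists': True,
#       'date': dateutil.makedate(),   # timestamp captured at write time
#       'data': 'data',
#       'flags': 'x',
#       'copied': None,
#   }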
1819 def __init__(self, repo):
1820 def __init__(self, repo):
1820 super(overlayworkingctx, self).__init__(repo)
1821 super(overlayworkingctx, self).__init__(repo)
1821 self.clean()
1822 self.clean()
1822
1823
1823 def setbase(self, wrappedctx):
1824 def setbase(self, wrappedctx):
1824 self._wrappedctx = wrappedctx
1825 self._wrappedctx = wrappedctx
1825 self._parents = [wrappedctx]
1826 self._parents = [wrappedctx]
1826 # Drop old manifest cache as it is now out of date.
1827 # Drop old manifest cache as it is now out of date.
1827 # This is necessary when, e.g., rebasing several nodes with one
1828 # This is necessary when, e.g., rebasing several nodes with one
1828 # ``overlayworkingctx`` (e.g. with --collapse).
1829 # ``overlayworkingctx`` (e.g. with --collapse).
1829 util.clearcachedproperty(self, '_manifest')
1830 util.clearcachedproperty(self, '_manifest')
1830
1831
1831 def data(self, path):
1832 def data(self, path):
1832 if self.isdirty(path):
1833 if self.isdirty(path):
1833 if self._cache[path]['exists']:
1834 if self._cache[path]['exists']:
1834 if self._cache[path]['data']:
1835 if self._cache[path]['data']:
1835 return self._cache[path]['data']
1836 return self._cache[path]['data']
1836 else:
1837 else:
1837 # Must fall back here, too, because we only set flags.
1838 # Must fall back here, too, because we only set flags.
1838 return self._wrappedctx[path].data()
1839 return self._wrappedctx[path].data()
1839 else:
1840 else:
1840 raise error.ProgrammingError("No such file or directory: %s" %
1841 raise error.ProgrammingError("No such file or directory: %s" %
1841 path)
1842 path)
1842 else:
1843 else:
1843 return self._wrappedctx[path].data()
1844 return self._wrappedctx[path].data()
1844
1845
1845 @propertycache
1846 @propertycache
1846 def _manifest(self):
1847 def _manifest(self):
1847 parents = self.parents()
1848 parents = self.parents()
1848 man = parents[0].manifest().copy()
1849 man = parents[0].manifest().copy()
1849
1850
1850 flag = self._flagfunc
1851 flag = self._flagfunc
1851 for path in self.added():
1852 for path in self.added():
1852 man[path] = addednodeid
1853 man[path] = addednodeid
1853 man.setflag(path, flag(path))
1854 man.setflag(path, flag(path))
1854 for path in self.modified():
1855 for path in self.modified():
1855 man[path] = modifiednodeid
1856 man[path] = modifiednodeid
1856 man.setflag(path, flag(path))
1857 man.setflag(path, flag(path))
1857 for path in self.removed():
1858 for path in self.removed():
1858 del man[path]
1859 del man[path]
1859 return man
1860 return man
1860
1861
1861 @propertycache
1862 @propertycache
1862 def _flagfunc(self):
1863 def _flagfunc(self):
1863 def f(path):
1864 def f(path):
1864 return self._cache[path]['flags']
1865 return self._cache[path]['flags']
1865 return f
1866 return f
1866
1867
1867 def files(self):
1868 def files(self):
1868 return sorted(self.added() + self.modified() + self.removed())
1869 return sorted(self.added() + self.modified() + self.removed())
1869
1870
1870 def modified(self):
1871 def modified(self):
1871 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1872 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1872 self._existsinparent(f)]
1873 self._existsinparent(f)]
1873
1874
1874 def added(self):
1875 def added(self):
1875 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1876 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1876 not self._existsinparent(f)]
1877 not self._existsinparent(f)]
1877
1878
1878 def removed(self):
1879 def removed(self):
1879 return [f for f in self._cache.keys() if
1880 return [f for f in self._cache.keys() if
1880 not self._cache[f]['exists'] and self._existsinparent(f)]
1881 not self._cache[f]['exists'] and self._existsinparent(f)]
1881
1882
1882 def isinmemory(self):
1883 def isinmemory(self):
1883 return True
1884 return True
1884
1885
1885 def filedate(self, path):
1886 def filedate(self, path):
1886 if self.isdirty(path):
1887 if self.isdirty(path):
1887 return self._cache[path]['date']
1888 return self._cache[path]['date']
1888 else:
1889 else:
1889 return self._wrappedctx[path].date()
1890 return self._wrappedctx[path].date()
1890
1891
1891 def markcopied(self, path, origin):
1892 def markcopied(self, path, origin):
1892 if self.isdirty(path):
1893 if self.isdirty(path):
1893 self._cache[path]['copied'] = origin
1894 self._cache[path]['copied'] = origin
1894 else:
1895 else:
1895 raise error.ProgrammingError('markcopied() called on clean context')
1896 raise error.ProgrammingError('markcopied() called on clean context')
1896
1897
1897 def copydata(self, path):
1898 def copydata(self, path):
1898 if self.isdirty(path):
1899 if self.isdirty(path):
1899 return self._cache[path]['copied']
1900 return self._cache[path]['copied']
1900 else:
1901 else:
1901 raise error.ProgrammingError('copydata() called on clean context')
1902 raise error.ProgrammingError('copydata() called on clean context')
1902
1903
1903 def flags(self, path):
1904 def flags(self, path):
1904 if self.isdirty(path):
1905 if self.isdirty(path):
1905 if self._cache[path]['exists']:
1906 if self._cache[path]['exists']:
1906 return self._cache[path]['flags']
1907 return self._cache[path]['flags']
1907 else:
1908 else:
1908 raise error.ProgrammingError("No such file or directory: %s" %
1909 raise error.ProgrammingError("No such file or directory: %s" %
1909 path)
1910 path)
1910 else:
1911 else:
1911 return self._wrappedctx[path].flags()
1912 return self._wrappedctx[path].flags()
1912
1913
1913 def _existsinparent(self, path):
1914 def _existsinparent(self, path):
1914 try:
1915 try:
1915 # ``commitctx`` raises a ``ManifestLookupError`` if a path does not
1916 # ``commitctx`` raises a ``ManifestLookupError`` if a path does not
1916 # exist, unlike ``workingctx``, which returns a ``workingfilectx``
1917 # exist, unlike ``workingctx``, which returns a ``workingfilectx``
1917 # with an ``exists()`` function.
1918 # with an ``exists()`` function.
1918 self._wrappedctx[path]
1919 self._wrappedctx[path]
1919 return True
1920 return True
1920 except error.ManifestLookupError:
1921 except error.ManifestLookupError:
1921 return False
1922 return False
1922
1923
1923 def _auditconflicts(self, path):
1924 def _auditconflicts(self, path):
1924 """Replicates conflict checks done by wvfs.write().
1925 """Replicates conflict checks done by wvfs.write().
1925
1926
1926 Since we never write to the filesystem and never call `applyupdates` in
1927 Since we never write to the filesystem and never call `applyupdates` in
1927 IMM, we'll never check that a path is actually writable -- e.g., because
1928 IMM, we'll never check that a path is actually writable -- e.g., because
1928 it adds `a/foo`, but `a` is actually a file in the other commit.
1929 it adds `a/foo`, but `a` is actually a file in the other commit.
1929 """
1930 """
1930 def fail(path, component):
1931 def fail(path, component):
1931 # p1() is the base and we're receiving "writes" for p2()'s
1932 # p1() is the base and we're receiving "writes" for p2()'s
1932 # files.
1933 # files.
1933 if 'l' in self.p1()[component].flags():
1934 if 'l' in self.p1()[component].flags():
1934 raise error.Abort("error: %s conflicts with symlink %s "
1935 raise error.Abort("error: %s conflicts with symlink %s "
1935 "in %s." % (path, component,
1936 "in %s." % (path, component,
1936 self.p1().rev()))
1937 self.p1().rev()))
1937 else:
1938 else:
1938 raise error.Abort("error: '%s' conflicts with file '%s' in "
1939 raise error.Abort("error: '%s' conflicts with file '%s' in "
1939 "%s." % (path, component,
1940 "%s." % (path, component,
1940 self.p1().rev()))
1941 self.p1().rev()))
1941
1942
1942 # Test that each new directory to be created to write this path from p2
1943 # Test that each new directory to be created to write this path from p2
1943 # is not a file in p1.
1944 # is not a file in p1.
1944 components = path.split('/')
1945 components = path.split('/')
1945 for i in xrange(len(components)):
1946 for i in xrange(len(components)):
1946 component = "/".join(components[0:i])
1947 component = "/".join(components[0:i])
1947 if component in self.p1():
1948 if component in self.p1():
1948 fail(path, component)
1949 fail(path, component)
1949
1950
1950 # Test the other direction -- that this path from p2 isn't a directory
1951 # Test the other direction -- that this path from p2 isn't a directory
1951 # in p1 (test that p1 doesn't have any paths matching `path/*`).
1952 # in p1 (test that p1 doesn't have any paths matching `path/*`).
1952 match = matchmod.match('/', '', [path + '/'], default=b'relpath')
1953 match = matchmod.match('/', '', [path + '/'], default=b'relpath')
1953 matches = self.p1().manifest().matches(match)
1954 matches = self.p1().manifest().matches(match)
1954 if len(matches) > 0:
1955 if len(matches) > 0:
1955 if len(matches) == 1 and matches.keys()[0] == path:
1956 if len(matches) == 1 and matches.keys()[0] == path:
1956 return
1957 return
1957 raise error.Abort("error: file '%s' cannot be written because "
1958 raise error.Abort("error: file '%s' cannot be written because "
1958 "'%s/' is a folder in %s (containing %d "
1959 "'%s/' is a folder in %s (containing %d "
1959 "entries: %s)"
1960 "entries: %s)"
1960 % (path, path, self.p1(), len(matches),
1961 % (path, path, self.p1(), len(matches),
1961 ', '.join(matches.keys())))
1962 ', '.join(matches.keys())))
1962
1963
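# Illustrative example of the two conflict directions checked above (hedged;
# the file names are made up):
#
#   * writing 'a/foo' when 'a' is a *file* in p1 -> fail(path='a/foo',
#     component='a') aborts, since 'a' cannot be both a file and a directory.
#   * writing 'a' when p1 contains 'a/b' and 'a/c' -> the manifest match on
#     'a/' is non-empty, so the write aborts with the "is a folder" message.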
1963 def write(self, path, data, flags='', **kwargs):
1964 def write(self, path, data, flags='', **kwargs):
1964 if data is None:
1965 if data is None:
1965 raise error.ProgrammingError("data must be non-None")
1966 raise error.ProgrammingError("data must be non-None")
1966 self._auditconflicts(path)
1967 self._auditconflicts(path)
1967 self._markdirty(path, exists=True, data=data, date=dateutil.makedate(),
1968 self._markdirty(path, exists=True, data=data, date=dateutil.makedate(),
1968 flags=flags)
1969 flags=flags)
1969
1970
1970 def setflags(self, path, l, x):
1971 def setflags(self, path, l, x):
1971 self._markdirty(path, exists=True, date=dateutil.makedate(),
1972 self._markdirty(path, exists=True, date=dateutil.makedate(),
1972 flags=(l and 'l' or '') + (x and 'x' or ''))
1973 flags=(l and 'l' or '') + (x and 'x' or ''))
1973
1974
1974 def remove(self, path):
1975 def remove(self, path):
1975 self._markdirty(path, exists=False)
1976 self._markdirty(path, exists=False)
1976
1977
1977 def exists(self, path):
1978 def exists(self, path):
1978 """exists behaves like `lexists`, but needs to follow symlinks and
1979 """exists behaves like `lexists`, but needs to follow symlinks and
1979 return False if they are broken.
1980 return False if they are broken.
1980 """
1981 """
1981 if self.isdirty(path):
1982 if self.isdirty(path):
1982 # If this path exists and is a symlink, "follow" it by calling
1983 # If this path exists and is a symlink, "follow" it by calling
1983 # exists on the destination path.
1984 # exists on the destination path.
1984 if (self._cache[path]['exists'] and
1985 if (self._cache[path]['exists'] and
1985 'l' in self._cache[path]['flags']):
1986 'l' in self._cache[path]['flags']):
1986 return self.exists(self._cache[path]['data'].strip())
1987 return self.exists(self._cache[path]['data'].strip())
1987 else:
1988 else:
1988 return self._cache[path]['exists']
1989 return self._cache[path]['exists']
1989
1990
1990 return self._existsinparent(path)
1991 return self._existsinparent(path)
1991
1992
1992 def lexists(self, path):
1993 def lexists(self, path):
1993 """lexists returns True if the path exists"""
1994 """lexists returns True if the path exists"""
1994 if self.isdirty(path):
1995 if self.isdirty(path):
1995 return self._cache[path]['exists']
1996 return self._cache[path]['exists']
1996
1997
1997 return self._existsinparent(path)
1998 return self._existsinparent(path)
1998
1999
1999 def size(self, path):
2000 def size(self, path):
2000 if self.isdirty(path):
2001 if self.isdirty(path):
2001 if self._cache[path]['exists']:
2002 if self._cache[path]['exists']:
2002 return len(self._cache[path]['data'])
2003 return len(self._cache[path]['data'])
2003 else:
2004 else:
2004 raise error.ProgrammingError("No such file or directory: %s" %
2005 raise error.ProgrammingError("No such file or directory: %s" %
2005 path)
2006 path)
2006 return self._wrappedctx[path].size()
2007 return self._wrappedctx[path].size()
2007
2008
2008 def tomemctx(self, text, branch=None, extra=None, date=None, parents=None,
2009 def tomemctx(self, text, branch=None, extra=None, date=None, parents=None,
2009 user=None, editor=None):
2010 user=None, editor=None):
2010 """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
2011 """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
2011 committed.
2012 committed.
2012
2013
2013 ``text`` is the commit message.
2014 ``text`` is the commit message.
2014 ``parents`` (optional) are rev numbers.
2015 ``parents`` (optional) are rev numbers.
2015 """
2016 """
2016 # Default parents to the wrapped context's parents if not passed.
2017 # Default parents to the wrapped context's parents if not passed.
2017 if parents is None:
2018 if parents is None:
2018 parents = self._wrappedctx.parents()
2019 parents = self._wrappedctx.parents()
2019 if len(parents) == 1:
2020 if len(parents) == 1:
2020 parents = (parents[0], None)
2021 parents = (parents[0], None)
2021
2022
2022 # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
2023 # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
2023 if parents[1] is None:
2024 if parents[1] is None:
2024 parents = (self._repo[parents[0]], None)
2025 parents = (self._repo[parents[0]], None)
2025 else:
2026 else:
2026 parents = (self._repo[parents[0]], self._repo[parents[1]])
2027 parents = (self._repo[parents[0]], self._repo[parents[1]])
2027
2028
2028 files = self._cache.keys()
2029 files = self._cache.keys()
2029 def getfile(repo, memctx, path):
2030 def getfile(repo, memctx, path):
2030 if self._cache[path]['exists']:
2031 if self._cache[path]['exists']:
2031 return memfilectx(repo, memctx, path,
2032 return memfilectx(repo, memctx, path,
2032 self._cache[path]['data'],
2033 self._cache[path]['data'],
2033 'l' in self._cache[path]['flags'],
2034 'l' in self._cache[path]['flags'],
2034 'x' in self._cache[path]['flags'],
2035 'x' in self._cache[path]['flags'],
2035 self._cache[path]['copied'])
2036 self._cache[path]['copied'])
2036 else:
2037 else:
2037 # Returning None, but including the path in `files`, is
2038 # Returning None, but including the path in `files`, is
2038 # necessary for memctx to register a deletion.
2039 # necessary for memctx to register a deletion.
2039 return None
2040 return None
2040 return memctx(self._repo, parents, text, files, getfile, date=date,
2041 return memctx(self._repo, parents, text, files, getfile, date=date,
2041 extra=extra, user=user, branch=branch, editor=editor)
2042 extra=extra, user=user, branch=branch, editor=editor)
2042
2043
2043 def isdirty(self, path):
2044 def isdirty(self, path):
2044 return path in self._cache
2045 return path in self._cache
2045
2046
2046 def isempty(self):
2047 def isempty(self):
2047 # We need to discard any keys that are actually clean before the empty
2048 # We need to discard any keys that are actually clean before the empty
2048 # commit check.
2049 # commit check.
2049 self._compact()
2050 self._compact()
2050 return len(self._cache) == 0
2051 return len(self._cache) == 0
2051
2052
2052 def clean(self):
2053 def clean(self):
2053 self._cache = {}
2054 self._cache = {}
2054
2055
2055 def _compact(self):
2056 def _compact(self):
2056 """Removes keys from the cache that are actually clean, by comparing
2057 """Removes keys from the cache that are actually clean, by comparing
2057 them with the underlying context.
2058 them with the underlying context.
2058
2059
2059 This can occur during the merge process, e.g. by passing --tool :local
2060 This can occur during the merge process, e.g. by passing --tool :local
2060 to resolve a conflict.
2061 to resolve a conflict.
2061 """
2062 """
2062 keys = []
2063 keys = []
2063 for path in self._cache.keys():
2064 for path in self._cache.keys():
2064 cache = self._cache[path]
2065 cache = self._cache[path]
2065 try:
2066 try:
2066 underlying = self._wrappedctx[path]
2067 underlying = self._wrappedctx[path]
2067 if (underlying.data() == cache['data'] and
2068 if (underlying.data() == cache['data'] and
2068 underlying.flags() == cache['flags']):
2069 underlying.flags() == cache['flags']):
2069 keys.append(path)
2070 keys.append(path)
2070 except error.ManifestLookupError:
2071 except error.ManifestLookupError:
2071 # Path not in the underlying manifest (created).
2072 # Path not in the underlying manifest (created).
2072 continue
2073 continue
2073
2074
2074 for path in keys:
2075 for path in keys:
2075 del self._cache[path]
2076 del self._cache[path]
2076 return keys
2077 return keys
2077
2078
2078 def _markdirty(self, path, exists, data=None, date=None, flags=''):
2079 def _markdirty(self, path, exists, data=None, date=None, flags=''):
2079 self._cache[path] = {
2080 self._cache[path] = {
2080 'exists': exists,
2081 'exists': exists,
2081 'data': data,
2082 'data': data,
2082 'date': date,
2083 'date': date,
2083 'flags': flags,
2084 'flags': flags,
2084 'copied': None,
2085 'copied': None,
2085 }
2086 }
2086
2087
2087 def filectx(self, path, filelog=None):
2088 def filectx(self, path, filelog=None):
2088 return overlayworkingfilectx(self._repo, path, parent=self,
2089 return overlayworkingfilectx(self._repo, path, parent=self,
2089 filelog=filelog)
2090 filelog=filelog)
2090
2091
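# --- Illustrative sketch (not part of the original module) ---
# A minimal example of how an in-memory merge/rebase might drive
# overlayworkingctx: wrap a base commit, buffer some writes, then turn the
# result into a memctx and commit it. ``repo`` and ``destctx`` are assumed to
# be a localrepo and the destination changectx; the paths, data and commit
# message are made up.
def _example_overlayworkingctx(repo, destctx):
    """Hypothetical helper showing the write -> tomemctx -> commit flow."""
    wctx = overlayworkingctx(repo)
    wctx.setbase(destctx)                        # buffer changes on top of destctx
    wctx.write('foo.txt', 'new contents\n')      # cached in memory, not on disk
    wctx.remove('obsolete.txt')                  # mark a deletion in the cache
    if not wctx.isempty():
        mctx = wctx.tomemctx('example commit', user='someone@example.com')
        return repo.commitctx(mctx)
    return None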
2091 class overlayworkingfilectx(committablefilectx):
2092 class overlayworkingfilectx(committablefilectx):
2092 """Wraps a ``workingfilectx`` but intercepts all writes into an in-memory
2093 """Wraps a ``workingfilectx`` but intercepts all writes into an in-memory
2093 cache, which can be flushed through later by calling ``flush()``."""
2094 cache, which can be flushed through later by calling ``flush()``."""
2094
2095
2095 def __init__(self, repo, path, filelog=None, parent=None):
2096 def __init__(self, repo, path, filelog=None, parent=None):
2096 super(overlayworkingfilectx, self).__init__(repo, path, filelog,
2097 super(overlayworkingfilectx, self).__init__(repo, path, filelog,
2097 parent)
2098 parent)
2098 self._repo = repo
2099 self._repo = repo
2099 self._parent = parent
2100 self._parent = parent
2100 self._path = path
2101 self._path = path
2101
2102
2102 def cmp(self, fctx):
2103 def cmp(self, fctx):
2103 return self.data() != fctx.data()
2104 return self.data() != fctx.data()
2104
2105
2105 def changectx(self):
2106 def changectx(self):
2106 return self._parent
2107 return self._parent
2107
2108
2108 def data(self):
2109 def data(self):
2109 return self._parent.data(self._path)
2110 return self._parent.data(self._path)
2110
2111
2111 def date(self):
2112 def date(self):
2112 return self._parent.filedate(self._path)
2113 return self._parent.filedate(self._path)
2113
2114
2114 def exists(self):
2115 def exists(self):
2115 return self.lexists()
2116 return self.lexists()
2116
2117
2117 def lexists(self):
2118 def lexists(self):
2118 return self._parent.exists(self._path)
2119 return self._parent.exists(self._path)
2119
2120
2120 def renamed(self):
2121 def renamed(self):
2121 path = self._parent.copydata(self._path)
2122 path = self._parent.copydata(self._path)
2122 if not path:
2123 if not path:
2123 return None
2124 return None
2124 return path, self._changectx._parents[0]._manifest.get(path, nullid)
2125 return path, self._changectx._parents[0]._manifest.get(path, nullid)
2125
2126
2126 def size(self):
2127 def size(self):
2127 return self._parent.size(self._path)
2128 return self._parent.size(self._path)
2128
2129
2129 def markcopied(self, origin):
2130 def markcopied(self, origin):
2130 self._parent.markcopied(self._path, origin)
2131 self._parent.markcopied(self._path, origin)
2131
2132
2132 def audit(self):
2133 def audit(self):
2133 pass
2134 pass
2134
2135
2135 def flags(self):
2136 def flags(self):
2136 return self._parent.flags(self._path)
2137 return self._parent.flags(self._path)
2137
2138
2138 def setflags(self, islink, isexec):
2139 def setflags(self, islink, isexec):
2139 return self._parent.setflags(self._path, islink, isexec)
2140 return self._parent.setflags(self._path, islink, isexec)
2140
2141
2141 def write(self, data, flags, backgroundclose=False, **kwargs):
2142 def write(self, data, flags, backgroundclose=False, **kwargs):
2142 return self._parent.write(self._path, data, flags, **kwargs)
2143 return self._parent.write(self._path, data, flags, **kwargs)
2143
2144
2144 def remove(self, ignoremissing=False):
2145 def remove(self, ignoremissing=False):
2145 return self._parent.remove(self._path)
2146 return self._parent.remove(self._path)
2146
2147
2147 def clearunknown(self):
2148 def clearunknown(self):
2148 pass
2149 pass
2149
2150
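# --- Illustrative sketch (not part of the original module) ---
# A minimal example showing that file-level operations on an
# overlayworkingctx go through overlayworkingfilectx and land in the parent
# context's in-memory cache. ``owctx`` is assumed to be an overlayworkingctx
# whose base has already been set; the path and data are made up.
def _example_overlayworkingfilectx(owctx):
    """Hypothetical helper writing one file through the file context."""
    fctx = owctx.filectx('bar.txt')      # an overlayworkingfilectx
    fctx.write('cached contents\n', '')  # delegates to owctx.write()
    return fctx.exists(), fctx.size()    # both answered from the cache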
2150 class workingcommitctx(workingctx):
2151 class workingcommitctx(workingctx):
2151 """A workingcommitctx object makes access to data related to
2152 """A workingcommitctx object makes access to data related to
2152 the revision being committed convenient.
2153 the revision being committed convenient.
2153
2154
2154 This hides changes in the working directory, if they aren't
2155 This hides changes in the working directory, if they aren't
2155 committed in this context.
2156 committed in this context.
2156 """
2157 """
2157 def __init__(self, repo, changes,
2158 def __init__(self, repo, changes,
2158 text="", user=None, date=None, extra=None):
2159 text="", user=None, date=None, extra=None):
2159 super(workingctx, self).__init__(repo, text, user, date, extra,
2160 super(workingctx, self).__init__(repo, text, user, date, extra,
2160 changes)
2161 changes)
2161
2162
2162 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
2163 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
2163 """Return matched files only in ``self._status``
2164 """Return matched files only in ``self._status``
2164
2165
2165 Uncommitted files appear "clean" via this context, even if
2166 Uncommitted files appear "clean" via this context, even if
2166 they aren't actually so in the working directory.
2167 they aren't actually so in the working directory.
2167 """
2168 """
2168 if clean:
2169 if clean:
2169 clean = [f for f in self._manifest if f not in self._changedset]
2170 clean = [f for f in self._manifest if f not in self._changedset]
2170 else:
2171 else:
2171 clean = []
2172 clean = []
2172 return scmutil.status([f for f in self._status.modified if match(f)],
2173 return scmutil.status([f for f in self._status.modified if match(f)],
2173 [f for f in self._status.added if match(f)],
2174 [f for f in self._status.added if match(f)],
2174 [f for f in self._status.removed if match(f)],
2175 [f for f in self._status.removed if match(f)],
2175 [], [], [], clean)
2176 [], [], [], clean)
2176
2177
2177 @propertycache
2178 @propertycache
2178 def _changedset(self):
2179 def _changedset(self):
2179 """Return the set of files changed in this context
2180 """Return the set of files changed in this context
2180 """
2181 """
2181 changed = set(self._status.modified)
2182 changed = set(self._status.modified)
2182 changed.update(self._status.added)
2183 changed.update(self._status.added)
2183 changed.update(self._status.removed)
2184 changed.update(self._status.removed)
2184 return changed
2185 return changed
2185
2186
2186 def makecachingfilectxfn(func):
2187 def makecachingfilectxfn(func):
2187 """Create a filectxfn that caches based on the path.
2188 """Create a filectxfn that caches based on the path.
2188
2189
2189 We can't use util.cachefunc because it uses all arguments as the cache
2190 We can't use util.cachefunc because it uses all arguments as the cache
2190 key and this creates a cycle since the arguments include the repo and
2191 key and this creates a cycle since the arguments include the repo and
2191 memctx.
2192 memctx.
2192 """
2193 """
2193 cache = {}
2194 cache = {}
2194
2195
2195 def getfilectx(repo, memctx, path):
2196 def getfilectx(repo, memctx, path):
2196 if path not in cache:
2197 if path not in cache:
2197 cache[path] = func(repo, memctx, path)
2198 cache[path] = func(repo, memctx, path)
2198 return cache[path]
2199 return cache[path]
2199
2200
2200 return getfilectx
2201 return getfilectx
2201
2202
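# Illustrative example of why the per-path cache above matters (hedged;
# ``expensivelookup`` is a made-up stand-in for an expensive inner callback):
#
#   def expensivefn(repo, memctx, path):
#       return memfilectx(repo, memctx, path, expensivelookup(path))
#   cachedfn = makecachingfilectxfn(expensivefn)
#   cachedfn(repo, memctx, 'foo')   # computed once
#   cachedfn(repo, memctx, 'foo')   # served from the cache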
2202 def memfilefromctx(ctx):
2203 def memfilefromctx(ctx):
2203 """Given a context return a memfilectx for ctx[path]
2204 """Given a context return a memfilectx for ctx[path]
2204
2205
2205 This is a convenience method for building a memctx based on another
2206 This is a convenience method for building a memctx based on another
2206 context.
2207 context.
2207 """
2208 """
2208 def getfilectx(repo, memctx, path):
2209 def getfilectx(repo, memctx, path):
2209 fctx = ctx[path]
2210 fctx = ctx[path]
2210 # this is weird but apparently we only keep track of one parent
2211 # this is weird but apparently we only keep track of one parent
2211 # (why not only store that instead of a tuple?)
2212 # (why not only store that instead of a tuple?)
2212 copied = fctx.renamed()
2213 copied = fctx.renamed()
2213 if copied:
2214 if copied:
2214 copied = copied[0]
2215 copied = copied[0]
2215 return memfilectx(repo, memctx, path, fctx.data(),
2216 return memfilectx(repo, memctx, path, fctx.data(),
2216 islink=fctx.islink(), isexec=fctx.isexec(),
2217 islink=fctx.islink(), isexec=fctx.isexec(),
2217 copied=copied)
2218 copied=copied)
2218
2219
2219 return getfilectx
2220 return getfilectx
2220
2221
2221 def memfilefrompatch(patchstore):
2222 def memfilefrompatch(patchstore):
2222 """Given a patch (e.g. patchstore object) return a memfilectx
2223 """Given a patch (e.g. patchstore object) return a memfilectx
2223
2224
2224 This is a convenience method for building a memctx based on a patchstore.
2225 This is a convenience method for building a memctx based on a patchstore.
2225 """
2226 """
2226 def getfilectx(repo, memctx, path):
2227 def getfilectx(repo, memctx, path):
2227 data, mode, copied = patchstore.getfile(path)
2228 data, mode, copied = patchstore.getfile(path)
2228 if data is None:
2229 if data is None:
2229 return None
2230 return None
2230 islink, isexec = mode
2231 islink, isexec = mode
2231 return memfilectx(repo, memctx, path, data, islink=islink,
2232 return memfilectx(repo, memctx, path, data, islink=islink,
2232 isexec=isexec, copied=copied)
2233 isexec=isexec, copied=copied)
2233
2234
2234 return getfilectx
2235 return getfilectx
2235
2236
2236 class memctx(committablectx):
2237 class memctx(committablectx):
2237 """Use memctx to perform in-memory commits via localrepo.commitctx().
2238 """Use memctx to perform in-memory commits via localrepo.commitctx().
2238
2239
2239 Revision information is supplied at initialization time, while
2240 Revision information is supplied at initialization time, while
2240 related file data is made available through a callback
2241 related file data is made available through a callback
2241 mechanism. 'repo' is the current localrepo, 'parents' is a
2242 mechanism. 'repo' is the current localrepo, 'parents' is a
2242 sequence of two parent revision identifiers (pass None for every
2243 sequence of two parent revision identifiers (pass None for every
2243 missing parent), 'text' is the commit message and 'files' lists
2244 missing parent), 'text' is the commit message and 'files' lists
2244 names of files touched by the revision (normalized and relative to
2245 names of files touched by the revision (normalized and relative to
2245 repository root).
2246 repository root).
2246
2247
2247 filectxfn(repo, memctx, path) is a callable receiving the
2248 filectxfn(repo, memctx, path) is a callable receiving the
2248 repository, the current memctx object and the normalized path of
2249 repository, the current memctx object and the normalized path of
2249 requested file, relative to repository root. It is fired by the
2250 requested file, relative to repository root. It is fired by the
2250 commit function for every file in 'files', but calls order is
2251 commit function for every file in 'files', but calls order is
2251 undefined. If the file is available in the revision being
2252 undefined. If the file is available in the revision being
2252 committed (updated or added), filectxfn returns a memfilectx
2253 committed (updated or added), filectxfn returns a memfilectx
2253 object. If the file was removed, filectxfn returns None for recent
2254 object. If the file was removed, filectxfn returns None for recent
2254 Mercurial. Moved files are represented by marking the source file
2255 Mercurial. Moved files are represented by marking the source file
2255 removed and the new file added with copy information (see
2256 removed and the new file added with copy information (see
2256 memfilectx).
2257 memfilectx).
2257
2258
2258 user receives the committer name and defaults to current
2259 user receives the committer name and defaults to current
2259 repository username, date is the commit date in any format
2260 repository username, date is the commit date in any format
2260 supported by dateutil.parsedate() and defaults to current date, extra
2261 supported by dateutil.parsedate() and defaults to current date, extra
2261 is a dictionary of metadata or is left empty.
2262 is a dictionary of metadata or is left empty.
2262 """
2263 """
2263
2264
2264 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2265 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2265 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2266 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2266 # this field to determine what to do in filectxfn.
2267 # this field to determine what to do in filectxfn.
2267 _returnnoneformissingfiles = True
2268 _returnnoneformissingfiles = True
2268
2269
2269 def __init__(self, repo, parents, text, files, filectxfn, user=None,
2270 def __init__(self, repo, parents, text, files, filectxfn, user=None,
2270 date=None, extra=None, branch=None, editor=False):
2271 date=None, extra=None, branch=None, editor=False):
2271 super(memctx, self).__init__(repo, text, user, date, extra)
2272 super(memctx, self).__init__(repo, text, user, date, extra)
2272 self._rev = None
2273 self._rev = None
2273 self._node = None
2274 self._node = None
2274 parents = [(p or nullid) for p in parents]
2275 parents = [(p or nullid) for p in parents]
2275 p1, p2 = parents
2276 p1, p2 = parents
2276 self._parents = [self._repo[p] for p in (p1, p2)]
2277 self._parents = [self._repo[p] for p in (p1, p2)]
2277 files = sorted(set(files))
2278 files = sorted(set(files))
2278 self._files = files
2279 self._files = files
2279 if branch is not None:
2280 if branch is not None:
2280 self._extra['branch'] = encoding.fromlocal(branch)
2281 self._extra['branch'] = encoding.fromlocal(branch)
2281 self.substate = {}
2282 self.substate = {}
2282
2283
2283 if isinstance(filectxfn, patch.filestore):
2284 if isinstance(filectxfn, patch.filestore):
2284 filectxfn = memfilefrompatch(filectxfn)
2285 filectxfn = memfilefrompatch(filectxfn)
2285 elif not callable(filectxfn):
2286 elif not callable(filectxfn):
2286 # if store is not callable, wrap it in a function
2287 # if store is not callable, wrap it in a function
2287 filectxfn = memfilefromctx(filectxfn)
2288 filectxfn = memfilefromctx(filectxfn)
2288
2289
2289 # memoizing increases performance for e.g. vcs convert scenarios.
2290 # memoizing increases performance for e.g. vcs convert scenarios.
2290 self._filectxfn = makecachingfilectxfn(filectxfn)
2291 self._filectxfn = makecachingfilectxfn(filectxfn)
2291
2292
2292 if editor:
2293 if editor:
2293 self._text = editor(self._repo, self, [])
2294 self._text = editor(self._repo, self, [])
2294 self._repo.savecommitmessage(self._text)
2295 self._repo.savecommitmessage(self._text)
2295
2296
2296 def filectx(self, path, filelog=None):
2297 def filectx(self, path, filelog=None):
2297 """get a file context from the working directory
2298 """get a file context from the working directory
2298
2299
2299 Returns None if file doesn't exist and should be removed."""
2300 Returns None if file doesn't exist and should be removed."""
2300 return self._filectxfn(self._repo, self, path)
2301 return self._filectxfn(self._repo, self, path)
2301
2302
2302 def commit(self):
2303 def commit(self):
2303 """commit context to the repo"""
2304 """commit context to the repo"""
2304 return self._repo.commitctx(self)
2305 return self._repo.commitctx(self)
2305
2306
2306 @propertycache
2307 @propertycache
2307 def _manifest(self):
2308 def _manifest(self):
2308 """generate a manifest based on the return values of filectxfn"""
2309 """generate a manifest based on the return values of filectxfn"""
2309
2310
2310 # keep this simple for now; just worry about p1
2311 # keep this simple for now; just worry about p1
2311 pctx = self._parents[0]
2312 pctx = self._parents[0]
2312 man = pctx.manifest().copy()
2313 man = pctx.manifest().copy()
2313
2314
2314 for f in self._status.modified:
2315 for f in self._status.modified:
2315 p1node = nullid
2316 p1node = nullid
2316 p2node = nullid
2317 p2node = nullid
2317 p = pctx[f].parents() # if file isn't in pctx, check p2?
2318 p = pctx[f].parents() # if file isn't in pctx, check p2?
2318 if len(p) > 0:
2319 if len(p) > 0:
2319 p1node = p[0].filenode()
2320 p1node = p[0].filenode()
2320 if len(p) > 1:
2321 if len(p) > 1:
2321 p2node = p[1].filenode()
2322 p2node = p[1].filenode()
2322 man[f] = revlog.hash(self[f].data(), p1node, p2node)
2323 man[f] = revlog.hash(self[f].data(), p1node, p2node)
2323
2324
2324 for f in self._status.added:
2325 for f in self._status.added:
2325 man[f] = revlog.hash(self[f].data(), nullid, nullid)
2326 man[f] = revlog.hash(self[f].data(), nullid, nullid)
2326
2327
2327 for f in self._status.removed:
2328 for f in self._status.removed:
2328 if f in man:
2329 if f in man:
2329 del man[f]
2330 del man[f]
2330
2331
2331 return man
2332 return man
2332
2333
2333 @propertycache
2334 @propertycache
2334 def _status(self):
2335 def _status(self):
2335 """Calculate exact status from ``files`` specified at construction
2336 """Calculate exact status from ``files`` specified at construction
2336 """
2337 """
2337 man1 = self.p1().manifest()
2338 man1 = self.p1().manifest()
2338 p2 = self._parents[1]
2339 p2 = self._parents[1]
2339 # "1 < len(self._parents)" can't be used for checking
2340 # "1 < len(self._parents)" can't be used for checking
2340 # existence of the 2nd parent, because "memctx._parents" is
2341 # existence of the 2nd parent, because "memctx._parents" is
2341 # explicitly initialized with a list whose length is 2.
2342 # explicitly initialized with a list whose length is 2.
2342 if p2.node() != nullid:
2343 if p2.node() != nullid:
2343 man2 = p2.manifest()
2344 man2 = p2.manifest()
2344 managing = lambda f: f in man1 or f in man2
2345 managing = lambda f: f in man1 or f in man2
2345 else:
2346 else:
2346 managing = lambda f: f in man1
2347 managing = lambda f: f in man1
2347
2348
2348 modified, added, removed = [], [], []
2349 modified, added, removed = [], [], []
2349 for f in self._files:
2350 for f in self._files:
2350 if not managing(f):
2351 if not managing(f):
2351 added.append(f)
2352 added.append(f)
2352 elif self[f]:
2353 elif self[f]:
2353 modified.append(f)
2354 modified.append(f)
2354 else:
2355 else:
2355 removed.append(f)
2356 removed.append(f)
2356
2357
2357 return scmutil.status(modified, added, removed, [], [], [], [])
2358 return scmutil.status(modified, added, removed, [], [], [], [])
2358
2359
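# --- Illustrative sketch (not part of the original module) ---
# A minimal example of an in-memory commit built with memctx and memfilectx,
# in the spirit of what convert-like tools do. ``repo`` is assumed to be a
# localrepo; ``files`` maps path -> new data, or None to delete that path;
# the commit message and user are made up.
def _example_memctx_commit(repo, files):
    """Hypothetical helper committing ``files`` as a new in-memory revision."""
    def getfilectx(repo, memctx, path):
        data = files.get(path)
        if data is None:
            return None              # registers a deletion for this path
        return memfilectx(repo, memctx, path, data)
    parents = (repo['.'].node(), None)   # single parent: the working parent
    mctx = memctx(repo, parents, 'example in-memory commit',
                  sorted(files), getfilectx, user='someone@example.com')
    return mctx.commit()                 # same as repo.commitctx(mctx)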
2359 class memfilectx(committablefilectx):
2360 class memfilectx(committablefilectx):
2360 """memfilectx represents an in-memory file to commit.
2361 """memfilectx represents an in-memory file to commit.
2361
2362
2362 See memctx and committablefilectx for more details.
2363 See memctx and committablefilectx for more details.
2363 """
2364 """
2364 def __init__(self, repo, changectx, path, data, islink=False,
2365 def __init__(self, repo, changectx, path, data, islink=False,
2365 isexec=False, copied=None):
2366 isexec=False, copied=None):
2366 """
2367 """
2367 path is the normalized file path relative to repository root.
2368 path is the normalized file path relative to repository root.
2368 data is the file content as a string.
2369 data is the file content as a string.
2369 islink is True if the file is a symbolic link.
2370 islink is True if the file is a symbolic link.
2370 isexec is True if the file is executable.
2371 isexec is True if the file is executable.
2371 copied is the source file path if current file was copied in the
2372 copied is the source file path if current file was copied in the
2372 revision being committed, or None."""
2373 revision being committed, or None."""
2373 super(memfilectx, self).__init__(repo, path, None, changectx)
2374 super(memfilectx, self).__init__(repo, path, None, changectx)
2374 self._data = data
2375 self._data = data
2375 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
2376 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
2376 self._copied = None
2377 self._copied = None
2377 if copied:
2378 if copied:
2378 self._copied = (copied, nullid)
2379 self._copied = (copied, nullid)
2379
2380
2380 def data(self):
2381 def data(self):
2381 return self._data
2382 return self._data
2382
2383
2383 def remove(self, ignoremissing=False):
2384 def remove(self, ignoremissing=False):
2384 """wraps unlink for a repo's working directory"""
2385 """wraps unlink for a repo's working directory"""
2385 # need to figure out what to do here
2386 # need to figure out what to do here
2386 del self._changectx[self._path]
2387 del self._changectx[self._path]
2387
2388
2388 def write(self, data, flags, **kwargs):
2389 def write(self, data, flags, **kwargs):
2389 """wraps repo.wwrite"""
2390 """wraps repo.wwrite"""
2390 self._data = data
2391 self._data = data
2391
2392
2392 class overlayfilectx(committablefilectx):
2393 class overlayfilectx(committablefilectx):
2393 """Like memfilectx but takes an original filectx and optional parameters to
2394 """Like memfilectx but takes an original filectx and optional parameters to
2394 override parts of it. This is useful when fctx.data() is expensive (i.e.
2395 override parts of it. This is useful when fctx.data() is expensive (i.e.
2395 flag processor is expensive) and raw data, flags, and filenode could be
2396 flag processor is expensive) and raw data, flags, and filenode could be
2396 reused (ex. rebase or mode-only amend a REVIDX_EXTSTORED file).
2397 reused (ex. rebase or mode-only amend a REVIDX_EXTSTORED file).
2397 """
2398 """
2398
2399
2399 def __init__(self, originalfctx, datafunc=None, path=None, flags=None,
2400 def __init__(self, originalfctx, datafunc=None, path=None, flags=None,
2400 copied=None, ctx=None):
2401 copied=None, ctx=None):
2401 """originalfctx: filecontext to duplicate
2402 """originalfctx: filecontext to duplicate
2402
2403
2403 datafunc: None or a function to override data (file content). It is a
2404 datafunc: None or a function to override data (file content). It is a
2404 function so the data is computed lazily. path, flags, copied, ctx: None or overridden value
2405 function so the data is computed lazily. path, flags, copied, ctx: None or overridden value
2405
2406
2406 copied could be (path, rev), or False. copied could also be just path,
2407 copied could be (path, rev), or False. copied could also be just path,
2407 and will be converted to (path, nullid). This simplifies some callers.
2408 and will be converted to (path, nullid). This simplifies some callers.
2408 """
2409 """
2409
2410
2410 if path is None:
2411 if path is None:
2411 path = originalfctx.path()
2412 path = originalfctx.path()
2412 if ctx is None:
2413 if ctx is None:
2413 ctx = originalfctx.changectx()
2414 ctx = originalfctx.changectx()
2414 ctxmatch = lambda: True
2415 ctxmatch = lambda: True
2415 else:
2416 else:
2416 ctxmatch = lambda: ctx == originalfctx.changectx()
2417 ctxmatch = lambda: ctx == originalfctx.changectx()
2417
2418
2418 repo = originalfctx.repo()
2419 repo = originalfctx.repo()
2419 flog = originalfctx.filelog()
2420 flog = originalfctx.filelog()
2420 super(overlayfilectx, self).__init__(repo, path, flog, ctx)
2421 super(overlayfilectx, self).__init__(repo, path, flog, ctx)
2421
2422
2422 if copied is None:
2423 if copied is None:
2423 copied = originalfctx.renamed()
2424 copied = originalfctx.renamed()
2424 copiedmatch = lambda: True
2425 copiedmatch = lambda: True
2425 else:
2426 else:
2426 if copied and not isinstance(copied, tuple):
2427 if copied and not isinstance(copied, tuple):
2427 # repo._filecommit will recalculate copyrev so nullid is okay
2428 # repo._filecommit will recalculate copyrev so nullid is okay
2428 copied = (copied, nullid)
2429 copied = (copied, nullid)
2429 copiedmatch = lambda: copied == originalfctx.renamed()
2430 copiedmatch = lambda: copied == originalfctx.renamed()
2430
2431
2431 # When data, copied (could affect data), ctx (could affect filelog
2432 # When data, copied (could affect data), ctx (could affect filelog
2432 # parents) are not overridden, rawdata, rawflags, and filenode may be
2433 # parents) are not overridden, rawdata, rawflags, and filenode may be
2433 # reused (repo._filecommit should double check filelog parents).
2434 # reused (repo._filecommit should double check filelog parents).
2434 #
2435 #
2435 # path, flags are not hashed in filelog (but in manifestlog) so they do
2436 # path, flags are not hashed in filelog (but in manifestlog) so they do
2436 # not affect reusability here.
2437 # not affect reusability here.
2437 #
2438 #
2438 # If ctx or copied is overridden to the same value as in originalfctx,
2439 # If ctx or copied is overridden to the same value as in originalfctx,
2439 # it is still considered reusable. originalfctx.renamed() may be a bit
2440 # it is still considered reusable. originalfctx.renamed() may be a bit
2440 # expensive so it's not called unless necessary. Assuming datafunc is
2441 # expensive so it's not called unless necessary. Assuming datafunc is
2441 # always expensive, do not call it for this "reusable" test.
2442 # always expensive, do not call it for this "reusable" test.
2442 reusable = datafunc is None and ctxmatch() and copiedmatch()
2443 reusable = datafunc is None and ctxmatch() and copiedmatch()
2443
2444
2444 if datafunc is None:
2445 if datafunc is None:
2445 datafunc = originalfctx.data
2446 datafunc = originalfctx.data
2446 if flags is None:
2447 if flags is None:
2447 flags = originalfctx.flags()
2448 flags = originalfctx.flags()
2448
2449
2449 self._datafunc = datafunc
2450 self._datafunc = datafunc
2450 self._flags = flags
2451 self._flags = flags
2451 self._copied = copied
2452 self._copied = copied
2452
2453
2453 if reusable:
2454 if reusable:
2454 # copy extra fields from originalfctx
2455 # copy extra fields from originalfctx
2455 attrs = ['rawdata', 'rawflags', '_filenode', '_filerev']
2456 attrs = ['rawdata', 'rawflags', '_filenode', '_filerev']
2456 for attr_ in attrs:
2457 for attr_ in attrs:
2457 if util.safehasattr(originalfctx, attr_):
2458 if util.safehasattr(originalfctx, attr_):
2458 setattr(self, attr_, getattr(originalfctx, attr_))
2459 setattr(self, attr_, getattr(originalfctx, attr_))
2459
2460
2460 def data(self):
2461 def data(self):
2461 return self._datafunc()
2462 return self._datafunc()
2462
2463
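# --- Illustrative sketch (not part of the original module) ---
# A minimal example of overriding only the data of an existing filectx while
# keeping its path, flags and copy information. ``fctx`` is assumed to be a
# regular filectx; the replacement content is made up.
def _example_overlayfilectx(fctx):
    """Hypothetical helper returning a filectx with rewritten data."""
    def newdata():
        # computed lazily; only called when .data() is actually needed
        return fctx.data().replace('old', 'new')
    return overlayfilectx(fctx, datafunc=newdata)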
2463 class metadataonlyctx(committablectx):
2464 class metadataonlyctx(committablectx):
2464 """Like memctx but it's reusing the manifest of a different commit.
2465 """Like memctx but it's reusing the manifest of a different commit.
2465 Intended to be used by lightweight operations that are creating
2466 Intended to be used by lightweight operations that are creating
2466 metadata-only changes.
2467 metadata-only changes.
2467
2468
2468 Revision information is supplied at initialization time. 'repo' is the
2469 Revision information is supplied at initialization time. 'repo' is the
2469 current localrepo, 'ctx' is the original revision whose manifest we're reusing,
2470 current localrepo, 'ctx' is the original revision whose manifest we're reusing,
2470 'parents' is a sequence of two parent revision identifiers (pass None for
2471 'parents' is a sequence of two parent revision identifiers (pass None for
2471 every missing parent), 'text' is the commit message.
2472 every missing parent), 'text' is the commit message.
2472
2473
2473 user receives the committer name and defaults to current repository
2474 user receives the committer name and defaults to current repository
2474 username, date is the commit date in any format supported by
2475 username, date is the commit date in any format supported by
2475 dateutil.parsedate() and defaults to current date, extra is a dictionary of
2476 dateutil.parsedate() and defaults to current date, extra is a dictionary of
2476 metadata or is left empty.
2477 metadata or is left empty.
2477 """
2478 """
2478 def __init__(self, repo, originalctx, parents=None, text=None, user=None,
2479 def __init__(self, repo, originalctx, parents=None, text=None, user=None,
2479 date=None, extra=None, editor=False):
2480 date=None, extra=None, editor=False):
2480 if text is None:
2481 if text is None:
2481 text = originalctx.description()
2482 text = originalctx.description()
2482 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2483 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2483 self._rev = None
2484 self._rev = None
2484 self._node = None
2485 self._node = None
2485 self._originalctx = originalctx
2486 self._originalctx = originalctx
2486 self._manifestnode = originalctx.manifestnode()
2487 self._manifestnode = originalctx.manifestnode()
2487 if parents is None:
2488 if parents is None:
2488 parents = originalctx.parents()
2489 parents = originalctx.parents()
2489 else:
2490 else:
2490 parents = [repo[p] for p in parents if p is not None]
2491 parents = [repo[p] for p in parents if p is not None]
2491 parents = parents[:]
2492 parents = parents[:]
2492 while len(parents) < 2:
2493 while len(parents) < 2:
2493 parents.append(repo[nullid])
2494 parents.append(repo[nullid])
2494 p1, p2 = self._parents = parents
2495 p1, p2 = self._parents = parents
2495
2496
2496 # sanity check to ensure that the reused manifest parents are
2497 # sanity check to ensure that the reused manifest parents are
2497 # manifests of our commit parents
2498 # manifests of our commit parents
2498 mp1, mp2 = self.manifestctx().parents
2499 mp1, mp2 = self.manifestctx().parents
2499 if p1 != nullid and p1.manifestnode() != mp1:
2500 if p1 != nullid and p1.manifestnode() != mp1:
2500 raise RuntimeError('can\'t reuse the manifest: '
2501 raise RuntimeError('can\'t reuse the manifest: '
2501 'its p1 doesn\'t match the new ctx p1')
2502 'its p1 doesn\'t match the new ctx p1')
2502 if p2 != nullid and p2.manifestnode() != mp2:
2503 if p2 != nullid and p2.manifestnode() != mp2:
2503 raise RuntimeError('can\'t reuse the manifest: '
2504 raise RuntimeError('can\'t reuse the manifest: '
2504 'its p2 doesn\'t match the new ctx p2')
2505 'its p2 doesn\'t match the new ctx p2')
2505
2506
2506 self._files = originalctx.files()
2507 self._files = originalctx.files()
2507 self.substate = {}
2508 self.substate = {}
2508
2509
2509 if editor:
2510 if editor:
2510 self._text = editor(self._repo, self, [])
2511 self._text = editor(self._repo, self, [])
2511 self._repo.savecommitmessage(self._text)
2512 self._repo.savecommitmessage(self._text)
2512
2513
2513 def manifestnode(self):
2514 def manifestnode(self):
2514 return self._manifestnode
2515 return self._manifestnode
2515
2516
2516 @property
2517 @property
2517 def _manifestctx(self):
2518 def _manifestctx(self):
2518 return self._repo.manifestlog[self._manifestnode]
2519 return self._repo.manifestlog[self._manifestnode]
2519
2520
2520 def filectx(self, path, filelog=None):
2521 def filectx(self, path, filelog=None):
2521 return self._originalctx.filectx(path, filelog=filelog)
2522 return self._originalctx.filectx(path, filelog=filelog)
2522
2523
2523 def commit(self):
2524 def commit(self):
2524 """commit context to the repo"""
2525 """commit context to the repo"""
2525 return self._repo.commitctx(self)
2526 return self._repo.commitctx(self)
2526
2527
2527 @property
2528 @property
2528 def _manifest(self):
2529 def _manifest(self):
2529 return self._originalctx.manifest()
2530 return self._originalctx.manifest()
2530
2531
2531 @propertycache
2532 @propertycache
2532 def _status(self):
2533 def _status(self):
2533 """Calculate exact status from ``files`` specified in the ``origctx``
2534 """Calculate exact status from ``files`` specified in the ``origctx``
2534 and parents manifests.
2535 and parents manifests.
2535 """
2536 """
2536 man1 = self.p1().manifest()
2537 man1 = self.p1().manifest()
2537 p2 = self._parents[1]
2538 p2 = self._parents[1]
2538 # "1 < len(self._parents)" can't be used for checking
2539 # "1 < len(self._parents)" can't be used for checking
2539 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2540 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2540 # explicitly initialized with a list whose length is 2.
2541 # explicitly initialized with a list whose length is 2.
2541 if p2.node() != nullid:
2542 if p2.node() != nullid:
2542 man2 = p2.manifest()
2543 man2 = p2.manifest()
2543 managing = lambda f: f in man1 or f in man2
2544 managing = lambda f: f in man1 or f in man2
2544 else:
2545 else:
2545 managing = lambda f: f in man1
2546 managing = lambda f: f in man1
2546
2547
2547 modified, added, removed = [], [], []
2548 modified, added, removed = [], [], []
2548 for f in self._files:
2549 for f in self._files:
2549 if not managing(f):
2550 if not managing(f):
2550 added.append(f)
2551 added.append(f)
2551 elif f in self:
2552 elif f in self:
2552 modified.append(f)
2553 modified.append(f)
2553 else:
2554 else:
2554 removed.append(f)
2555 removed.append(f)
2555
2556
2556 return scmutil.status(modified, added, removed, [], [], [], [])
2557 return scmutil.status(modified, added, removed, [], [], [], [])
2557
2558
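# --- Illustrative sketch (not part of the original module) ---
# A minimal example of a metadata-only rewrite: reuse the manifest of an
# existing commit while changing only the description, in the spirit of a
# lightweight amend-style operation. ``repo`` and ``rev`` are assumed inputs;
# the new message is made up.
def _example_metadataonly_rewrite(repo, rev):
    """Hypothetical helper recommitting ``rev`` with a new description."""
    origctx = repo[rev]
    newctx = metadataonlyctx(repo, origctx,
                             text='reworded commit message',
                             user=origctx.user(),
                             date=origctx.date())
    return newctx.commit()           # same as repo.commitctx(newctx)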
2558 class arbitraryfilectx(object):
2559 class arbitraryfilectx(object):
2559 """Allows you to use filectx-like functions on a file in an arbitrary
2560 """Allows you to use filectx-like functions on a file in an arbitrary
2560 location on disk, possibly not in the working directory.
2561 location on disk, possibly not in the working directory.
2561 """
2562 """
2562 def __init__(self, path, repo=None):
2563 def __init__(self, path, repo=None):
2563 # Repo is optional because contrib/simplemerge uses this class.
2564 # Repo is optional because contrib/simplemerge uses this class.
2564 self._repo = repo
2565 self._repo = repo
2565 self._path = path
2566 self._path = path
2566
2567
2567 def cmp(self, fctx):
2568 def cmp(self, fctx):
2568 # filecmp follows symlinks whereas `cmp` should not, so skip the fast
2569 # filecmp follows symlinks whereas `cmp` should not, so skip the fast
2569 # path if either side is a symlink.
2570 # path if either side is a symlink.
2570 symlinks = ('l' in self.flags() or 'l' in fctx.flags())
2571 symlinks = ('l' in self.flags() or 'l' in fctx.flags())
2571 if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
2572 if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
2572 # Add a fast-path for merge if both sides are disk-backed.
2573 # Add a fast-path for merge if both sides are disk-backed.
2573 # Note that filecmp uses the opposite return values (True if same)
2574 # Note that filecmp uses the opposite return values (True if same)
2574 # from our cmp functions (True if different).
2575 # from our cmp functions (True if different).
2575 return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
2576 return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
2576 return self.data() != fctx.data()
2577 return self.data() != fctx.data()
2577
2578
2578 def path(self):
2579 def path(self):
2579 return self._path
2580 return self._path
2580
2581
2581 def flags(self):
2582 def flags(self):
2582 return ''
2583 return ''
2583
2584
2584 def data(self):
2585 def data(self):
2585 return util.readfile(self._path)
2586 return util.readfile(self._path)
2586
2587
2587 def decodeddata(self):
2588 def decodeddata(self):
2588 with open(self._path, "rb") as f:
2589 with open(self._path, "rb") as f:
2589 return f.read()
2590 return f.read()
2590
2591
2591 def remove(self):
2592 def remove(self):
2592 util.unlink(self._path)
2593 util.unlink(self._path)
2593
2594
2594 def write(self, data, flags, **kwargs):
2595 def write(self, data, flags, **kwargs):
2595 assert not flags
2596 assert not flags
2596 with open(self._path, "wb") as f:
2597 with open(self._path, "wb") as f:
2597 f.write(data)
2598 f.write(data)
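# --- Illustrative sketch (not part of the original module) ---
# A minimal example of comparing an arbitrary on-disk file (e.g. a backup
# left next to the working copy) against the tracked version of a file, in
# the spirit of what contrib/simplemerge-style tooling does. The paths are
# made up; ``repo`` is assumed to be a localrepo.
def _example_arbitraryfilectx(repo):
    """Hypothetical helper: does foo.txt.orig differ from wdir foo.txt?"""
    backup = arbitraryfilectx(repo.wjoin('foo.txt.orig'), repo=repo)
    wdirfctx = repo[None]['foo.txt']
    return backup.cmp(wdirfctx)      # True if the contents differ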