##// END OF EJS Templates
context: clarify the various modes in the _copies property cache...
marmoute -
r43293:8af90989 default
parent child Browse files
Show More
@@ -1,2592 +1,2600 b''
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import filecmp
11 import filecmp
12 import os
12 import os
13 import stat
13 import stat
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 addednodeid,
17 addednodeid,
18 hex,
18 hex,
19 modifiednodeid,
19 modifiednodeid,
20 nullid,
20 nullid,
21 nullrev,
21 nullrev,
22 short,
22 short,
23 wdirfilenodeids,
23 wdirfilenodeids,
24 wdirhex,
24 wdirhex,
25 )
25 )
26 from . import (
26 from . import (
27 copies,
27 copies,
28 dagop,
28 dagop,
29 encoding,
29 encoding,
30 error,
30 error,
31 fileset,
31 fileset,
32 match as matchmod,
32 match as matchmod,
33 obsolete as obsmod,
33 obsolete as obsmod,
34 patch,
34 patch,
35 pathutil,
35 pathutil,
36 phases,
36 phases,
37 pycompat,
37 pycompat,
38 repoview,
38 repoview,
39 scmutil,
39 scmutil,
40 sparse,
40 sparse,
41 subrepo,
41 subrepo,
42 subrepoutil,
42 subrepoutil,
43 util,
43 util,
44 )
44 )
45 from .utils import (
45 from .utils import (
46 dateutil,
46 dateutil,
47 stringutil,
47 stringutil,
48 )
48 )
49
49
50 propertycache = util.propertycache
50 propertycache = util.propertycache
51
51
class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
    be committed,
    memctx: a context that represents changes in-memory and can also
    be committed."""

    def __init__(self, repo):
        self._repo = repo

    def __bytes__(self):
        return short(self.node())

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return r"<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            # other may not have a _rev (or be a context at all)
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        return key in self._manifest

    def __getitem__(self, key):
        return self.filectx(key)

    def __iter__(self):
        return iter(self._manifest)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results, if this is
        a working copy context. For non-working copy contexts, it just returns
        the normal manifest."""
        return self.manifest()

    def _matchstatus(self, other, match):
        """This internal method provides a way for child objects to override the
        match operator.
        """
        return match

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in d.iteritems():
            if fn in deletedset:
                continue
            if value is None:
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in wdirfilenodeids:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [fn for fn in unknown if fn not in mf1 and
                       (not match or match(fn))]
            ignored = [fn for fn in ignored if fn not in mf1 and
                       (not match or match(fn))]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(modified, added, removed, deleted, unknown,
                              ignored, clean)

    @propertycache
    def substate(self):
        return subrepoutil.state(self, self._repo.ui)

    def subrev(self, subpath):
        return self.substate[subpath][1]

    def rev(self):
        return self._rev
    def node(self):
        return self._node
    def hex(self):
        return hex(self.node())
    def manifest(self):
        return self._manifest
    def manifestctx(self):
        return self._manifestctx
    def repo(self):
        return self._repo
    def phasestr(self):
        return phases.phasenames[self.phase()]
    def mutable(self):
        return self.phase() > phases.public

    def matchfileset(self, expr, badfn=None):
        return fileset.match(self, expr, badfn=badfn)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, 'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, 'extinct')

    def orphan(self):
        """True if the changeset is not obsolete, but its ancestor is"""
        return self.rev() in obsmod.getrevs(self._repo, 'orphan')

    def phasedivergent(self):
        """True if the changeset tries to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be phase-divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')

    def contentdivergent(self):
        """Is a successor of a changeset with multiple possible successor sets

        Only non-public and non-obsolete changesets may be content-divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')

    def isunstable(self):
        """True if the changeset is either orphan, phase-divergent or
        content-divergent"""
        return self.orphan() or self.phasedivergent() or self.contentdivergent()

    def instabilities(self):
        """return the list of instabilities affecting this changeset.

        Instabilities are returned as strings. possible values are:
        - orphan,
        - phase-divergent,
        - content-divergent.
        """
        instabilities = []
        if self.orphan():
            instabilities.append('orphan')
        if self.phasedivergent():
            instabilities.append('phase-divergent')
        if self.contentdivergent():
            instabilities.append('content-divergent')
        return instabilities

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        return self._repo[nullrev]

    def _fileinfo(self, path):
        # Use the full manifest if it is already loaded; otherwise try the
        # cheaper manifest delta before falling back to a manifestlog lookup.
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(self._node, path,
                                                _('not found in manifest'))
        if r'_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (self._manifestdelta[path],
                        self._manifestdelta.flags(path))
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(self._node, path,
                                            _('not found in manifest'))

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return ''

    @propertycache
    def _copies(self):
        # (p1copies, p2copies) computed from the filelogs
        return copies.computechangesetcopies(self)
    def p1copies(self):
        return self._copies[0]
    def p2copies(self):
        return self._copies[1]

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        '''return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        '''
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        r = self._repo
        return matchmod.match(r.root, r.getcwd(), pats,
                              include, exclude, default,
                              auditor=r.nofsauditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn)

    def diff(self, ctx2=None, match=None, changes=None, opts=None,
             losedatafn=None, pathfn=None, copy=None,
             copysourcematch=None, hunksfilterfn=None):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        return patch.diff(self._repo, ctx2, self, match=match, changes=changes,
                          opts=opts, losedatafn=losedatafn, pathfn=pathfn,
                          copy=copy, copysourcematch=copysourcematch,
                          hunksfilterfn=hunksfilterfn)

    def dirs(self):
        return self._manifest.dirs()

    def hasdir(self, dir):
        return self._manifest.hasdir(dir)

    def status(self, other=None, match=None, listignored=False,
               listclean=False, listunknown=False, listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        reversed = False
        if (not isinstance(ctx1, changectx)
            and isinstance(ctx2, changectx)):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = self._repo.narrowmatch(match)
        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
                              listunknown)

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
                               r.clean)

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(rev2, match=submatch, ignored=listignored,
                               clean=listclean, unknown=listunknown,
                               listsubrepos=True)
                for rfiles, sfiles in zip(r, s):
                    rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)

        for l in r:
            l.sort()

        return r

class changectx(basectx):
    """A changecontext object makes access to data related to a particular
    changeset convenient. It represents a read-only context already present in
    the repo."""
    def __init__(self, repo, rev, node):
        super(changectx, self).__init__(repo)
        self._rev = rev
        self._node = node

    def __hash__(self):
        try:
            return hash(self._rev)
        except AttributeError:
            return id(self)

    def __nonzero__(self):
        return self._rev != nullrev

    __bool__ = __nonzero__

    @propertycache
    def _changeset(self):
        return self._repo.changelog.changelogrevision(self.rev())

    @propertycache
    def _manifest(self):
        return self._manifestctx.read()

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._changeset.manifest]

    @propertycache
    def _manifestdelta(self):
        return self._manifestctx.readdelta()

    @propertycache
    def _parents(self):
        repo = self._repo
        p1, p2 = repo.changelog.parentrevs(self._rev)
        if p2 == nullrev:
            return [repo[p1]]
        return [repo[p1], repo[p2]]

    def changeset(self):
        c = self._changeset
        return (
            c.manifest,
            c.user,
            c.date,
            c.files,
            c.description,
            c.extra,
        )
    def manifestnode(self):
        return self._changeset.manifest

    def user(self):
        return self._changeset.user
    def date(self):
        return self._changeset.date
    def files(self):
        return self._changeset.files
    def filesmodified(self):
        modified = set(self.files())
        modified.difference_update(self.filesadded())
        modified.difference_update(self.filesremoved())
        return sorted(modified)

    def filesadded(self):
        source = self._repo.ui.config('experimental', 'copies.read-from')
        filesadded = self._changeset.filesadded
        if source == 'changeset-only':
            if filesadded is None:
                filesadded = []
        elif source == 'compatibility':
            if filesadded is None:
                filesadded = scmutil.computechangesetfilesadded(self)
        else:
            filesadded = scmutil.computechangesetfilesadded(self)
        return filesadded

    def filesremoved(self):
        source = self._repo.ui.config('experimental', 'copies.read-from')
        filesremoved = self._changeset.filesremoved
        if source == 'changeset-only':
            if filesremoved is None:
                filesremoved = []
        elif source == 'compatibility':
            if filesremoved is None:
                filesremoved = scmutil.computechangesetfilesremoved(self)
        else:
            filesremoved = scmutil.computechangesetfilesremoved(self)
        return filesremoved

    @propertycache
    def _copies(self):
        source = self._repo.ui.config('experimental', 'copies.read-from')
        p1copies = self._changeset.p1copies
        p2copies = self._changeset.p2copies
        # If config says to get copy metadata only from changeset, then return
        # that, defaulting to {} if there was no copy metadata.
        # In compatibility mode, we return copy data from the changeset if
        # it was recorded there, and otherwise we fall back to getting it from
        # the filelogs (below).
        if source == 'changeset-only':
            if p1copies is None:
                p1copies = {}
            if p2copies is None:
                p2copies = {}
        elif source == 'compatibility':
            if p1copies is None:
                # we are in compatibility mode and there is no data in the
                # changeset, so we get the copy metadata from the filelogs.
                p1copies, p2copies = super(changectx, self)._copies
        else:
            # config said to read only from filelog, we get the copy metadata
            # from the filelogs.
            p1copies, p2copies = super(changectx, self)._copies
        return p1copies, p2copies

    def description(self):
        return self._changeset.description
    def branch(self):
        return encoding.tolocal(self._changeset.extra.get("branch"))
    def closesbranch(self):
        return 'close' in self._changeset.extra
    def extra(self):
        """Return a dict of extra information."""
        return self._changeset.extra
    def tags(self):
        """Return a list of byte tag names"""
        return self._repo.nodetags(self._node)
    def bookmarks(self):
        """Return a list of byte bookmark names."""
        return self._repo.nodebookmarks(self._node)
    def phase(self):
        return self._repo._phasecache.phase(self._repo, self._rev)
    def hidden(self):
        return self._rev in repoview.filterrevs(self._repo, 'visible')

    def isinmemory(self):
        return False

    def children(self):
        """return list of changectx contexts for each child changeset.

        This returns only the immediate child changesets. Use descendants() to
        recursively walk children.
        """
        c = self._repo.changelog.children(self._node)
        return [self._repo[x] for x in c]

    def ancestors(self):
        for a in self._repo.changelog.ancestors([self._rev]):
            yield self._repo[a]

    def descendants(self):
        """Recursively yield all children of the changeset.

        For just the immediate children, use children()
        """
        for d in self._repo.changelog.descendants([self._rev]):
            yield self._repo[d]

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(self._repo, path, fileid=fileid,
                       changectx=self, filelog=filelog)
551
559
552 def ancestor(self, c2, warn=False):
560 def ancestor(self, c2, warn=False):
553 """return the "best" ancestor context of self and c2
561 """return the "best" ancestor context of self and c2
554
562
555 If there are multiple candidates, it will show a message and check
563 If there are multiple candidates, it will show a message and check
556 merge.preferancestor configuration before falling back to the
564 merge.preferancestor configuration before falling back to the
557 revlog ancestor."""
565 revlog ancestor."""
558 # deal with workingctxs
566 # deal with workingctxs
559 n2 = c2._node
567 n2 = c2._node
560 if n2 is None:
568 if n2 is None:
561 n2 = c2._parents[0]._node
569 n2 = c2._parents[0]._node
562 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
570 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
563 if not cahs:
571 if not cahs:
564 anc = nullid
572 anc = nullid
565 elif len(cahs) == 1:
573 elif len(cahs) == 1:
566 anc = cahs[0]
574 anc = cahs[0]
567 else:
575 else:
568 # experimental config: merge.preferancestor
576 # experimental config: merge.preferancestor
569 for r in self._repo.ui.configlist('merge', 'preferancestor'):
577 for r in self._repo.ui.configlist('merge', 'preferancestor'):
570 try:
578 try:
571 ctx = scmutil.revsymbol(self._repo, r)
579 ctx = scmutil.revsymbol(self._repo, r)
572 except error.RepoLookupError:
580 except error.RepoLookupError:
573 continue
581 continue
574 anc = ctx.node()
582 anc = ctx.node()
575 if anc in cahs:
583 if anc in cahs:
576 break
584 break
577 else:
585 else:
578 anc = self._repo.changelog.ancestor(self._node, n2)
586 anc = self._repo.changelog.ancestor(self._node, n2)
579 if warn:
587 if warn:
580 self._repo.ui.status(
588 self._repo.ui.status(
581 (_("note: using %s as ancestor of %s and %s\n") %
589 (_("note: using %s as ancestor of %s and %s\n") %
582 (short(anc), short(self._node), short(n2))) +
590 (short(anc), short(self._node), short(n2))) +
583 ''.join(_(" alternatively, use --config "
591 ''.join(_(" alternatively, use --config "
584 "merge.preferancestor=%s\n") %
592 "merge.preferancestor=%s\n") %
585 short(n) for n in sorted(cahs) if n != anc))
593 short(n) for n in sorted(cahs) if n != anc))
586 return self._repo[anc]
594 return self._repo[anc]
587
595
588 def isancestorof(self, other):
596 def isancestorof(self, other):
589 """True if this changeset is an ancestor of other"""
597 """True if this changeset is an ancestor of other"""
590 return self._repo.changelog.isancestorrev(self._rev, other._rev)
598 return self._repo.changelog.isancestorrev(self._rev, other._rev)
591
599
592 def walk(self, match):
600 def walk(self, match):
593 '''Generates matching file names.'''
601 '''Generates matching file names.'''
594
602
595 # Wrap match.bad method to have message with nodeid
603 # Wrap match.bad method to have message with nodeid
596 def bad(fn, msg):
604 def bad(fn, msg):
597 # The manifest doesn't know about subrepos, so don't complain about
605 # The manifest doesn't know about subrepos, so don't complain about
598 # paths into valid subrepos.
606 # paths into valid subrepos.
599 if any(fn == s or fn.startswith(s + '/')
607 if any(fn == s or fn.startswith(s + '/')
600 for s in self.substate):
608 for s in self.substate):
601 return
609 return
602 match.bad(fn, _('no such file in rev %s') % self)
610 match.bad(fn, _('no such file in rev %s') % self)
603
611
604 m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
612 m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
605 return self._manifest.walk(m)
613 return self._manifest.walk(m)
606
614
607 def matches(self, match):
615 def matches(self, match):
608 return self.walk(match)
616 return self.walk(match)
609
617
610 class basefilectx(object):
618 class basefilectx(object):
611 """A filecontext object represents the common logic for its children:
619 """A filecontext object represents the common logic for its children:
612 filectx: read-only access to a filerevision that is already present
620 filectx: read-only access to a filerevision that is already present
613 in the repo,
621 in the repo,
614 workingfilectx: a filecontext that represents files from the working
622 workingfilectx: a filecontext that represents files from the working
615 directory,
623 directory,
616 memfilectx: a filecontext that represents files in-memory,
624 memfilectx: a filecontext that represents files in-memory,
617 """
625 """
618 @propertycache
626 @propertycache
619 def _filelog(self):
627 def _filelog(self):
620 return self._repo.file(self._path)
628 return self._repo.file(self._path)
621
629
622 @propertycache
630 @propertycache
623 def _changeid(self):
631 def _changeid(self):
624 if r'_changectx' in self.__dict__:
632 if r'_changectx' in self.__dict__:
625 return self._changectx.rev()
633 return self._changectx.rev()
626 elif r'_descendantrev' in self.__dict__:
634 elif r'_descendantrev' in self.__dict__:
627 # this file context was created from a revision with a known
635 # this file context was created from a revision with a known
628 # descendant, we can (lazily) correct for linkrev aliases
636 # descendant, we can (lazily) correct for linkrev aliases
629 return self._adjustlinkrev(self._descendantrev)
637 return self._adjustlinkrev(self._descendantrev)
630 else:
638 else:
631 return self._filelog.linkrev(self._filerev)
639 return self._filelog.linkrev(self._filerev)
632
640
633 @propertycache
641 @propertycache
634 def _filenode(self):
642 def _filenode(self):
635 if r'_fileid' in self.__dict__:
643 if r'_fileid' in self.__dict__:
636 return self._filelog.lookup(self._fileid)
644 return self._filelog.lookup(self._fileid)
637 else:
645 else:
638 return self._changectx.filenode(self._path)
646 return self._changectx.filenode(self._path)
639
647
640 @propertycache
648 @propertycache
641 def _filerev(self):
649 def _filerev(self):
642 return self._filelog.rev(self._filenode)
650 return self._filelog.rev(self._filenode)
643
651
644 @propertycache
652 @propertycache
645 def _repopath(self):
653 def _repopath(self):
646 return self._path
654 return self._path
647
655
648 def __nonzero__(self):
656 def __nonzero__(self):
649 try:
657 try:
650 self._filenode
658 self._filenode
651 return True
659 return True
652 except error.LookupError:
660 except error.LookupError:
653 # file is missing
661 # file is missing
654 return False
662 return False
655
663
656 __bool__ = __nonzero__
664 __bool__ = __nonzero__
657
665
658 def __bytes__(self):
666 def __bytes__(self):
659 try:
667 try:
660 return "%s@%s" % (self.path(), self._changectx)
668 return "%s@%s" % (self.path(), self._changectx)
661 except error.LookupError:
669 except error.LookupError:
662 return "%s@???" % self.path()
670 return "%s@???" % self.path()
663
671
664 __str__ = encoding.strmethod(__bytes__)
672 __str__ = encoding.strmethod(__bytes__)
665
673
666 def __repr__(self):
674 def __repr__(self):
667 return r"<%s %s>" % (type(self).__name__, str(self))
675 return r"<%s %s>" % (type(self).__name__, str(self))
668
676
669 def __hash__(self):
677 def __hash__(self):
670 try:
678 try:
671 return hash((self._path, self._filenode))
679 return hash((self._path, self._filenode))
672 except AttributeError:
680 except AttributeError:
673 return id(self)
681 return id(self)
674
682
675 def __eq__(self, other):
683 def __eq__(self, other):
676 try:
684 try:
677 return (type(self) == type(other) and self._path == other._path
685 return (type(self) == type(other) and self._path == other._path
678 and self._filenode == other._filenode)
686 and self._filenode == other._filenode)
679 except AttributeError:
687 except AttributeError:
680 return False
688 return False
681
689
682 def __ne__(self, other):
690 def __ne__(self, other):
683 return not (self == other)
691 return not (self == other)
684
692
685 def filerev(self):
693 def filerev(self):
686 return self._filerev
694 return self._filerev
687 def filenode(self):
695 def filenode(self):
688 return self._filenode
696 return self._filenode
689 @propertycache
697 @propertycache
690 def _flags(self):
698 def _flags(self):
691 return self._changectx.flags(self._path)
699 return self._changectx.flags(self._path)
692 def flags(self):
700 def flags(self):
693 return self._flags
701 return self._flags
694 def filelog(self):
702 def filelog(self):
695 return self._filelog
703 return self._filelog
696 def rev(self):
704 def rev(self):
697 return self._changeid
705 return self._changeid
698 def linkrev(self):
706 def linkrev(self):
699 return self._filelog.linkrev(self._filerev)
707 return self._filelog.linkrev(self._filerev)
700 def node(self):
708 def node(self):
701 return self._changectx.node()
709 return self._changectx.node()
702 def hex(self):
710 def hex(self):
703 return self._changectx.hex()
711 return self._changectx.hex()
704 def user(self):
712 def user(self):
705 return self._changectx.user()
713 return self._changectx.user()
706 def date(self):
714 def date(self):
707 return self._changectx.date()
715 return self._changectx.date()
708 def files(self):
716 def files(self):
709 return self._changectx.files()
717 return self._changectx.files()
710 def description(self):
718 def description(self):
711 return self._changectx.description()
719 return self._changectx.description()
712 def branch(self):
720 def branch(self):
713 return self._changectx.branch()
721 return self._changectx.branch()
714 def extra(self):
722 def extra(self):
715 return self._changectx.extra()
723 return self._changectx.extra()
716 def phase(self):
724 def phase(self):
717 return self._changectx.phase()
725 return self._changectx.phase()
718 def phasestr(self):
726 def phasestr(self):
719 return self._changectx.phasestr()
727 return self._changectx.phasestr()
720 def obsolete(self):
728 def obsolete(self):
721 return self._changectx.obsolete()
729 return self._changectx.obsolete()
722 def instabilities(self):
730 def instabilities(self):
723 return self._changectx.instabilities()
731 return self._changectx.instabilities()
724 def manifest(self):
732 def manifest(self):
725 return self._changectx.manifest()
733 return self._changectx.manifest()
726 def changectx(self):
734 def changectx(self):
727 return self._changectx
735 return self._changectx
728 def renamed(self):
736 def renamed(self):
729 return self._copied
737 return self._copied
730 def copysource(self):
738 def copysource(self):
731 return self._copied and self._copied[0]
739 return self._copied and self._copied[0]
732 def repo(self):
740 def repo(self):
733 return self._repo
741 return self._repo
734 def size(self):
742 def size(self):
735 return len(self.data())
743 return len(self.data())
736
744
737 def path(self):
745 def path(self):
738 return self._path
746 return self._path
739
747
740 def isbinary(self):
748 def isbinary(self):
741 try:
749 try:
742 return stringutil.binary(self.data())
750 return stringutil.binary(self.data())
743 except IOError:
751 except IOError:
744 return False
752 return False
745 def isexec(self):
753 def isexec(self):
746 return 'x' in self.flags()
754 return 'x' in self.flags()
747 def islink(self):
755 def islink(self):
748 return 'l' in self.flags()
756 return 'l' in self.flags()
749
757
750 def isabsent(self):
758 def isabsent(self):
751 """whether this filectx represents a file not in self._changectx
759 """whether this filectx represents a file not in self._changectx
752
760
753 This is mainly for merge code to detect change/delete conflicts. This is
761 This is mainly for merge code to detect change/delete conflicts. This is
754 expected to be True for all subclasses of basectx."""
762 expected to be True for all subclasses of basectx."""
755 return False
763 return False
756
764
757 _customcmp = False
765 _customcmp = False
758 def cmp(self, fctx):
766 def cmp(self, fctx):
759 """compare with other file context
767 """compare with other file context
760
768
761 returns True if different than fctx.
769 returns True if different than fctx.
762 """
770 """
763 if fctx._customcmp:
771 if fctx._customcmp:
764 return fctx.cmp(self)
772 return fctx.cmp(self)
765
773
766 if self._filenode is None:
774 if self._filenode is None:
767 raise error.ProgrammingError(
775 raise error.ProgrammingError(
768 'filectx.cmp() must be reimplemented if not backed by revlog')
776 'filectx.cmp() must be reimplemented if not backed by revlog')
769
777
770 if fctx._filenode is None:
778 if fctx._filenode is None:
771 if self._repo._encodefilterpats:
779 if self._repo._encodefilterpats:
772 # can't rely on size() because wdir content may be decoded
780 # can't rely on size() because wdir content may be decoded
773 return self._filelog.cmp(self._filenode, fctx.data())
781 return self._filelog.cmp(self._filenode, fctx.data())
774 if self.size() - 4 == fctx.size():
782 if self.size() - 4 == fctx.size():
775 # size() can match:
783 # size() can match:
776 # if file data starts with '\1\n', empty metadata block is
784 # if file data starts with '\1\n', empty metadata block is
777 # prepended, which adds 4 bytes to filelog.size().
785 # prepended, which adds 4 bytes to filelog.size().
778 return self._filelog.cmp(self._filenode, fctx.data())
786 return self._filelog.cmp(self._filenode, fctx.data())
779 if self.size() == fctx.size():
787 if self.size() == fctx.size():
780 # size() matches: need to compare content
788 # size() matches: need to compare content
781 return self._filelog.cmp(self._filenode, fctx.data())
789 return self._filelog.cmp(self._filenode, fctx.data())
782
790
783 # size() differs
791 # size() differs
784 return True
792 return True
785
793
786 def _adjustlinkrev(self, srcrev, inclusive=False, stoprev=None):
794 def _adjustlinkrev(self, srcrev, inclusive=False, stoprev=None):
787 """return the first ancestor of <srcrev> introducing <fnode>
795 """return the first ancestor of <srcrev> introducing <fnode>
788
796
789 If the linkrev of the file revision does not point to an ancestor of
797 If the linkrev of the file revision does not point to an ancestor of
790 srcrev, we'll walk down the ancestors until we find one introducing
798 srcrev, we'll walk down the ancestors until we find one introducing
791 this file revision.
799 this file revision.
792
800
793 :srcrev: the changeset revision we search ancestors from
801 :srcrev: the changeset revision we search ancestors from
794 :inclusive: if true, the src revision will also be checked
802 :inclusive: if true, the src revision will also be checked
795 :stoprev: an optional revision to stop the walk at. If no introduction
803 :stoprev: an optional revision to stop the walk at. If no introduction
796 of this file content could be found before this floor
804 of this file content could be found before this floor
797 revision, the function will returns "None" and stops its
805 revision, the function will returns "None" and stops its
798 iteration.
806 iteration.
799 """
807 """
800 repo = self._repo
808 repo = self._repo
801 cl = repo.unfiltered().changelog
809 cl = repo.unfiltered().changelog
802 mfl = repo.manifestlog
810 mfl = repo.manifestlog
803 # fetch the linkrev
811 # fetch the linkrev
804 lkr = self.linkrev()
812 lkr = self.linkrev()
805 if srcrev == lkr:
813 if srcrev == lkr:
806 return lkr
814 return lkr
807 # hack to reuse ancestor computation when searching for renames
815 # hack to reuse ancestor computation when searching for renames
808 memberanc = getattr(self, '_ancestrycontext', None)
816 memberanc = getattr(self, '_ancestrycontext', None)
809 iteranc = None
817 iteranc = None
810 if srcrev is None:
818 if srcrev is None:
811 # wctx case, used by workingfilectx during mergecopy
819 # wctx case, used by workingfilectx during mergecopy
812 revs = [p.rev() for p in self._repo[None].parents()]
820 revs = [p.rev() for p in self._repo[None].parents()]
813 inclusive = True # we skipped the real (revless) source
821 inclusive = True # we skipped the real (revless) source
814 else:
822 else:
815 revs = [srcrev]
823 revs = [srcrev]
816 if memberanc is None:
824 if memberanc is None:
817 memberanc = iteranc = cl.ancestors(revs, lkr,
825 memberanc = iteranc = cl.ancestors(revs, lkr,
818 inclusive=inclusive)
826 inclusive=inclusive)
819 # check if this linkrev is an ancestor of srcrev
827 # check if this linkrev is an ancestor of srcrev
820 if lkr not in memberanc:
828 if lkr not in memberanc:
821 if iteranc is None:
829 if iteranc is None:
822 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
830 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
823 fnode = self._filenode
831 fnode = self._filenode
824 path = self._path
832 path = self._path
825 for a in iteranc:
833 for a in iteranc:
826 if stoprev is not None and a < stoprev:
834 if stoprev is not None and a < stoprev:
827 return None
835 return None
828 ac = cl.read(a) # get changeset data (we avoid object creation)
836 ac = cl.read(a) # get changeset data (we avoid object creation)
829 if path in ac[3]: # checking the 'files' field.
837 if path in ac[3]: # checking the 'files' field.
830 # The file has been touched, check if the content is
838 # The file has been touched, check if the content is
831 # similar to the one we search for.
839 # similar to the one we search for.
832 if fnode == mfl[ac[0]].readfast().get(path):
840 if fnode == mfl[ac[0]].readfast().get(path):
833 return a
841 return a
834 # In theory, we should never get out of that loop without a result.
842 # In theory, we should never get out of that loop without a result.
835 # But if manifest uses a buggy file revision (not children of the
843 # But if manifest uses a buggy file revision (not children of the
836 # one it replaces) we could. Such a buggy situation will likely
844 # one it replaces) we could. Such a buggy situation will likely
837 # result is crash somewhere else at to some point.
845 # result is crash somewhere else at to some point.
838 return lkr
846 return lkr
839
847
840 def isintroducedafter(self, changelogrev):
848 def isintroducedafter(self, changelogrev):
841 """True if a filectx has been introduced after a given floor revision
849 """True if a filectx has been introduced after a given floor revision
842 """
850 """
843 if self.linkrev() >= changelogrev:
851 if self.linkrev() >= changelogrev:
844 return True
852 return True
845 introrev = self._introrev(stoprev=changelogrev)
853 introrev = self._introrev(stoprev=changelogrev)
846 if introrev is None:
854 if introrev is None:
847 return False
855 return False
848 return introrev >= changelogrev
856 return introrev >= changelogrev
849
857
850 def introrev(self):
858 def introrev(self):
851 """return the rev of the changeset which introduced this file revision
859 """return the rev of the changeset which introduced this file revision
852
860
853 This method is different from linkrev because it take into account the
861 This method is different from linkrev because it take into account the
854 changeset the filectx was created from. It ensures the returned
862 changeset the filectx was created from. It ensures the returned
855 revision is one of its ancestors. This prevents bugs from
863 revision is one of its ancestors. This prevents bugs from
856 'linkrev-shadowing' when a file revision is used by multiple
864 'linkrev-shadowing' when a file revision is used by multiple
857 changesets.
865 changesets.
858 """
866 """
859 return self._introrev()
867 return self._introrev()
860
868
861 def _introrev(self, stoprev=None):
869 def _introrev(self, stoprev=None):
862 """
870 """
863 Same as `introrev` but, with an extra argument to limit changelog
871 Same as `introrev` but, with an extra argument to limit changelog
864 iteration range in some internal usecase.
872 iteration range in some internal usecase.
865
873
866 If `stoprev` is set, the `introrev` will not be searched past that
874 If `stoprev` is set, the `introrev` will not be searched past that
867 `stoprev` revision and "None" might be returned. This is useful to
875 `stoprev` revision and "None" might be returned. This is useful to
868 limit the iteration range.
876 limit the iteration range.
869 """
877 """
870 toprev = None
878 toprev = None
871 attrs = vars(self)
879 attrs = vars(self)
872 if r'_changeid' in attrs:
880 if r'_changeid' in attrs:
873 # We have a cached value already
881 # We have a cached value already
874 toprev = self._changeid
882 toprev = self._changeid
875 elif r'_changectx' in attrs:
883 elif r'_changectx' in attrs:
876 # We know which changelog entry we are coming from
884 # We know which changelog entry we are coming from
877 toprev = self._changectx.rev()
885 toprev = self._changectx.rev()
878
886
879 if toprev is not None:
887 if toprev is not None:
880 return self._adjustlinkrev(toprev, inclusive=True, stoprev=stoprev)
888 return self._adjustlinkrev(toprev, inclusive=True, stoprev=stoprev)
881 elif r'_descendantrev' in attrs:
889 elif r'_descendantrev' in attrs:
882 introrev = self._adjustlinkrev(self._descendantrev, stoprev=stoprev)
890 introrev = self._adjustlinkrev(self._descendantrev, stoprev=stoprev)
883 # be nice and cache the result of the computation
891 # be nice and cache the result of the computation
884 if introrev is not None:
892 if introrev is not None:
885 self._changeid = introrev
893 self._changeid = introrev
886 return introrev
894 return introrev
887 else:
895 else:
888 return self.linkrev()
896 return self.linkrev()
889
897
890 def introfilectx(self):
898 def introfilectx(self):
891 """Return filectx having identical contents, but pointing to the
899 """Return filectx having identical contents, but pointing to the
892 changeset revision where this filectx was introduced"""
900 changeset revision where this filectx was introduced"""
893 introrev = self.introrev()
901 introrev = self.introrev()
894 if self.rev() == introrev:
902 if self.rev() == introrev:
895 return self
903 return self
896 return self.filectx(self.filenode(), changeid=introrev)
904 return self.filectx(self.filenode(), changeid=introrev)
897
905
898 def _parentfilectx(self, path, fileid, filelog):
906 def _parentfilectx(self, path, fileid, filelog):
899 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
907 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
900 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
908 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
901 if r'_changeid' in vars(self) or r'_changectx' in vars(self):
909 if r'_changeid' in vars(self) or r'_changectx' in vars(self):
902 # If self is associated with a changeset (probably explicitly
910 # If self is associated with a changeset (probably explicitly
903 # fed), ensure the created filectx is associated with a
911 # fed), ensure the created filectx is associated with a
904 # changeset that is an ancestor of self.changectx.
912 # changeset that is an ancestor of self.changectx.
905 # This lets us later use _adjustlinkrev to get a correct link.
913 # This lets us later use _adjustlinkrev to get a correct link.
906 fctx._descendantrev = self.rev()
914 fctx._descendantrev = self.rev()
907 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
915 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
908 elif r'_descendantrev' in vars(self):
916 elif r'_descendantrev' in vars(self):
909 # Otherwise propagate _descendantrev if we have one associated.
917 # Otherwise propagate _descendantrev if we have one associated.
910 fctx._descendantrev = self._descendantrev
918 fctx._descendantrev = self._descendantrev
911 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
919 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
912 return fctx
920 return fctx
913
921
914 def parents(self):
922 def parents(self):
915 _path = self._path
923 _path = self._path
916 fl = self._filelog
924 fl = self._filelog
917 parents = self._filelog.parents(self._filenode)
925 parents = self._filelog.parents(self._filenode)
918 pl = [(_path, node, fl) for node in parents if node != nullid]
926 pl = [(_path, node, fl) for node in parents if node != nullid]
919
927
920 r = fl.renamed(self._filenode)
928 r = fl.renamed(self._filenode)
921 if r:
929 if r:
922 # - In the simple rename case, both parent are nullid, pl is empty.
930 # - In the simple rename case, both parent are nullid, pl is empty.
923 # - In case of merge, only one of the parent is null id and should
931 # - In case of merge, only one of the parent is null id and should
924 # be replaced with the rename information. This parent is -always-
932 # be replaced with the rename information. This parent is -always-
925 # the first one.
933 # the first one.
926 #
934 #
927 # As null id have always been filtered out in the previous list
935 # As null id have always been filtered out in the previous list
928 # comprehension, inserting to 0 will always result in "replacing
936 # comprehension, inserting to 0 will always result in "replacing
929 # first nullid parent with rename information.
937 # first nullid parent with rename information.
930 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
938 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
931
939
932 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
940 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
933
941
934 def p1(self):
942 def p1(self):
935 return self.parents()[0]
943 return self.parents()[0]
936
944
937 def p2(self):
945 def p2(self):
938 p = self.parents()
946 p = self.parents()
939 if len(p) == 2:
947 if len(p) == 2:
940 return p[1]
948 return p[1]
941 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
949 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
942
950
943 def annotate(self, follow=False, skiprevs=None, diffopts=None):
951 def annotate(self, follow=False, skiprevs=None, diffopts=None):
944 """Returns a list of annotateline objects for each line in the file
952 """Returns a list of annotateline objects for each line in the file
945
953
946 - line.fctx is the filectx of the node where that line was last changed
954 - line.fctx is the filectx of the node where that line was last changed
947 - line.lineno is the line number at the first appearance in the managed
955 - line.lineno is the line number at the first appearance in the managed
948 file
956 file
949 - line.text is the data on that line (including newline character)
957 - line.text is the data on that line (including newline character)
950 """
958 """
951 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
959 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
952
960
953 def parents(f):
961 def parents(f):
954 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
962 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
955 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
963 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
956 # from the topmost introrev (= srcrev) down to p.linkrev() if it
964 # from the topmost introrev (= srcrev) down to p.linkrev() if it
957 # isn't an ancestor of the srcrev.
965 # isn't an ancestor of the srcrev.
958 f._changeid
966 f._changeid
959 pl = f.parents()
967 pl = f.parents()
960
968
961 # Don't return renamed parents if we aren't following.
969 # Don't return renamed parents if we aren't following.
962 if not follow:
970 if not follow:
963 pl = [p for p in pl if p.path() == f.path()]
971 pl = [p for p in pl if p.path() == f.path()]
964
972
965 # renamed filectx won't have a filelog yet, so set it
973 # renamed filectx won't have a filelog yet, so set it
966 # from the cache to save time
974 # from the cache to save time
967 for p in pl:
975 for p in pl:
968 if not r'_filelog' in p.__dict__:
976 if not r'_filelog' in p.__dict__:
969 p._filelog = getlog(p.path())
977 p._filelog = getlog(p.path())
970
978
971 return pl
979 return pl
972
980
973 # use linkrev to find the first changeset where self appeared
981 # use linkrev to find the first changeset where self appeared
974 base = self.introfilectx()
982 base = self.introfilectx()
975 if getattr(base, '_ancestrycontext', None) is None:
983 if getattr(base, '_ancestrycontext', None) is None:
976 cl = self._repo.changelog
984 cl = self._repo.changelog
977 if base.rev() is None:
985 if base.rev() is None:
978 # wctx is not inclusive, but works because _ancestrycontext
986 # wctx is not inclusive, but works because _ancestrycontext
979 # is used to test filelog revisions
987 # is used to test filelog revisions
980 ac = cl.ancestors([p.rev() for p in base.parents()],
988 ac = cl.ancestors([p.rev() for p in base.parents()],
981 inclusive=True)
989 inclusive=True)
982 else:
990 else:
983 ac = cl.ancestors([base.rev()], inclusive=True)
991 ac = cl.ancestors([base.rev()], inclusive=True)
984 base._ancestrycontext = ac
992 base._ancestrycontext = ac
985
993
986 return dagop.annotate(base, parents, skiprevs=skiprevs,
994 return dagop.annotate(base, parents, skiprevs=skiprevs,
987 diffopts=diffopts)
995 diffopts=diffopts)
988
996
989 def ancestors(self, followfirst=False):
997 def ancestors(self, followfirst=False):
990 visit = {}
998 visit = {}
991 c = self
999 c = self
992 if followfirst:
1000 if followfirst:
993 cut = 1
1001 cut = 1
994 else:
1002 else:
995 cut = None
1003 cut = None
996
1004
997 while True:
1005 while True:
998 for parent in c.parents()[:cut]:
1006 for parent in c.parents()[:cut]:
999 visit[(parent.linkrev(), parent.filenode())] = parent
1007 visit[(parent.linkrev(), parent.filenode())] = parent
1000 if not visit:
1008 if not visit:
1001 break
1009 break
1002 c = visit.pop(max(visit))
1010 c = visit.pop(max(visit))
1003 yield c
1011 yield c
1004
1012
def decodeddata(self):
    """Return `data()` after running repository decoding filters.

    This is often equivalent to how the data would be expressed on disk.
    """
    repo = self._repo
    return repo.wwritedata(self.path(), self.data())
1011
1019
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid must be a revision number, if specified.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        # at least one of changeid/fileid/changectx is needed to pin down
        # which revision of the file this context refers to
        assert (changeid is not None
                or fileid is not None
                or changectx is not None), (
                    "bad args: changeid=%r, fileid=%r, changectx=%r"
                    % (changeid, fileid, changectx))

        if filelog is not None:
            self._filelog = filelog

        # seed only the attributes we were given; the missing ones are
        # computed lazily via propertycache on first access
        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        # changeset context for this file revision, resolved lazily
        try:
            return self._repo[self._changeid]
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to build
            # `changectx` for filtered revision. In such case we fallback to
            # creating `changectx` on the unfiltered version of the reposition.
            # This fallback should not be an issue because `changectx` from
            # `filectx` are not used in complex operations that care about
            # filtering.
            #
            # This fallback is a cheap and dirty fix that prevent several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better as "crash"
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when solving linkrev issue are on the table.
            return self._repo.unfiltered()[self._changeid]

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        # reuse our filelog instance to avoid re-opening the revlog
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog, changeid=changeid)

    def rawdata(self):
        # raw revlog data for this file revision, before flag processing
        return self._filelog.rawdata(self._filenode)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        """Return the file content for this revision.

        Raises Abort if the node is censored, unless censor.policy is set
        to "ignore", in which case empty content is returned.
        """
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config("censor", "policy") == "ignore":
                return ""
            raise error.Abort(_("censored node: %s") % short(self._filenode),
                              hint=_("set censor.policy to ignore errors"))

    def size(self):
        # size recorded by the filelog for this file revision
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return None

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                # a parent already holding this exact file revision means
                # the rename does not belong to this changeset
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]
1117
1125
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None, branch=None):
        super(committablectx, self).__init__(repo)
        # not committed yet, so no revision number or node
        self._rev = None
        self._node = None
        self._text = text
        # only seed the attributes that were provided; the remaining ones
        # fall back to the propertycache defaults below
        if date:
            self._date = dateutil.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if branch is not None:
            # explicit branch overrides whatever extra carried
            self._extra['branch'] = encoding.fromlocal(branch)
        if not self._extra.get('branch'):
            self._extra['branch'] = 'default'

    def __bytes__(self):
        # rendered as "<first parent>+" to mark the uncommitted state
        return bytes(self._parents[0]) + "+"

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    @propertycache
    def _status(self):
        # default when no 'changes' was passed to the constructor
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        ui = self._repo.ui
        # devel.default-date allows pinning a reproducible date (tests)
        date = ui.configdate('devel', 'default-date')
        if date is None:
            date = dateutil.makedate()
        return date

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        return None
    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        # files touched by this context: modified + added + removed
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)
    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    # aliases used by the generic ctx file-change API
    filesmodified = modified
    filesadded = added
    filesremoved = removed

    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def isinmemory(self):
        return False

    def tags(self):
        return []

    def bookmarks(self):
        # union of the parents' bookmarks
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        phase = phases.draft # default phase to draft
        for p in self.parents():
            # never use a lower (more public) phase than any parent
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def ancestors(self):
        # parents first, then every changelog ancestor of the parents
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
                [p.rev() for p in self._parents]):
            yield self._repo[a]

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

    def dirty(self, missing=False, merge=True, branch=True):
        # base implementation: subclasses override with real checks
        return False
1248
1256
1249 class workingctx(committablectx):
1257 class workingctx(committablectx):
1250 """A workingctx object makes access to data related to
1258 """A workingctx object makes access to data related to
1251 the current working directory convenient.
1259 the current working directory convenient.
1252 date - any valid date string or (unixtime, offset), or None.
1260 date - any valid date string or (unixtime, offset), or None.
1253 user - username string, or None.
1261 user - username string, or None.
1254 extra - a dictionary of extra values, or None.
1262 extra - a dictionary of extra values, or None.
1255 changes - a list of file lists as returned by localrepo.status()
1263 changes - a list of file lists as returned by localrepo.status()
1256 or None to use the repository status.
1264 or None to use the repository status.
1257 """
1265 """
def __init__(self, repo, text="", user=None, date=None, extra=None,
             changes=None):
    """See committablectx for the meaning of the arguments."""
    # resolve the branch from the dirstate unless the caller supplied
    # one explicitly through 'extra'
    branch = None
    if not extra or 'branch' not in extra:
        try:
            branch = repo.dirstate.branch()
        except UnicodeDecodeError:
            # a corrupt branch file is a user-visible error, not a crash
            raise error.Abort(_('branch name not in UTF-8!'))
    super(workingctx, self).__init__(repo, text, user, date, extra, changes,
                                     branch=branch)
1268
1276
def __iter__(self):
    """Iterate over tracked files, skipping entries marked removed."""
    dirstate = self._repo.dirstate
    for fname in dirstate:
        # 'r' entries are scheduled for removal and not part of wdir
        if dirstate[fname] == 'r':
            continue
        yield fname
1274
1282
def __contains__(self, key):
    """A file is in the working context unless untracked or removed."""
    state = self._repo.dirstate[key]
    return state not in ('?', 'r')
1277
1285
def hex(self):
    # the working directory has no real node; report the canonical
    # "working directory" placeholder hash
    return wdirhex
1280
1288
@propertycache
def _parents(self):
    """Changectx list for the dirstate parents (one entry if p2 is null)."""
    p = self._repo.dirstate.parents()
    if p[1] == nullid:
        # the common non-merge case: only keep the first parent
        p = p[:-1]
    # use unfiltered repo to delay/avoid loading obsmarkers
    unfi = self._repo.unfiltered()
    return [changectx(self._repo, unfi.changelog.rev(n), n) for n in p]
1289
1297
def _fileinfo(self, path):
    """Look up *path*, priming the manifest cache first."""
    # populate __dict__['_manifest'] as workingctx has no _manifestdelta
    self._manifest
    return super(workingctx, self)._fileinfo(path)
1294
1302
def _buildflagfunc(self):
    # Create a fallback function for getting file flags when the
    # filesystem doesn't support them

    copiesget = self._repo.dirstate.copies().get
    parents = self.parents()
    if len(parents) < 2:
        # when we have one parent, it's easy: copy from parent
        man = parents[0].manifest()
        def func(f):
            # follow a recorded copy back to its source for the lookup
            f = copiesget(f, f)
            return man.flags(f)
    else:
        # merges are tricky: we try to reconstruct the unstored
        # result from the merge (issue1802)
        p1, p2 = parents
        pa = p1.ancestor(p2)
        m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

        def func(f):
            f = copiesget(f, f) # may be wrong for merges with copies
            fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
            if fl1 == fl2:
                return fl1
            # the side that changed the flag relative to the ancestor wins
            if fl1 == fla:
                return fl2
            if fl2 == fla:
                return fl1
            return '' # punt for conflicts

    return func
1326
1334
@propertycache
def _flagfunc(self):
    # the dirstate supplies the real flag function where the filesystem
    # supports exec/symlink bits; _buildflagfunc is its fallback
    return self._repo.dirstate.flagfunc(self._buildflagfunc)
1330
1338
def flags(self, path):
    """Return the flags recorded for *path* ('l', 'x' or '')."""
    if r'_manifest' not in self.__dict__:
        # no cached manifest: ask the flag function, treating
        # filesystem errors as "no flags"
        try:
            return self._flagfunc(path)
        except OSError:
            return ''
    # a cached manifest is authoritative; unknown paths have no flags
    try:
        return self._manifest.flags(path)
    except KeyError:
        return ''
1342
1350
def filectx(self, path, filelog=None):
    """get a file context from the working directory"""
    return workingfilectx(self._repo, path, workingctx=self,
                          filelog=filelog)
1347
1355
def dirty(self, missing=False, merge=True, branch=True):
    "check whether a working directory is modified"
    # NOTE: may return a truthy non-bool (e.g. the p2 context or a file
    # list); callers rely only on truthiness
    # check subrepos first
    for s in sorted(self.substate):
        if self.sub(s).dirty(missing=missing):
            return True
    # check current working dir
    return ((merge and self.p2()) or
            (branch and self.branch() != self.p1().branch()) or
            self.modified() or self.added() or self.removed() or
            (missing and self.deleted()))
1359
1367
def add(self, list, prefix=""):
    """Schedule the given files for addition to the dirstate.

    Returns the files that were rejected (missing on disk or of an
    unsupported type).  'list' shadows the builtin but is kept for
    interface compatibility.
    """
    with self._repo.wlock():
        ui, ds = self._repo.ui, self._repo.dirstate
        uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
        rejected = []
        lstat = self._repo.wvfs.lstat
        for f in list:
            # ds.pathto() returns an absolute file when this is invoked from
            # the keyword extension. That gets flagged as non-portable on
            # Windows, since it contains the drive letter and colon.
            scmutil.checkportable(ui, os.path.join(prefix, f))
            try:
                st = lstat(f)
            except OSError:
                ui.warn(_("%s does not exist!\n") % uipath(f))
                rejected.append(f)
                continue
            limit = ui.configbytes('ui', 'large-file-limit')
            if limit != 0 and st.st_size > limit:
                # warn, but do not reject: large files are still addable
                ui.warn(_("%s: up to %d MB of RAM may be required "
                          "to manage this file\n"
                          "(use 'hg revert %s' to cancel the "
                          "pending addition)\n")
                        % (f, 3 * st.st_size // 1000000, uipath(f)))
            if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                ui.warn(_("%s not added: only files and symlinks "
                          "supported currently\n") % uipath(f))
                rejected.append(f)
            elif ds[f] in 'amn':
                # already added, merged, or normal: nothing to do
                ui.warn(_("%s already tracked!\n") % uipath(f))
            elif ds[f] == 'r':
                # previously removed: resurrect instead of re-adding
                ds.normallookup(f)
            else:
                ds.add(f)
        return rejected
1395
1403
def forget(self, files, prefix=""):
    """Stop tracking the given files; returns the rejected (untracked) ones."""
    with self._repo.wlock():
        ds = self._repo.dirstate
        uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
        rejected = []
        for f in files:
            if f not in ds:
                self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
                rejected.append(f)
            elif ds[f] != 'a':
                # committed file: schedule removal from tracking
                ds.remove(f)
            else:
                # only added, never committed: simply drop the entry
                ds.drop(f)
        return rejected
1410
1418
def copy(self, source, dest):
    """Record in the dirstate that *dest* was copied from *source*."""
    try:
        st = self._repo.wvfs.lstat(dest)
    except OSError as err:
        # only a missing destination is tolerated (with a warning)
        if err.errno != errno.ENOENT:
            raise
        self._repo.ui.warn(_("%s does not exist!\n")
                           % self._repo.dirstate.pathto(dest))
        return
    if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
        self._repo.ui.warn(_("copy failed: %s is not a file or a "
                             "symbolic link\n")
                           % self._repo.dirstate.pathto(dest))
    else:
        with self._repo.wlock():
            ds = self._repo.dirstate
            # make sure dest is tracked before recording the copy
            if ds[dest] in '?':
                ds.add(dest)
            elif ds[dest] in 'r':
                ds.normallookup(dest)
            ds.copy(source, dest)
1432
1440
def match(self, pats=None, include=None, exclude=None, default='glob',
          listsubrepos=False, badfn=None):
    """Build a matcher for the working directory."""
    r = self._repo

    # Only a case insensitive filesystem needs magic to translate user input
    # to actual case in the filesystem.
    icasefs = not util.fscasesensitive(r.root)
    return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
                          default, auditor=r.auditor, ctx=self,
                          listsubrepos=listsubrepos, badfn=badfn,
                          icasefs=icasefs)
1444
1452
def _filtersuspectsymlink(self, files):
    """Drop symlink placeholders whose content cannot be a real link."""
    if not files or self._repo.dirstate._checklink:
        return files

    # Symlink placeholders may get non-symlink-like contents
    # via user error or dereferencing by NFS or Samba servers,
    # so we filter out any placeholders that don't look like a
    # symlink
    sane = []
    for f in files:
        if self.flags(f) == 'l':
            d = self[f].data()
            # real symlink targets are short, single-line, text
            if (d == '' or len(d) >= 1024 or '\n' in d
                or stringutil.binary(d)):
                self._repo.ui.debug('ignoring suspect symlink placeholder'
                                    ' "%s"\n' % f)
                continue
        sane.append(f)
    return sane
1464
1472
def _checklookup(self, files):
    """Recheck files whose dirstate entry was ambiguous.

    Returns (modified, deleted, fixup) where 'fixup' lists files that
    turned out to be clean and whose dirstate entry can be refreshed.
    """
    # check for any possibly clean files
    if not files:
        return [], [], []

    modified = []
    deleted = []
    fixup = []
    pctx = self._parents[0]
    # do a full compare of any files that might have changed
    for f in sorted(files):
        try:
            # This will return True for a file that got replaced by a
            # directory in the interim, but fixing that is pretty hard.
            if (f not in pctx or self.flags(f) != pctx.flags(f)
                or pctx[f].cmp(self[f])):
                modified.append(f)
            else:
                fixup.append(f)
        except (IOError, OSError):
            # A file become inaccessible in between? Mark it as deleted,
            # matching dirstate behavior (issue5584).
            # The dirstate has more complex behavior around whether a
            # missing file matches a directory, etc, but we don't need to
            # bother with that: if f has made it to this point, we're sure
            # it's in the dirstate.
            deleted.append(f)

    return modified, deleted, fixup
1494
1502
def _poststatusfixup(self, status, fixup):
    """update dirstate for files that are actually clean

    'fixup' comes from _checklookup; registered postdsstatus hooks are
    also run here.  Failing to take the wlock is not an error: the
    dirstate refresh is an optimization, not a requirement.
    """
    poststatus = self._repo.postdsstatus()
    if fixup or poststatus:
        try:
            oldid = self._repo.dirstate.identity()

            # updating the dirstate is optional
            # so we don't wait on the lock
            # wlock can invalidate the dirstate, so cache normal _after_
            # taking the lock
            with self._repo.wlock(False):
                if self._repo.dirstate.identity() == oldid:
                    if fixup:
                        normal = self._repo.dirstate.normal
                        for f in fixup:
                            normal(f)
                        # write changes out explicitly, because nesting
                        # wlock at runtime may prevent 'wlock.release()'
                        # after this block from doing so for subsequent
                        # changing files
                        tr = self._repo.currenttransaction()
                        self._repo.dirstate.write(tr)

                    if poststatus:
                        for ps in poststatus:
                            ps(self, status)
                else:
                    # in this case, writing changes out breaks
                    # consistency, because .hg/dirstate was
                    # already changed simultaneously after last
                    # caching (see also issue5584 for detail)
                    self._repo.ui.debug('skip updating dirstate: '
                                        'identity mismatch\n')
        except error.LockError:
            pass
        finally:
            # Even if the wlock couldn't be grabbed, clear out the list.
            self._repo.clearpostdsstatus()
1534
1542
1535 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1543 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1536 '''Gets the status from the dirstate -- internal use only.'''
1544 '''Gets the status from the dirstate -- internal use only.'''
1537 subrepos = []
1545 subrepos = []
1538 if '.hgsub' in self:
1546 if '.hgsub' in self:
1539 subrepos = sorted(self.substate)
1547 subrepos = sorted(self.substate)
1540 cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
1548 cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
1541 clean=clean, unknown=unknown)
1549 clean=clean, unknown=unknown)
1542
1550
1543 # check for any possibly clean files
1551 # check for any possibly clean files
1544 fixup = []
1552 fixup = []
1545 if cmp:
1553 if cmp:
1546 modified2, deleted2, fixup = self._checklookup(cmp)
1554 modified2, deleted2, fixup = self._checklookup(cmp)
1547 s.modified.extend(modified2)
1555 s.modified.extend(modified2)
1548 s.deleted.extend(deleted2)
1556 s.deleted.extend(deleted2)
1549
1557
1550 if fixup and clean:
1558 if fixup and clean:
1551 s.clean.extend(fixup)
1559 s.clean.extend(fixup)
1552
1560
1553 self._poststatusfixup(s, fixup)
1561 self._poststatusfixup(s, fixup)
1554
1562
1555 if match.always():
1563 if match.always():
1556 # cache for performance
1564 # cache for performance
1557 if s.unknown or s.ignored or s.clean:
1565 if s.unknown or s.ignored or s.clean:
1558 # "_status" is cached with list*=False in the normal route
1566 # "_status" is cached with list*=False in the normal route
1559 self._status = scmutil.status(s.modified, s.added, s.removed,
1567 self._status = scmutil.status(s.modified, s.added, s.removed,
1560 s.deleted, [], [], [])
1568 s.deleted, [], [], [])
1561 else:
1569 else:
1562 self._status = s
1570 self._status = s
1563
1571
1564 return s
1572 return s
1565
1573
1566 @propertycache
1574 @propertycache
1567 def _copies(self):
1575 def _copies(self):
1568 p1copies = {}
1576 p1copies = {}
1569 p2copies = {}
1577 p2copies = {}
1570 parents = self._repo.dirstate.parents()
1578 parents = self._repo.dirstate.parents()
1571 p1manifest = self._repo[parents[0]].manifest()
1579 p1manifest = self._repo[parents[0]].manifest()
1572 p2manifest = self._repo[parents[1]].manifest()
1580 p2manifest = self._repo[parents[1]].manifest()
1573 changedset = set(self.added()) | set(self.modified())
1581 changedset = set(self.added()) | set(self.modified())
1574 narrowmatch = self._repo.narrowmatch()
1582 narrowmatch = self._repo.narrowmatch()
1575 for dst, src in self._repo.dirstate.copies().items():
1583 for dst, src in self._repo.dirstate.copies().items():
1576 if dst not in changedset or not narrowmatch(dst):
1584 if dst not in changedset or not narrowmatch(dst):
1577 continue
1585 continue
1578 if src in p1manifest:
1586 if src in p1manifest:
1579 p1copies[dst] = src
1587 p1copies[dst] = src
1580 elif src in p2manifest:
1588 elif src in p2manifest:
1581 p2copies[dst] = src
1589 p2copies[dst] = src
1582 return p1copies, p2copies
1590 return p1copies, p2copies
1583
1591
1584 @propertycache
1592 @propertycache
1585 def _manifest(self):
1593 def _manifest(self):
1586 """generate a manifest corresponding to the values in self._status
1594 """generate a manifest corresponding to the values in self._status
1587
1595
1588 This reuse the file nodeid from parent, but we use special node
1596 This reuse the file nodeid from parent, but we use special node
1589 identifiers for added and modified files. This is used by manifests
1597 identifiers for added and modified files. This is used by manifests
1590 merge to see that files are different and by update logic to avoid
1598 merge to see that files are different and by update logic to avoid
1591 deleting newly added files.
1599 deleting newly added files.
1592 """
1600 """
1593 return self._buildstatusmanifest(self._status)
1601 return self._buildstatusmanifest(self._status)
1594
1602
1595 def _buildstatusmanifest(self, status):
1603 def _buildstatusmanifest(self, status):
1596 """Builds a manifest that includes the given status results."""
1604 """Builds a manifest that includes the given status results."""
1597 parents = self.parents()
1605 parents = self.parents()
1598
1606
1599 man = parents[0].manifest().copy()
1607 man = parents[0].manifest().copy()
1600
1608
1601 ff = self._flagfunc
1609 ff = self._flagfunc
1602 for i, l in ((addednodeid, status.added),
1610 for i, l in ((addednodeid, status.added),
1603 (modifiednodeid, status.modified)):
1611 (modifiednodeid, status.modified)):
1604 for f in l:
1612 for f in l:
1605 man[f] = i
1613 man[f] = i
1606 try:
1614 try:
1607 man.setflag(f, ff(f))
1615 man.setflag(f, ff(f))
1608 except OSError:
1616 except OSError:
1609 pass
1617 pass
1610
1618
1611 for f in status.deleted + status.removed:
1619 for f in status.deleted + status.removed:
1612 if f in man:
1620 if f in man:
1613 del man[f]
1621 del man[f]
1614
1622
1615 return man
1623 return man
1616
1624
1617 def _buildstatus(self, other, s, match, listignored, listclean,
1625 def _buildstatus(self, other, s, match, listignored, listclean,
1618 listunknown):
1626 listunknown):
1619 """build a status with respect to another context
1627 """build a status with respect to another context
1620
1628
1621 This includes logic for maintaining the fast path of status when
1629 This includes logic for maintaining the fast path of status when
1622 comparing the working directory against its parent, which is to skip
1630 comparing the working directory against its parent, which is to skip
1623 building a new manifest if self (working directory) is not comparing
1631 building a new manifest if self (working directory) is not comparing
1624 against its parent (repo['.']).
1632 against its parent (repo['.']).
1625 """
1633 """
1626 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1634 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1627 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1635 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1628 # might have accidentally ended up with the entire contents of the file
1636 # might have accidentally ended up with the entire contents of the file
1629 # they are supposed to be linking to.
1637 # they are supposed to be linking to.
1630 s.modified[:] = self._filtersuspectsymlink(s.modified)
1638 s.modified[:] = self._filtersuspectsymlink(s.modified)
1631 if other != self._repo['.']:
1639 if other != self._repo['.']:
1632 s = super(workingctx, self)._buildstatus(other, s, match,
1640 s = super(workingctx, self)._buildstatus(other, s, match,
1633 listignored, listclean,
1641 listignored, listclean,
1634 listunknown)
1642 listunknown)
1635 return s
1643 return s
1636
1644
1637 def _matchstatus(self, other, match):
1645 def _matchstatus(self, other, match):
1638 """override the match method with a filter for directory patterns
1646 """override the match method with a filter for directory patterns
1639
1647
1640 We use inheritance to customize the match.bad method only in cases of
1648 We use inheritance to customize the match.bad method only in cases of
1641 workingctx since it belongs only to the working directory when
1649 workingctx since it belongs only to the working directory when
1642 comparing against the parent changeset.
1650 comparing against the parent changeset.
1643
1651
1644 If we aren't comparing against the working directory's parent, then we
1652 If we aren't comparing against the working directory's parent, then we
1645 just use the default match object sent to us.
1653 just use the default match object sent to us.
1646 """
1654 """
1647 if other != self._repo['.']:
1655 if other != self._repo['.']:
1648 def bad(f, msg):
1656 def bad(f, msg):
1649 # 'f' may be a directory pattern from 'match.files()',
1657 # 'f' may be a directory pattern from 'match.files()',
1650 # so 'f not in ctx1' is not enough
1658 # so 'f not in ctx1' is not enough
1651 if f not in other and not other.hasdir(f):
1659 if f not in other and not other.hasdir(f):
1652 self._repo.ui.warn('%s: %s\n' %
1660 self._repo.ui.warn('%s: %s\n' %
1653 (self._repo.dirstate.pathto(f), msg))
1661 (self._repo.dirstate.pathto(f), msg))
1654 match.bad = bad
1662 match.bad = bad
1655 return match
1663 return match
1656
1664
1657 def walk(self, match):
1665 def walk(self, match):
1658 '''Generates matching file names.'''
1666 '''Generates matching file names.'''
1659 return sorted(self._repo.dirstate.walk(self._repo.narrowmatch(match),
1667 return sorted(self._repo.dirstate.walk(self._repo.narrowmatch(match),
1660 subrepos=sorted(self.substate),
1668 subrepos=sorted(self.substate),
1661 unknown=True, ignored=False))
1669 unknown=True, ignored=False))
1662
1670
1663 def matches(self, match):
1671 def matches(self, match):
1664 match = self._repo.narrowmatch(match)
1672 match = self._repo.narrowmatch(match)
1665 ds = self._repo.dirstate
1673 ds = self._repo.dirstate
1666 return sorted(f for f in ds.matches(match) if ds[f] != 'r')
1674 return sorted(f for f in ds.matches(match) if ds[f] != 'r')
1667
1675
1668 def markcommitted(self, node):
1676 def markcommitted(self, node):
1669 with self._repo.dirstate.parentchange():
1677 with self._repo.dirstate.parentchange():
1670 for f in self.modified() + self.added():
1678 for f in self.modified() + self.added():
1671 self._repo.dirstate.normal(f)
1679 self._repo.dirstate.normal(f)
1672 for f in self.removed():
1680 for f in self.removed():
1673 self._repo.dirstate.drop(f)
1681 self._repo.dirstate.drop(f)
1674 self._repo.dirstate.setparents(node)
1682 self._repo.dirstate.setparents(node)
1675
1683
1676 # write changes out explicitly, because nesting wlock at
1684 # write changes out explicitly, because nesting wlock at
1677 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1685 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1678 # from immediately doing so for subsequent changing files
1686 # from immediately doing so for subsequent changing files
1679 self._repo.dirstate.write(self._repo.currenttransaction())
1687 self._repo.dirstate.write(self._repo.currenttransaction())
1680
1688
1681 sparse.aftercommit(self._repo, node)
1689 sparse.aftercommit(self._repo, node)
1682
1690
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""
    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        # a committable file context always "exists"
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def renamed(self):
        """return (source path, source filenode) if this file was copied,
        else None

        The filenode is looked up in the first parent's manifest; ``nullid``
        if the source is not there."""
        path = self.copysource()
        if not path:
            return None
        return path, self._changectx._parents[0]._manifest.get(path, nullid)

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def filenode(ctx, path):
            # nullid signals "not present in this parent"
            return ctx._manifest.get(path, nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            # (path, node, None): filelog of the copy source is unknown here
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        return [self._parentfilectx(p, fileid=n, filelog=l)
                for p, n, l in pl if n != nullid]

    def children(self):
        # an uncommitted file has no children
        return []
1735
1743
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""
    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        return workingctx(self._repo)

    def data(self):
        # raw file content from the working directory
        return self._repo.wread(self._path)
    def copysource(self):
        # copy source recorded in the dirstate, or None
        return self._repo.dirstate.copied(self._path)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size
    def lstat(self):
        return self._repo.wvfs.lstat(self._path)
    def date(self):
        """return (mtime, tzoffset); falls back to the changectx date if the
        file is missing from disk"""
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def exists(self):
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        rmdir = self._repo.ui.configbool('experimental', 'removeemptydirs')
        self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing,
                                   rmdir=rmdir)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        """wraps repo.wwrite"""
        return self._repo.wwrite(self._path, data, flags,
                                 backgroundclose=backgroundclose,
                                 **kwargs)

    def markcopied(self, src):
        """marks this file a copy of `src`"""
        self._repo.dirstate.copy(src, self._path)

    def clearunknown(self):
        """Removes conflicting items in the working directory so that
        ``write()`` can be called successfully.
        """
        wvfs = self._repo.wvfs
        f = self._path
        wvfs.audit(f)
        if self._repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
            # remove files under the directory as they should already be
            # warned and backed up
            if wvfs.isdir(f) and not wvfs.islink(f):
                wvfs.rmtree(f, forcibly=True)
            for p in reversed(list(util.finddirs(f))):
                if wvfs.isfileorlink(p):
                    wvfs.unlink(p)
                    break
        else:
            # don't remove files if path conflicts are not processed
            if wvfs.isdir(f) and not wvfs.islink(f):
                wvfs.removedirs(f)

    def setflags(self, l, x):
        self._repo.wvfs.setflags(self._path, l, x)
1821
1829
1822 class overlayworkingctx(committablectx):
1830 class overlayworkingctx(committablectx):
1823 """Wraps another mutable context with a write-back cache that can be
1831 """Wraps another mutable context with a write-back cache that can be
1824 converted into a commit context.
1832 converted into a commit context.
1825
1833
1826 self._cache[path] maps to a dict with keys: {
1834 self._cache[path] maps to a dict with keys: {
1827 'exists': bool?
1835 'exists': bool?
1828 'date': date?
1836 'date': date?
1829 'data': str?
1837 'data': str?
1830 'flags': str?
1838 'flags': str?
1831 'copied': str? (path or None)
1839 'copied': str? (path or None)
1832 }
1840 }
1833 If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
1841 If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
1834 is `False`, the file was deleted.
1842 is `False`, the file was deleted.
1835 """
1843 """
1836
1844
1837 def __init__(self, repo):
1845 def __init__(self, repo):
1838 super(overlayworkingctx, self).__init__(repo)
1846 super(overlayworkingctx, self).__init__(repo)
1839 self.clean()
1847 self.clean()
1840
1848
1841 def setbase(self, wrappedctx):
1849 def setbase(self, wrappedctx):
1842 self._wrappedctx = wrappedctx
1850 self._wrappedctx = wrappedctx
1843 self._parents = [wrappedctx]
1851 self._parents = [wrappedctx]
1844 # Drop old manifest cache as it is now out of date.
1852 # Drop old manifest cache as it is now out of date.
1845 # This is necessary when, e.g., rebasing several nodes with one
1853 # This is necessary when, e.g., rebasing several nodes with one
1846 # ``overlayworkingctx`` (e.g. with --collapse).
1854 # ``overlayworkingctx`` (e.g. with --collapse).
1847 util.clearcachedproperty(self, '_manifest')
1855 util.clearcachedproperty(self, '_manifest')
1848
1856
1849 def data(self, path):
1857 def data(self, path):
1850 if self.isdirty(path):
1858 if self.isdirty(path):
1851 if self._cache[path]['exists']:
1859 if self._cache[path]['exists']:
1852 if self._cache[path]['data'] is not None:
1860 if self._cache[path]['data'] is not None:
1853 return self._cache[path]['data']
1861 return self._cache[path]['data']
1854 else:
1862 else:
1855 # Must fallback here, too, because we only set flags.
1863 # Must fallback here, too, because we only set flags.
1856 return self._wrappedctx[path].data()
1864 return self._wrappedctx[path].data()
1857 else:
1865 else:
1858 raise error.ProgrammingError("No such file or directory: %s" %
1866 raise error.ProgrammingError("No such file or directory: %s" %
1859 path)
1867 path)
1860 else:
1868 else:
1861 return self._wrappedctx[path].data()
1869 return self._wrappedctx[path].data()
1862
1870
1863 @propertycache
1871 @propertycache
1864 def _manifest(self):
1872 def _manifest(self):
1865 parents = self.parents()
1873 parents = self.parents()
1866 man = parents[0].manifest().copy()
1874 man = parents[0].manifest().copy()
1867
1875
1868 flag = self._flagfunc
1876 flag = self._flagfunc
1869 for path in self.added():
1877 for path in self.added():
1870 man[path] = addednodeid
1878 man[path] = addednodeid
1871 man.setflag(path, flag(path))
1879 man.setflag(path, flag(path))
1872 for path in self.modified():
1880 for path in self.modified():
1873 man[path] = modifiednodeid
1881 man[path] = modifiednodeid
1874 man.setflag(path, flag(path))
1882 man.setflag(path, flag(path))
1875 for path in self.removed():
1883 for path in self.removed():
1876 del man[path]
1884 del man[path]
1877 return man
1885 return man
1878
1886
1879 @propertycache
1887 @propertycache
1880 def _flagfunc(self):
1888 def _flagfunc(self):
1881 def f(path):
1889 def f(path):
1882 return self._cache[path]['flags']
1890 return self._cache[path]['flags']
1883 return f
1891 return f
1884
1892
1885 def files(self):
1893 def files(self):
1886 return sorted(self.added() + self.modified() + self.removed())
1894 return sorted(self.added() + self.modified() + self.removed())
1887
1895
1888 def modified(self):
1896 def modified(self):
1889 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1897 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1890 self._existsinparent(f)]
1898 self._existsinparent(f)]
1891
1899
1892 def added(self):
1900 def added(self):
1893 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1901 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1894 not self._existsinparent(f)]
1902 not self._existsinparent(f)]
1895
1903
1896 def removed(self):
1904 def removed(self):
1897 return [f for f in self._cache.keys() if
1905 return [f for f in self._cache.keys() if
1898 not self._cache[f]['exists'] and self._existsinparent(f)]
1906 not self._cache[f]['exists'] and self._existsinparent(f)]
1899
1907
1900 def p1copies(self):
1908 def p1copies(self):
1901 copies = self._repo._wrappedctx.p1copies().copy()
1909 copies = self._repo._wrappedctx.p1copies().copy()
1902 narrowmatch = self._repo.narrowmatch()
1910 narrowmatch = self._repo.narrowmatch()
1903 for f in self._cache.keys():
1911 for f in self._cache.keys():
1904 if not narrowmatch(f):
1912 if not narrowmatch(f):
1905 continue
1913 continue
1906 copies.pop(f, None) # delete if it exists
1914 copies.pop(f, None) # delete if it exists
1907 source = self._cache[f]['copied']
1915 source = self._cache[f]['copied']
1908 if source:
1916 if source:
1909 copies[f] = source
1917 copies[f] = source
1910 return copies
1918 return copies
1911
1919
1912 def p2copies(self):
1920 def p2copies(self):
1913 copies = self._repo._wrappedctx.p2copies().copy()
1921 copies = self._repo._wrappedctx.p2copies().copy()
1914 narrowmatch = self._repo.narrowmatch()
1922 narrowmatch = self._repo.narrowmatch()
1915 for f in self._cache.keys():
1923 for f in self._cache.keys():
1916 if not narrowmatch(f):
1924 if not narrowmatch(f):
1917 continue
1925 continue
1918 copies.pop(f, None) # delete if it exists
1926 copies.pop(f, None) # delete if it exists
1919 source = self._cache[f]['copied']
1927 source = self._cache[f]['copied']
1920 if source:
1928 if source:
1921 copies[f] = source
1929 copies[f] = source
1922 return copies
1930 return copies
1923
1931
1924 def isinmemory(self):
1932 def isinmemory(self):
1925 return True
1933 return True
1926
1934
1927 def filedate(self, path):
1935 def filedate(self, path):
1928 if self.isdirty(path):
1936 if self.isdirty(path):
1929 return self._cache[path]['date']
1937 return self._cache[path]['date']
1930 else:
1938 else:
1931 return self._wrappedctx[path].date()
1939 return self._wrappedctx[path].date()
1932
1940
1933 def markcopied(self, path, origin):
1941 def markcopied(self, path, origin):
1934 self._markdirty(path, exists=True, date=self.filedate(path),
1942 self._markdirty(path, exists=True, date=self.filedate(path),
1935 flags=self.flags(path), copied=origin)
1943 flags=self.flags(path), copied=origin)
1936
1944
1937 def copydata(self, path):
1945 def copydata(self, path):
1938 if self.isdirty(path):
1946 if self.isdirty(path):
1939 return self._cache[path]['copied']
1947 return self._cache[path]['copied']
1940 else:
1948 else:
1941 return None
1949 return None
1942
1950
1943 def flags(self, path):
1951 def flags(self, path):
1944 if self.isdirty(path):
1952 if self.isdirty(path):
1945 if self._cache[path]['exists']:
1953 if self._cache[path]['exists']:
1946 return self._cache[path]['flags']
1954 return self._cache[path]['flags']
1947 else:
1955 else:
1948 raise error.ProgrammingError("No such file or directory: %s" %
1956 raise error.ProgrammingError("No such file or directory: %s" %
1949 self._path)
1957 self._path)
1950 else:
1958 else:
1951 return self._wrappedctx[path].flags()
1959 return self._wrappedctx[path].flags()
1952
1960
1953 def __contains__(self, key):
1961 def __contains__(self, key):
1954 if key in self._cache:
1962 if key in self._cache:
1955 return self._cache[key]['exists']
1963 return self._cache[key]['exists']
1956 return key in self.p1()
1964 return key in self.p1()
1957
1965
1958 def _existsinparent(self, path):
1966 def _existsinparent(self, path):
1959 try:
1967 try:
1960 # ``commitctx` raises a ``ManifestLookupError`` if a path does not
1968 # ``commitctx` raises a ``ManifestLookupError`` if a path does not
1961 # exist, unlike ``workingctx``, which returns a ``workingfilectx``
1969 # exist, unlike ``workingctx``, which returns a ``workingfilectx``
1962 # with an ``exists()`` function.
1970 # with an ``exists()`` function.
1963 self._wrappedctx[path]
1971 self._wrappedctx[path]
1964 return True
1972 return True
1965 except error.ManifestLookupError:
1973 except error.ManifestLookupError:
1966 return False
1974 return False
1967
1975
1968 def _auditconflicts(self, path):
1976 def _auditconflicts(self, path):
1969 """Replicates conflict checks done by wvfs.write().
1977 """Replicates conflict checks done by wvfs.write().
1970
1978
1971 Since we never write to the filesystem and never call `applyupdates` in
1979 Since we never write to the filesystem and never call `applyupdates` in
1972 IMM, we'll never check that a path is actually writable -- e.g., because
1980 IMM, we'll never check that a path is actually writable -- e.g., because
1973 it adds `a/foo`, but `a` is actually a file in the other commit.
1981 it adds `a/foo`, but `a` is actually a file in the other commit.
1974 """
1982 """
1975 def fail(path, component):
1983 def fail(path, component):
1976 # p1() is the base and we're receiving "writes" for p2()'s
1984 # p1() is the base and we're receiving "writes" for p2()'s
1977 # files.
1985 # files.
1978 if 'l' in self.p1()[component].flags():
1986 if 'l' in self.p1()[component].flags():
1979 raise error.Abort("error: %s conflicts with symlink %s "
1987 raise error.Abort("error: %s conflicts with symlink %s "
1980 "in %d." % (path, component,
1988 "in %d." % (path, component,
1981 self.p1().rev()))
1989 self.p1().rev()))
1982 else:
1990 else:
1983 raise error.Abort("error: '%s' conflicts with file '%s' in "
1991 raise error.Abort("error: '%s' conflicts with file '%s' in "
1984 "%d." % (path, component,
1992 "%d." % (path, component,
1985 self.p1().rev()))
1993 self.p1().rev()))
1986
1994
1987 # Test that each new directory to be created to write this path from p2
1995 # Test that each new directory to be created to write this path from p2
1988 # is not a file in p1.
1996 # is not a file in p1.
1989 components = path.split('/')
1997 components = path.split('/')
1990 for i in pycompat.xrange(len(components)):
1998 for i in pycompat.xrange(len(components)):
1991 component = "/".join(components[0:i])
1999 component = "/".join(components[0:i])
1992 if component in self:
2000 if component in self:
1993 fail(path, component)
2001 fail(path, component)
1994
2002
1995 # Test the other direction -- that this path from p2 isn't a directory
2003 # Test the other direction -- that this path from p2 isn't a directory
1996 # in p1 (test that p1 doesn't have any paths matching `path/*`).
2004 # in p1 (test that p1 doesn't have any paths matching `path/*`).
1997 match = self.match([path], default=b'path')
2005 match = self.match([path], default=b'path')
1998 matches = self.p1().manifest().matches(match)
2006 matches = self.p1().manifest().matches(match)
1999 mfiles = matches.keys()
2007 mfiles = matches.keys()
2000 if len(mfiles) > 0:
2008 if len(mfiles) > 0:
2001 if len(mfiles) == 1 and mfiles[0] == path:
2009 if len(mfiles) == 1 and mfiles[0] == path:
2002 return
2010 return
2003 # omit the files which are deleted in current IMM wctx
2011 # omit the files which are deleted in current IMM wctx
2004 mfiles = [m for m in mfiles if m in self]
2012 mfiles = [m for m in mfiles if m in self]
2005 if not mfiles:
2013 if not mfiles:
2006 return
2014 return
2007 raise error.Abort("error: file '%s' cannot be written because "
2015 raise error.Abort("error: file '%s' cannot be written because "
2008 " '%s/' is a directory in %s (containing %d "
2016 " '%s/' is a directory in %s (containing %d "
2009 "entries: %s)"
2017 "entries: %s)"
2010 % (path, path, self.p1(), len(mfiles),
2018 % (path, path, self.p1(), len(mfiles),
2011 ', '.join(mfiles)))
2019 ', '.join(mfiles)))
2012
2020
2013 def write(self, path, data, flags='', **kwargs):
2021 def write(self, path, data, flags='', **kwargs):
2014 if data is None:
2022 if data is None:
2015 raise error.ProgrammingError("data must be non-None")
2023 raise error.ProgrammingError("data must be non-None")
2016 self._auditconflicts(path)
2024 self._auditconflicts(path)
2017 self._markdirty(path, exists=True, data=data, date=dateutil.makedate(),
2025 self._markdirty(path, exists=True, data=data, date=dateutil.makedate(),
2018 flags=flags)
2026 flags=flags)
2019
2027
2020 def setflags(self, path, l, x):
2028 def setflags(self, path, l, x):
2021 flag = ''
2029 flag = ''
2022 if l:
2030 if l:
2023 flag = 'l'
2031 flag = 'l'
2024 elif x:
2032 elif x:
2025 flag = 'x'
2033 flag = 'x'
2026 self._markdirty(path, exists=True, date=dateutil.makedate(),
2034 self._markdirty(path, exists=True, date=dateutil.makedate(),
2027 flags=flag)
2035 flags=flag)
2028
2036
2029 def remove(self, path):
2037 def remove(self, path):
2030 self._markdirty(path, exists=False)
2038 self._markdirty(path, exists=False)
2031
2039
2032 def exists(self, path):
2040 def exists(self, path):
2033 """exists behaves like `lexists`, but needs to follow symlinks and
2041 """exists behaves like `lexists`, but needs to follow symlinks and
2034 return False if they are broken.
2042 return False if they are broken.
2035 """
2043 """
2036 if self.isdirty(path):
2044 if self.isdirty(path):
2037 # If this path exists and is a symlink, "follow" it by calling
2045 # If this path exists and is a symlink, "follow" it by calling
2038 # exists on the destination path.
2046 # exists on the destination path.
2039 if (self._cache[path]['exists'] and
2047 if (self._cache[path]['exists'] and
2040 'l' in self._cache[path]['flags']):
2048 'l' in self._cache[path]['flags']):
2041 return self.exists(self._cache[path]['data'].strip())
2049 return self.exists(self._cache[path]['data'].strip())
2042 else:
2050 else:
2043 return self._cache[path]['exists']
2051 return self._cache[path]['exists']
2044
2052
2045 return self._existsinparent(path)
2053 return self._existsinparent(path)
2046
2054
2047 def lexists(self, path):
2055 def lexists(self, path):
2048 """lexists returns True if the path exists"""
2056 """lexists returns True if the path exists"""
2049 if self.isdirty(path):
2057 if self.isdirty(path):
2050 return self._cache[path]['exists']
2058 return self._cache[path]['exists']
2051
2059
2052 return self._existsinparent(path)
2060 return self._existsinparent(path)
2053
2061
2054 def size(self, path):
2062 def size(self, path):
2055 if self.isdirty(path):
2063 if self.isdirty(path):
2056 if self._cache[path]['exists']:
2064 if self._cache[path]['exists']:
2057 return len(self._cache[path]['data'])
2065 return len(self._cache[path]['data'])
2058 else:
2066 else:
2059 raise error.ProgrammingError("No such file or directory: %s" %
2067 raise error.ProgrammingError("No such file or directory: %s" %
2060 self._path)
2068 self._path)
2061 return self._wrappedctx[path].size()
2069 return self._wrappedctx[path].size()
2062
2070
2063 def tomemctx(self, text, branch=None, extra=None, date=None, parents=None,
2071 def tomemctx(self, text, branch=None, extra=None, date=None, parents=None,
2064 user=None, editor=None):
2072 user=None, editor=None):
2065 """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
2073 """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
2066 committed.
2074 committed.
2067
2075
2068 ``text`` is the commit message.
2076 ``text`` is the commit message.
2069 ``parents`` (optional) are rev numbers.
2077 ``parents`` (optional) are rev numbers.
2070 """
2078 """
2071 # Default parents to the wrapped contexts' if not passed.
2079 # Default parents to the wrapped contexts' if not passed.
2072 if parents is None:
2080 if parents is None:
2073 parents = self._wrappedctx.parents()
2081 parents = self._wrappedctx.parents()
2074 if len(parents) == 1:
2082 if len(parents) == 1:
2075 parents = (parents[0], None)
2083 parents = (parents[0], None)
2076
2084
2077 # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
2085 # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
2078 if parents[1] is None:
2086 if parents[1] is None:
2079 parents = (self._repo[parents[0]], None)
2087 parents = (self._repo[parents[0]], None)
2080 else:
2088 else:
2081 parents = (self._repo[parents[0]], self._repo[parents[1]])
2089 parents = (self._repo[parents[0]], self._repo[parents[1]])
2082
2090
2083 files = self.files()
2091 files = self.files()
2084 def getfile(repo, memctx, path):
2092 def getfile(repo, memctx, path):
2085 if self._cache[path]['exists']:
2093 if self._cache[path]['exists']:
2086 return memfilectx(repo, memctx, path,
2094 return memfilectx(repo, memctx, path,
2087 self._cache[path]['data'],
2095 self._cache[path]['data'],
2088 'l' in self._cache[path]['flags'],
2096 'l' in self._cache[path]['flags'],
2089 'x' in self._cache[path]['flags'],
2097 'x' in self._cache[path]['flags'],
2090 self._cache[path]['copied'])
2098 self._cache[path]['copied'])
2091 else:
2099 else:
2092 # Returning None, but including the path in `files`, is
2100 # Returning None, but including the path in `files`, is
2093 # necessary for memctx to register a deletion.
2101 # necessary for memctx to register a deletion.
2094 return None
2102 return None
2095 return memctx(self._repo, parents, text, files, getfile, date=date,
2103 return memctx(self._repo, parents, text, files, getfile, date=date,
2096 extra=extra, user=user, branch=branch, editor=editor)
2104 extra=extra, user=user, branch=branch, editor=editor)
2097
2105
2098 def isdirty(self, path):
2106 def isdirty(self, path):
2099 return path in self._cache
2107 return path in self._cache
2100
2108
2101 def isempty(self):
2109 def isempty(self):
2102 # We need to discard any keys that are actually clean before the empty
2110 # We need to discard any keys that are actually clean before the empty
2103 # commit check.
2111 # commit check.
2104 self._compact()
2112 self._compact()
2105 return len(self._cache) == 0
2113 return len(self._cache) == 0
2106
2114
2107 def clean(self):
2115 def clean(self):
2108 self._cache = {}
2116 self._cache = {}
2109
2117
2110 def _compact(self):
2118 def _compact(self):
2111 """Removes keys from the cache that are actually clean, by comparing
2119 """Removes keys from the cache that are actually clean, by comparing
2112 them with the underlying context.
2120 them with the underlying context.
2113
2121
2114 This can occur during the merge process, e.g. by passing --tool :local
2122 This can occur during the merge process, e.g. by passing --tool :local
2115 to resolve a conflict.
2123 to resolve a conflict.
2116 """
2124 """
2117 keys = []
2125 keys = []
2118 # This won't be perfect, but can help performance significantly when
2126 # This won't be perfect, but can help performance significantly when
2119 # using things like remotefilelog.
2127 # using things like remotefilelog.
2120 scmutil.prefetchfiles(
2128 scmutil.prefetchfiles(
2121 self.repo(), [self.p1().rev()],
2129 self.repo(), [self.p1().rev()],
2122 scmutil.matchfiles(self.repo(), self._cache.keys()))
2130 scmutil.matchfiles(self.repo(), self._cache.keys()))
2123
2131
2124 for path in self._cache.keys():
2132 for path in self._cache.keys():
2125 cache = self._cache[path]
2133 cache = self._cache[path]
2126 try:
2134 try:
2127 underlying = self._wrappedctx[path]
2135 underlying = self._wrappedctx[path]
2128 if (underlying.data() == cache['data'] and
2136 if (underlying.data() == cache['data'] and
2129 underlying.flags() == cache['flags']):
2137 underlying.flags() == cache['flags']):
2130 keys.append(path)
2138 keys.append(path)
2131 except error.ManifestLookupError:
2139 except error.ManifestLookupError:
2132 # Path not in the underlying manifest (created).
2140 # Path not in the underlying manifest (created).
2133 continue
2141 continue
2134
2142
2135 for path in keys:
2143 for path in keys:
2136 del self._cache[path]
2144 del self._cache[path]
2137 return keys
2145 return keys
2138
2146
2139 def _markdirty(self, path, exists, data=None, date=None, flags='',
2147 def _markdirty(self, path, exists, data=None, date=None, flags='',
2140 copied=None):
2148 copied=None):
2141 # data not provided, let's see if we already have some; if not, let's
2149 # data not provided, let's see if we already have some; if not, let's
2142 # grab it from our underlying context, so that we always have data if
2150 # grab it from our underlying context, so that we always have data if
2143 # the file is marked as existing.
2151 # the file is marked as existing.
2144 if exists and data is None:
2152 if exists and data is None:
2145 oldentry = self._cache.get(path) or {}
2153 oldentry = self._cache.get(path) or {}
2146 data = oldentry.get('data')
2154 data = oldentry.get('data')
2147 if data is None:
2155 if data is None:
2148 data = self._wrappedctx[path].data()
2156 data = self._wrappedctx[path].data()
2149
2157
2150 self._cache[path] = {
2158 self._cache[path] = {
2151 'exists': exists,
2159 'exists': exists,
2152 'data': data,
2160 'data': data,
2153 'date': date,
2161 'date': date,
2154 'flags': flags,
2162 'flags': flags,
2155 'copied': copied,
2163 'copied': copied,
2156 }
2164 }
2157
2165
2158 def filectx(self, path, filelog=None):
2166 def filectx(self, path, filelog=None):
2159 return overlayworkingfilectx(self._repo, path, parent=self,
2167 return overlayworkingfilectx(self._repo, path, parent=self,
2160 filelog=filelog)
2168 filelog=filelog)
2161
2169
class overlayworkingfilectx(committablefilectx):
    """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
    cache, which can be flushed through later by calling ``flush()``."""

    def __init__(self, repo, path, filelog=None, parent=None):
        super(overlayworkingfilectx, self).__init__(repo, path, filelog,
                                                    parent)
        self._repo = repo
        self._parent = parent
        self._path = path

    def cmp(self, fctx):
        # Content-only comparison: True when the data differs.
        return self.data() != fctx.data()

    def changectx(self):
        return self._parent

    # Every read/write below delegates to the owning overlayworkingctx,
    # which holds the actual per-path cache.

    def data(self):
        return self._parent.data(self._path)

    def date(self):
        return self._parent.filedate(self._path)

    def exists(self):
        return self.lexists()

    def lexists(self):
        return self._parent.exists(self._path)

    def copysource(self):
        return self._parent.copydata(self._path)

    def size(self):
        return self._parent.size(self._path)

    def markcopied(self, origin):
        self._parent.markcopied(self._path, origin)

    def audit(self):
        # In-memory writes need no filesystem path auditing.
        pass

    def flags(self):
        return self._parent.flags(self._path)

    def setflags(self, islink, isexec):
        return self._parent.setflags(self._path, islink, isexec)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        return self._parent.write(self._path, data, flags, **kwargs)

    def remove(self, ignoremissing=False):
        return self._parent.remove(self._path)

    def clearunknown(self):
        # Nothing on disk can conflict with an in-memory file.
        pass
2217
2225
class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """
    def __init__(self, repo, changes,
                 text="", user=None, date=None, extra=None):
        super(workingcommitctx, self).__init__(repo, text, user, date, extra,
                                               changes)

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        if clean:
            clean = [f for f in self._manifest if f not in self._changedset]
        else:
            clean = []
        status = self._status
        modified = [f for f in status.modified if match(f)]
        added = [f for f in status.added if match(f)]
        removed = [f for f in status.removed if match(f)]
        return scmutil.status(modified, added, removed, [], [], [], clean)

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        # union of files modified, added and removed by this commit
        status = self._status
        return set(status.modified) | set(status.added) | set(status.removed)
2253
2261
def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    cache = {}

    def getfilectx(repo, memctx, path):
        # EAFP: hit the cache first, fall back to computing on a miss.
        try:
            return cache[path]
        except KeyError:
            cache[path] = func(repo, memctx, path)
            return cache[path]

    return getfilectx
2269
2277
def memfilefromctx(ctx):
    """Given a context return a memfilectx for ctx[path]

    This is a convenience method for building a memctx based on another
    context.
    """
    def getfilectx(repo, memctx, path):
        fctx = ctx[path]
        return memfilectx(repo, memctx, path, fctx.data(),
                          islink=fctx.islink(), isexec=fctx.isexec(),
                          copysource=fctx.copysource())

    return getfilectx
2284
2292
def memfilefrompatch(patchstore):
    """Given a patch (e.g. patchstore object) return a memfilectx

    This is a convenience method for building a memctx based on a patchstore.
    """
    def getfilectx(repo, memctx, path):
        data, mode, copysource = patchstore.getfile(path)
        if data is None:
            # deleted file: memctx registers the removal via None
            return None
        islink, isexec = mode
        return memfilectx(repo, memctx, path, data, islink=islink,
                          isexec=isexec, copysource=copysource)

    return getfilectx
2299
2307
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time while
    related files data and is made available through a callback
    mechanism. 'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn return None for recent
    Mercurial. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by dateutil.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing
    # files.  Extensions that need to retain compatibility across Mercurial
    # 3.1 can use this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, branch=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra,
                                     branch=branch)
        self._rev = None
        self._node = None
        # Substitute the null node for any missing parent.
        p1, p2 = [(p or nullid) for p in parents]
        self._parents = [self._repo[p] for p in (p1, p2)]
        self._files = sorted(set(files))
        self.substate = {}

        if isinstance(filectxfn, patch.filestore):
            filectxfn = memfilefrompatch(filectxfn)
        elif not callable(filectxfn):
            # if store is not callable, wrap it in a function
            filectxfn = memfilefromctx(filectxfn)

        # memoizing increases performance for e.g. vcs convert scenarios.
        self._filectxfn = makecachingfilectxfn(filectxfn)

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            man[f] = modifiednodeid
        for f in self._status.added:
            man[f] = addednodeid
        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
2414
2422
class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, changectx, path, data, islink=False,
                 isexec=False, copysource=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copysource is the source file path if current file was copied in
        the revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, changectx)
        self._data = data
        # symlink flag takes precedence over exec flag
        self._flags = 'l' if islink else ('x' if isexec else '')
        self._copysource = copysource

    def copysource(self):
        return self._copysource

    def cmp(self, fctx):
        # Content-only comparison: True when the data differs.
        return self.data() != fctx.data()

    def data(self):
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags, **kwargs):
        """wraps repo.wwrite"""
        self._data = data
2456
2464
2457
2465
2458 class metadataonlyctx(committablectx):
2466 class metadataonlyctx(committablectx):
2459 """Like memctx but it's reusing the manifest of different commit.
2467 """Like memctx but it's reusing the manifest of different commit.
2460 Intended to be used by lightweight operations that are creating
2468 Intended to be used by lightweight operations that are creating
2461 metadata-only changes.
2469 metadata-only changes.
2462
2470
2463 Revision information is supplied at initialization time. 'repo' is the
2471 Revision information is supplied at initialization time. 'repo' is the
2464 current localrepo, 'ctx' is original revision which manifest we're reuisng
2472 current localrepo, 'ctx' is original revision which manifest we're reuisng
2465 'parents' is a sequence of two parent revisions identifiers (pass None for
2473 'parents' is a sequence of two parent revisions identifiers (pass None for
2466 every missing parent), 'text' is the commit.
2474 every missing parent), 'text' is the commit.
2467
2475
2468 user receives the committer name and defaults to current repository
2476 user receives the committer name and defaults to current repository
2469 username, date is the commit date in any format supported by
2477 username, date is the commit date in any format supported by
2470 dateutil.parsedate() and defaults to current date, extra is a dictionary of
2478 dateutil.parsedate() and defaults to current date, extra is a dictionary of
2471 metadata or is left empty.
2479 metadata or is left empty.
2472 """
2480 """
def __init__(self, repo, originalctx, parents=None, text=None, user=None,
             date=None, extra=None, editor=False):
    """Build a metadata-only commit context on top of ``originalctx``.

    The manifest of ``originalctx`` is reused verbatim; only commit
    metadata (parents, description, user, date, extra) may differ.
    ``parents`` is a sequence of up to two parent identifiers (``None``
    entries are dropped); missing slots are padded with the null
    revision.  ``editor``, when given, is invoked to (re)write the
    commit message.

    Raises RuntimeError when the reused manifest's parents do not match
    the manifests of the requested commit parents.
    """
    if text is None:
        text = originalctx.description()
    super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
    self._rev = None
    self._node = None
    self._originalctx = originalctx
    self._manifestnode = originalctx.manifestnode()

    if parents is None:
        resolved = list(originalctx.parents())
    else:
        resolved = [repo[p] for p in parents if p is not None]
    # always keep exactly two parent contexts, padding with null
    while len(resolved) < 2:
        resolved.append(repo[nullid])
    self._parents = resolved
    p1, p2 = resolved

    # sanity check to ensure that the reused manifest parents are
    # manifests of our commit parents
    mp1, mp2 = self.manifestctx().parents
    if p1 != nullid and p1.manifestnode() != mp1:
        raise RuntimeError(r"can't reuse the manifest: its p1 "
                           r"doesn't match the new ctx p1")
    if p2 != nullid and p2.manifestnode() != mp2:
        raise RuntimeError(r"can't reuse the manifest: "
                           r"its p2 doesn't match the new ctx p2")

    self._files = originalctx.files()
    self.substate = {}

    if editor:
        self._text = editor(self._repo, self, [])
        self._repo.savecommitmessage(self._text)
2507
2515
def manifestnode(self):
    """Return the node id of the manifest reused from the original ctx."""
    node = self._manifestnode
    return node
2510
2518
@property
def _manifestctx(self):
    """Manifest context looked up from the repo's manifest log by node."""
    mlog = self._repo.manifestlog
    return mlog[self._manifestnode]
2514
2522
def filectx(self, path, filelog=None):
    """Delegate file-context lookup to the original changeset context."""
    original = self._originalctx
    return original.filectx(path, filelog=filelog)
2517
2525
def commit(self):
    """Commit this context to the repository via ``repo.commitctx``."""
    repo = self._repo
    return repo.commitctx(self)
2521
2529
@property
def _manifest(self):
    """The manifest object of the original context (reused unchanged)."""
    original = self._originalctx
    return original.manifest()
2525
2533
@propertycache
def _status(self):
    """Calculate exact status from ``files`` specified in the ``origctx``
    and parents manifests.

    Each file touched by the original commit is classified as added
    (tracked by neither parent manifest), modified (present in this
    context), or removed (everything else).
    """
    man1 = self.p1().manifest()
    p2 = self._parents[1]
    # "1 < len(self._parents)" can't be used for checking
    # existence of the 2nd parent, because "metadataonlyctx._parents" is
    # explicitly initialized by the list, of which length is 2.
    if p2.node() != nullid:
        man2 = p2.manifest()
        def managing(f):
            return f in man1 or f in man2
    else:
        def managing(f):
            return f in man1

    modified = []
    added = []
    removed = []
    for f in self._files:
        if not managing(f):
            added.append(f)
        elif f in self:
            modified.append(f)
        else:
            removed.append(f)

    return scmutil.status(modified, added, removed, [], [], [], [])
2552
2560
class arbitraryfilectx(object):
    """Allows you to use filectx-like functions on a file in an arbitrary
    location on disk, possibly not in the working directory.
    """

    def __init__(self, path, repo=None):
        """Wrap the on-disk file at ``path``.

        ``repo`` is optional because contrib/simplemerge uses this class
        without one.
        """
        self._repo = repo
        self._path = path

    def cmp(self, fctx):
        """Return True when this file's content differs from ``fctx``."""
        # filecmp follows symlinks whereas `cmp` should not, so skip the fast
        # path if either side is a symlink.
        has_symlink = 'l' in self.flags() or 'l' in fctx.flags()
        if not has_symlink and isinstance(fctx, workingfilectx) and self._repo:
            # Add a fast-path for merge if both sides are disk-backed.
            # Note that filecmp uses the opposite return values (True if same)
            # from our cmp functions (True if different).
            same = filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
            return not same
        return self.data() != fctx.data()

    def path(self):
        """The wrapped filesystem path."""
        return self._path

    def flags(self):
        """Arbitrary files carry no exec/symlink flags."""
        return ''

    def data(self):
        """Raw file content as bytes."""
        return util.readfile(self._path)

    def decodeddata(self):
        """File content read directly, bypassing any filter decoding."""
        with open(self._path, "rb") as fp:
            return fp.read()

    def remove(self):
        """Delete the underlying file."""
        util.unlink(self._path)

    def write(self, data, flags, **kwargs):
        """Write ``data`` to the file; flags are not supported here."""
        assert not flags
        with open(self._path, "wb") as fp:
            fp.write(data)
General Comments 0
You need to be logged in to leave comments. Login now