context: take advantage of `_descendantrev` in introrev if available...
Boris Feld
r40729:aee94f0a default
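The change below touches basefilectx.introrev(): when a file context carries neither a cached _changeid nor a _changectx, but does know a descendant revision (_descendantrev), introrev() now adjusts the linkrev against that descendant instead of falling back to the raw, possibly linkrev-shadowed value, and caches the result. The following is a condensed sketch of the resulting method, paraphrased from the hunk below (class body and helpers omitted):

def introrev(self):
    toprev = None
    attrs = vars(self)
    if r'_changeid' in attrs:
        toprev = self._changeid          # cached value already available
    elif r'_changectx' in attrs:
        toprev = self._changectx.rev()   # we know the changelog entry
    if toprev is not None:
        return self._adjustlinkrev(toprev, inclusive=True)
    elif r'_descendantrev' in attrs:
        # new path: adjust against the known descendant and cache the result
        introrev = self._adjustlinkrev(self._descendantrev)
        self._changeid = introrev
        return introrev
    else:
        return self.linkrev()

Caching the computed revision in self._changeid means later calls to rev() or introrev() on the same object do not repeat the ancestor walk.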
@@ -1,2442 +1,2447 @@
# context.py - changeset and file context objects for mercurial
#
# Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import filecmp
import os
import stat

from .i18n import _
from .node import (
    addednodeid,
    hex,
    modifiednodeid,
    nullid,
    nullrev,
    short,
    wdirfilenodeids,
    wdirid,
)
from . import (
    dagop,
    encoding,
    error,
    fileset,
    match as matchmod,
    obsolete as obsmod,
    patch,
    pathutil,
    phases,
    pycompat,
    repoview,
    scmutil,
    sparse,
    subrepo,
    subrepoutil,
    util,
)
from .utils import (
    dateutil,
    stringutil,
)

propertycache = util.propertycache

class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
                be committed,
    memctx: a context that represents changes in-memory and can also
            be committed."""

    def __init__(self, repo):
        self._repo = repo

    def __bytes__(self):
        return short(self.node())

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return r"<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        return key in self._manifest

    def __getitem__(self, key):
        return self.filectx(key)

    def __iter__(self):
        return iter(self._manifest)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results, if this is
        a working copy context. For non-working copy contexts, it just returns
        the normal manifest."""
        return self.manifest()

    def _matchstatus(self, other, match):
        """This internal method provides a way for child objects to override the
        match operator.
        """
        return match

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in d.iteritems():
            if fn in deletedset:
                continue
            if value is None:
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in wdirfilenodeids:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [fn for fn in unknown if fn not in mf1 and
                       (not match or match(fn))]
            ignored = [fn for fn in ignored if fn not in mf1 and
                       (not match or match(fn))]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(modified, added, removed, deleted, unknown,
                              ignored, clean)

    @propertycache
    def substate(self):
        return subrepoutil.state(self, self._repo.ui)

    def subrev(self, subpath):
        return self.substate[subpath][1]

    def rev(self):
        return self._rev
    def node(self):
        return self._node
    def hex(self):
        return hex(self.node())
    def manifest(self):
        return self._manifest
    def manifestctx(self):
        return self._manifestctx
    def repo(self):
        return self._repo
    def phasestr(self):
        return phases.phasenames[self.phase()]
    def mutable(self):
        return self.phase() > phases.public

    def matchfileset(self, expr, badfn=None):
        return fileset.match(self, expr, badfn=badfn)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, 'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, 'extinct')

    def orphan(self):
        """True if the changeset is not obsolete, but its ancestor is"""
        return self.rev() in obsmod.getrevs(self._repo, 'orphan')

    def phasedivergent(self):
        """True if the changeset tries to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be phase-divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')

    def contentdivergent(self):
        """Is a successor of a changeset with multiple possible successor sets

        Only non-public and non-obsolete changesets may be content-divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')

    def isunstable(self):
        """True if the changeset is either orphan, phase-divergent or
        content-divergent"""
        return self.orphan() or self.phasedivergent() or self.contentdivergent()

    def instabilities(self):
        """return the list of instabilities affecting this changeset.

        Instabilities are returned as strings. possible values are:
        - orphan,
        - phase-divergent,
        - content-divergent.
        """
        instabilities = []
        if self.orphan():
            instabilities.append('orphan')
        if self.phasedivergent():
            instabilities.append('phase-divergent')
        if self.contentdivergent():
            instabilities.append('content-divergent')
        return instabilities

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        return self._repo[nullrev]

    def _fileinfo(self, path):
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(self._node, path,
                                                _('not found in manifest'))
        if r'_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (self._manifestdelta[path],
                        self._manifestdelta.flags(path))
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(self._node, path,
                                            _('not found in manifest'))

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return ''

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        '''return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        '''
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        r = self._repo
        return matchmod.match(r.root, r.getcwd(), pats,
                              include, exclude, default,
                              auditor=r.nofsauditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn)

    def diff(self, ctx2=None, match=None, changes=None, opts=None,
             losedatafn=None, prefix='', relroot='', copy=None,
             hunksfilterfn=None):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        return patch.diff(self._repo, ctx2, self, match=match, changes=changes,
                          opts=opts, losedatafn=losedatafn, prefix=prefix,
                          relroot=relroot, copy=copy,
                          hunksfilterfn=hunksfilterfn)

    def dirs(self):
        return self._manifest.dirs()

    def hasdir(self, dir):
        return self._manifest.hasdir(dir)

    def status(self, other=None, match=None, listignored=False,
               listclean=False, listunknown=False, listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        reversed = False
        if (not isinstance(ctx1, changectx)
            and isinstance(ctx2, changectx)):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = self._repo.narrowmatch(match)
        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
                              listunknown)

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
                               r.clean)

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(rev2, match=submatch, ignored=listignored,
                               clean=listclean, unknown=listunknown,
                               listsubrepos=True)
                for rfiles, sfiles in zip(r, s):
                    rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)

        for l in r:
            l.sort()

        return r

class changectx(basectx):
    """A changecontext object makes access to data related to a particular
    changeset convenient. It represents a read-only context already present in
    the repo."""
    def __init__(self, repo, rev, node):
        super(changectx, self).__init__(repo)
        self._rev = rev
        self._node = node

    def __hash__(self):
        try:
            return hash(self._rev)
        except AttributeError:
            return id(self)

    def __nonzero__(self):
        return self._rev != nullrev

    __bool__ = __nonzero__

    @propertycache
    def _changeset(self):
        return self._repo.changelog.changelogrevision(self.rev())

    @propertycache
    def _manifest(self):
        return self._manifestctx.read()

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._changeset.manifest]

    @propertycache
    def _manifestdelta(self):
        return self._manifestctx.readdelta()

    @propertycache
    def _parents(self):
        repo = self._repo
        p1, p2 = repo.changelog.parentrevs(self._rev)
        if p2 == nullrev:
            return [repo[p1]]
        return [repo[p1], repo[p2]]

    def changeset(self):
        c = self._changeset
        return (
            c.manifest,
            c.user,
            c.date,
            c.files,
            c.description,
            c.extra,
        )
    def manifestnode(self):
        return self._changeset.manifest

    def user(self):
        return self._changeset.user
    def date(self):
        return self._changeset.date
    def files(self):
        return self._changeset.files
    def description(self):
        return self._changeset.description
    def branch(self):
        return encoding.tolocal(self._changeset.extra.get("branch"))
    def closesbranch(self):
        return 'close' in self._changeset.extra
    def extra(self):
        """Return a dict of extra information."""
        return self._changeset.extra
    def tags(self):
        """Return a list of byte tag names"""
        return self._repo.nodetags(self._node)
    def bookmarks(self):
        """Return a list of byte bookmark names."""
        return self._repo.nodebookmarks(self._node)
    def phase(self):
        return self._repo._phasecache.phase(self._repo, self._rev)
    def hidden(self):
        return self._rev in repoview.filterrevs(self._repo, 'visible')

    def isinmemory(self):
        return False

    def children(self):
        """return list of changectx contexts for each child changeset.

        This returns only the immediate child changesets. Use descendants() to
        recursively walk children.
        """
        c = self._repo.changelog.children(self._node)
        return [self._repo[x] for x in c]

    def ancestors(self):
        for a in self._repo.changelog.ancestors([self._rev]):
            yield self._repo[a]

    def descendants(self):
        """Recursively yield all children of the changeset.

        For just the immediate children, use children()
        """
        for d in self._repo.changelog.descendants([self._rev]):
            yield self._repo[d]

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(self._repo, path, fileid=fileid,
                       changectx=self, filelog=filelog)

    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            anc = nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            # experimental config: merge.preferancestor
            for r in self._repo.ui.configlist('merge', 'preferancestor'):
                try:
                    ctx = scmutil.revsymbol(self._repo, r)
                except error.RepoLookupError:
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (_("note: using %s as ancestor of %s and %s\n") %
                     (short(anc), short(self._node), short(n2))) +
                    ''.join(_(" alternatively, use --config "
                              "merge.preferancestor=%s\n") %
                            short(n) for n in sorted(cahs) if n != anc))
        return self._repo[anc]

    def isancestorof(self, other):
        """True if this changeset is an ancestor of other"""
        return self._repo.changelog.isancestorrev(self._rev, other._rev)

    def walk(self, match):
        '''Generates matching file names.'''

        # Wrap match.bad method to have message with nodeid
        def bad(fn, msg):
            # The manifest doesn't know about subrepos, so don't complain about
            # paths into valid subrepos.
            if any(fn == s or fn.startswith(s + '/')
                   for s in self.substate):
                return
            match.bad(fn, _('no such file in rev %s') % self)

        m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
        return self._manifest.walk(m)

    def matches(self, match):
        return self.walk(match)

class basefilectx(object):
    """A filecontext object represents the common logic for its children:
    filectx: read-only access to a filerevision that is already present
             in the repo,
    workingfilectx: a filecontext that represents files from the working
                    directory,
    memfilectx: a filecontext that represents files in-memory,
    """
    @propertycache
    def _filelog(self):
        return self._repo.file(self._path)

    @propertycache
    def _changeid(self):
        if r'_changectx' in self.__dict__:
            return self._changectx.rev()
        elif r'_descendantrev' in self.__dict__:
            # this file context was created from a revision with a known
            # descendant, we can (lazily) correct for linkrev aliases
            return self._adjustlinkrev(self._descendantrev)
        else:
            return self._filelog.linkrev(self._filerev)

    @propertycache
    def _filenode(self):
        if r'_fileid' in self.__dict__:
            return self._filelog.lookup(self._fileid)
        else:
            return self._changectx.filenode(self._path)

    @propertycache
    def _filerev(self):
        return self._filelog.rev(self._filenode)

    @propertycache
    def _repopath(self):
        return self._path

    def __nonzero__(self):
        try:
            self._filenode
            return True
        except error.LookupError:
            # file is missing
            return False

    __bool__ = __nonzero__

    def __bytes__(self):
        try:
            return "%s@%s" % (self.path(), self._changectx)
        except error.LookupError:
            return "%s@???" % self.path()

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return r"<%s %s>" % (type(self).__name__, str(self))

    def __hash__(self):
        try:
            return hash((self._path, self._filenode))
        except AttributeError:
            return id(self)

    def __eq__(self, other):
        try:
            return (type(self) == type(other) and self._path == other._path
                    and self._filenode == other._filenode)
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def filerev(self):
        return self._filerev
    def filenode(self):
        return self._filenode
    @propertycache
    def _flags(self):
        return self._changectx.flags(self._path)
    def flags(self):
        return self._flags
    def filelog(self):
        return self._filelog
    def rev(self):
        return self._changeid
    def linkrev(self):
        return self._filelog.linkrev(self._filerev)
    def node(self):
        return self._changectx.node()
    def hex(self):
        return self._changectx.hex()
    def user(self):
        return self._changectx.user()
    def date(self):
        return self._changectx.date()
    def files(self):
        return self._changectx.files()
    def description(self):
        return self._changectx.description()
    def branch(self):
        return self._changectx.branch()
    def extra(self):
        return self._changectx.extra()
    def phase(self):
        return self._changectx.phase()
    def phasestr(self):
        return self._changectx.phasestr()
    def obsolete(self):
        return self._changectx.obsolete()
    def instabilities(self):
        return self._changectx.instabilities()
    def manifest(self):
        return self._changectx.manifest()
    def changectx(self):
        return self._changectx
    def renamed(self):
        return self._copied
    def repo(self):
        return self._repo
    def size(self):
        return len(self.data())

    def path(self):
        return self._path

    def isbinary(self):
        try:
            return stringutil.binary(self.data())
        except IOError:
            return False
    def isexec(self):
        return 'x' in self.flags()
    def islink(self):
        return 'l' in self.flags()

    def isabsent(self):
        """whether this filectx represents a file not in self._changectx

        This is mainly for merge code to detect change/delete conflicts. This is
        expected to be True for all subclasses of basectx."""
        return False

    _customcmp = False
    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        if fctx._customcmp:
            return fctx.cmp(self)

        if (fctx._filenode is None
            and (self._repo._encodefilterpats
                 # if file data starts with '\1\n', empty metadata block is
                 # prepended, which adds 4 bytes to filelog.size().
                 or self.size() - 4 == fctx.size())
            or self.size() == fctx.size()):
            return self._filelog.cmp(self._filenode, fctx.data())

        return True

    def _adjustlinkrev(self, srcrev, inclusive=False):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked
        """
        repo = self._repo
        cl = repo.unfiltered().changelog
        mfl = repo.manifestlog
        # fetch the linkrev
        lkr = self.linkrev()
        if srcrev == lkr:
            return lkr
        # hack to reuse ancestor computation when searching for renames
        memberanc = getattr(self, '_ancestrycontext', None)
        iteranc = None
        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True # we skipped the real (revless) source
        else:
            revs = [srcrev]
        if memberanc is None:
            memberanc = iteranc = cl.ancestors(revs, lkr,
                                               inclusive=inclusive)
        # check if this linkrev is an ancestor of srcrev
        if lkr not in memberanc:
            if iteranc is None:
                iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
            fnode = self._filenode
            path = self._path
            for a in iteranc:
                ac = cl.read(a) # get changeset data (we avoid object creation)
                if path in ac[3]: # checking the 'files' field.
                    # The file has been touched, check if the content is
                    # similar to the one we search for.
                    if fnode == mfl[ac[0]].readfast().get(path):
                        return a
            # In theory, we should never get out of that loop without a result.
            # But if manifest uses a buggy file revision (not children of the
            # one it replaces) we could. Such a buggy situation will likely
            # result is crash somewhere else at to some point.
        return lkr

    def introrev(self):
        """return the rev of the changeset which introduced this file revision

        This method is different from linkrev because it take into account the
        changeset the filectx was created from. It ensures the returned
        revision is one of its ancestors. This prevents bugs from
        'linkrev-shadowing' when a file revision is used by multiple
        changesets.
        """
        toprev = None
        attrs = vars(self)
        if r'_changeid' in attrs:
            # We have a cached value already
            toprev = self._changeid
        elif r'_changectx' in attrs:
            # We know which changelog entry we are coming from
            toprev = self._changectx.rev()

        if toprev is not None:
            return self._adjustlinkrev(toprev, inclusive=True)
+        elif r'_descendantrev' in attrs:
+            introrev = self._adjustlinkrev(self._descendantrev)
+            # be nice and cache the result of the computation
+            self._changeid = introrev
+            return introrev
        else:
            return self.linkrev()

    def introfilectx(self):
        """Return filectx having identical contents, but pointing to the
        changeset revision where this filectx was introduced"""
        introrev = self.introrev()
        if self.rev() == introrev:
            return self
        return self.filectx(self.filenode(), changeid=introrev)

    def _parentfilectx(self, path, fileid, filelog):
        """create parent filectx keeping ancestry info for _adjustlinkrev()"""
        fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
        if r'_changeid' in vars(self) or r'_changectx' in vars(self):
            # If self is associated with a changeset (probably explicitly
            # fed), ensure the created filectx is associated with a
            # changeset that is an ancestor of self.changectx.
            # This lets us later use _adjustlinkrev to get a correct link.
            fctx._descendantrev = self.rev()
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        elif r'_descendantrev' in vars(self):
            # Otherwise propagate _descendantrev if we have one associated.
            fctx._descendantrev = self._descendantrev
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        return fctx

    def parents(self):
        _path = self._path
        fl = self._filelog
        parents = self._filelog.parents(self._filenode)
        pl = [(_path, node, fl) for node in parents if node != nullid]

        r = fl.renamed(self._filenode)
        if r:
            # - In the simple rename case, both parent are nullid, pl is empty.
            # - In case of merge, only one of the parent is null id and should
            # be replaced with the rename information. This parent is -always-
            # the first one.
            #
            # As null id have always been filtered out in the previous list
            # comprehension, inserting to 0 will always result in "replacing
            # first nullid parent with rename information.
            pl.insert(0, (r[0], r[1], self._repo.file(r[0])))

        return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]

    def p1(self):
        return self.parents()[0]

    def p2(self):
        p = self.parents()
        if len(p) == 2:
            return p[1]
        return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)

    def annotate(self, follow=False, skiprevs=None, diffopts=None):
        """Returns a list of annotateline objects for each line in the file

        - line.fctx is the filectx of the node where that line was last changed
        - line.lineno is the line number at the first appearance in the managed
          file
        - line.text is the data on that line (including newline character)
        """
        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            # Cut _descendantrev here to mitigate the penalty of lazy linkrev
            # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
            # from the topmost introrev (= srcrev) down to p.linkrev() if it
            # isn't an ancestor of the srcrev.
            f._changeid
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
            # from the cache to save time
            for p in pl:
                if not r'_filelog' in p.__dict__:
                    p._filelog = getlog(p.path())

            return pl

        # use linkrev to find the first changeset where self appeared
        base = self.introfilectx()
        if getattr(base, '_ancestrycontext', None) is None:
            cl = self._repo.changelog
            if base.rev() is None:
                # wctx is not inclusive, but works because _ancestrycontext
                # is used to test filelog revisions
                ac = cl.ancestors([p.rev() for p in base.parents()],
                                  inclusive=True)
            else:
                ac = cl.ancestors([base.rev()], inclusive=True)
            base._ancestrycontext = ac

        return dagop.annotate(base, parents, skiprevs=skiprevs,
                              diffopts=diffopts)

    def ancestors(self, followfirst=False):
        visit = {}
        c = self
        if followfirst:
            cut = 1
        else:
            cut = None

        while True:
            for parent in c.parents()[:cut]:
                visit[(parent.linkrev(), parent.filenode())] = parent
            if not visit:
                break
            c = visit.pop(max(visit))
            yield c

    def decodeddata(self):
        """Returns `data()` after running repository decoding filters.

        This is often equivalent to how the data would be expressed on disk.
        """
        return self._repo.wwritedata(self.path(), self.data())

class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid must be a revision number, if specified.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        assert (changeid is not None
                or fileid is not None
                or changectx is not None), \
                ("bad args: changeid=%r, fileid=%r, changectx=%r"
                 % (changeid, fileid, changectx))

        if filelog is not None:
            self._filelog = filelog

        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        try:
            return self._repo[self._changeid]
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to build
            # `changectx` for filtered revision. In such case we fallback to
            # creating `changectx` on the unfiltered version of the reposition.
            # This fallback should not be an issue because `changectx` from
            # `filectx` are not used in complex operations that care about
            # filtering.
            #
            # This fallback is a cheap and dirty fix that prevent several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better as "crash"
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when solving linkrev issue are on the table.
            return self._repo.unfiltered()[self._changeid]

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
960 return filectx(self._repo, self._path, fileid=fileid,
965 return filectx(self._repo, self._path, fileid=fileid,
961 filelog=self._filelog, changeid=changeid)
966 filelog=self._filelog, changeid=changeid)
962
967
963 def rawdata(self):
968 def rawdata(self):
964 return self._filelog.revision(self._filenode, raw=True)
969 return self._filelog.revision(self._filenode, raw=True)
965
970
966 def rawflags(self):
971 def rawflags(self):
967 """low-level revlog flags"""
972 """low-level revlog flags"""
968 return self._filelog.flags(self._filerev)
973 return self._filelog.flags(self._filerev)
969
974
970 def data(self):
975 def data(self):
971 try:
976 try:
972 return self._filelog.read(self._filenode)
977 return self._filelog.read(self._filenode)
973 except error.CensoredNodeError:
978 except error.CensoredNodeError:
974 if self._repo.ui.config("censor", "policy") == "ignore":
979 if self._repo.ui.config("censor", "policy") == "ignore":
975 return ""
980 return ""
976 raise error.Abort(_("censored node: %s") % short(self._filenode),
981 raise error.Abort(_("censored node: %s") % short(self._filenode),
977 hint=_("set censor.policy to ignore errors"))
982 hint=_("set censor.policy to ignore errors"))
978
983
979 def size(self):
984 def size(self):
980 return self._filelog.size(self._filerev)
985 return self._filelog.size(self._filerev)
981
986
982 @propertycache
987 @propertycache
983 def _copied(self):
988 def _copied(self):
984 """check if file was actually renamed in this changeset revision
989 """check if file was actually renamed in this changeset revision
985
990
986 If rename logged in file revision, we report copy for changeset only
991 If rename logged in file revision, we report copy for changeset only
987 if file revisions linkrev points back to the changeset in question
992 if file revisions linkrev points back to the changeset in question
988 or both changeset parents contain different file revisions.
993 or both changeset parents contain different file revisions.
989 """
994 """
990
995
991 renamed = self._filelog.renamed(self._filenode)
996 renamed = self._filelog.renamed(self._filenode)
992 if not renamed:
997 if not renamed:
993 return None
998 return None
994
999
995 if self.rev() == self.linkrev():
1000 if self.rev() == self.linkrev():
996 return renamed
1001 return renamed
997
1002
998 name = self.path()
1003 name = self.path()
999 fnode = self._filenode
1004 fnode = self._filenode
1000 for p in self._changectx.parents():
1005 for p in self._changectx.parents():
1001 try:
1006 try:
1002 if fnode == p.filenode(name):
1007 if fnode == p.filenode(name):
1003 return None
1008 return None
1004 except error.LookupError:
1009 except error.LookupError:
1005 pass
1010 pass
1006 return renamed
1011 return renamed
1007
1012
1008 def children(self):
1013 def children(self):
1009 # hard for renames
1014 # hard for renames
1010 c = self._filelog.children(self._filenode)
1015 c = self._filelog.children(self._filenode)
1011 return [filectx(self._repo, self._path, fileid=x,
1016 return [filectx(self._repo, self._path, fileid=x,
1012 filelog=self._filelog) for x in c]
1017 filelog=self._filelog) for x in c]
1013
1018
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        super(committablectx, self).__init__(repo)
        self._rev = None
        self._node = None
        self._text = text
        if date:
            self._date = dateutil.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise error.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
        if self._extra['branch'] == '':
            self._extra['branch'] = 'default'

    def __bytes__(self):
        return bytes(self._parents[0]) + "+"

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func

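    # Illustrative note (not from the original source): the three-way rule in
    # func() above keeps a flag change made on exactly one side of the merge.
    # For example, with fl1 == 'x' (p1 made the file executable) and
    # fl2 == fla == '' (p2 agrees with the ancestor), the reconstructed flag
    # is 'x'; when all three values differ, it returns '' and punts.
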
    @propertycache
    def _flagfunc(self):
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    @propertycache
    def _status(self):
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        ui = self._repo.ui
        date = ui.configdate('devel', 'default-date')
        if date is None:
            date = dateutil.makedate()
        return date

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        return None
    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)

    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def isinmemory(self):
        return False

    def tags(self):
        return []

    def bookmarks(self):
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        phase = phases.draft # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def walk(self, match):
        '''Generates matching file names.'''
        return sorted(self._repo.dirstate.walk(self._repo.narrowmatch(match),
                                               subrepos=sorted(self.substate),
                                               unknown=True, ignored=False))

    def matches(self, match):
        match = self._repo.narrowmatch(match)
        ds = self._repo.dirstate
        return sorted(f for f in ds.matches(match) if ds[f] != 'r')

    def ancestors(self):
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]):
            yield self._repo[a]

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates the backing stores this working context
        wraps so they reflect the fact that the changes in this workingctx
        have been committed. For example, it marks modified and added files
        as normal in the dirstate.

        """

        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.normal(f)
            for f in self.removed():
                self._repo.dirstate.drop(f)
            self._repo.dirstate.setparents(node)

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())

    def dirty(self, missing=False, merge=True, branch=True):
        return False

class workingctx(committablectx):
    """A workingctx object makes access to data related to
    the current working directory convenient.
    date - any valid date string or (unixtime, offset), or None.
    user - username string, or None.
    extra - a dictionary of extra values, or None.
    changes - a list of file lists as returned by localrepo.status()
              or None to use the repository status.
    """
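    # Usage sketch (illustrative, not part of the original file): the working
    # directory context is normally obtained from the repository rather than
    # instantiated directly:
    #
    #   wctx = repo[None]    # workingctx for the current working directory
    #   wctx.files()         # sorted modified + added + removed paths
    #   wctx.p1()            # first parent changectx
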
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        super(workingctx, self).__init__(repo, text, user, date, extra, changes)

    def __iter__(self):
        d = self._repo.dirstate
        for f in d:
            if d[f] != 'r':
                yield f

    def __contains__(self, key):
        return self._repo.dirstate[key] not in "?r"

    def hex(self):
        return hex(wdirid)

    @propertycache
    def _parents(self):
        p = self._repo.dirstate.parents()
        if p[1] == nullid:
            p = p[:-1]
        # use unfiltered repo to delay/avoid loading obsmarkers
        unfi = self._repo.unfiltered()
        return [changectx(self._repo, unfi.changelog.rev(n), n) for n in p]

    def _fileinfo(self, path):
        # populate __dict__['_manifest'] as workingctx has no _manifestdelta
        self._manifest
        return super(workingctx, self)._fileinfo(path)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        return workingfilectx(self._repo, path, workingctx=self,
                              filelog=filelog)

    def dirty(self, missing=False, merge=True, branch=True):
        "check whether a working directory is modified"
        # check subrepos first
        for s in sorted(self.substate):
            if self.sub(s).dirty(missing=missing):
                return True
        # check current working dir
        return ((merge and self.p2()) or
                (branch and self.branch() != self.p1().branch()) or
                self.modified() or self.added() or self.removed() or
                (missing and self.deleted()))

    def add(self, list, prefix=""):
        with self._repo.wlock():
            ui, ds = self._repo.ui, self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                # ds.pathto() returns an absolute file when this is invoked from
                # the keyword extension. That gets flagged as non-portable on
                # Windows, since it contains the drive letter and colon.
                scmutil.checkportable(ui, os.path.join(prefix, f))
                try:
                    st = lstat(f)
                except OSError:
                    ui.warn(_("%s does not exist!\n") % uipath(f))
                    rejected.append(f)
                    continue
                limit = ui.configbytes('ui', 'large-file-limit')
                if limit != 0 and st.st_size > limit:
                    ui.warn(_("%s: up to %d MB of RAM may be required "
                              "to manage this file\n"
                              "(use 'hg revert %s' to cancel the "
                              "pending addition)\n")
                            % (f, 3 * st.st_size // 1000000, uipath(f)))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(_("%s not added: only files and symlinks "
                              "supported currently\n") % uipath(f))
                    rejected.append(f)
                elif ds[f] in 'amn':
                    ui.warn(_("%s already tracked!\n") % uipath(f))
                elif ds[f] == 'r':
                    ds.normallookup(f)
                else:
                    ds.add(f)
            return rejected

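    # Illustrative configuration note (example values are assumptions, not
    # original text): the size warning in add() above is driven by the
    # 'ui.large-file-limit' knob read through ui.configbytes(), e.g. in hgrc:
    #
    #   [ui]
    #   large-file-limit = 10000000
    #
    # A value of 0 disables the check (see 'limit != 0' above).
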
    def forget(self, files, prefix=""):
        with self._repo.wlock():
            ds = self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            for f in files:
                if f not in self._repo.dirstate:
                    self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
                    rejected.append(f)
                elif self._repo.dirstate[f] != 'a':
                    self._repo.dirstate.remove(f)
                else:
                    self._repo.dirstate.drop(f)
            return rejected

    def undelete(self, list):
        pctxs = self.parents()
        with self._repo.wlock():
            ds = self._repo.dirstate
            for f in list:
                if self._repo.dirstate[f] != 'r':
                    self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
                else:
                    fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
                    t = fctx.data()
                    self._repo.wwrite(f, t, fctx.flags())
                    self._repo.dirstate.normal(f)

    def copy(self, source, dest):
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            self._repo.ui.warn(_("%s does not exist!\n")
                               % self._repo.dirstate.pathto(dest))
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(_("copy failed: %s is not a file or a "
                                 "symbolic link\n")
                               % self._repo.dirstate.pathto(dest))
        else:
            with self._repo.wlock():
                if self._repo.dirstate[dest] in '?':
                    self._repo.dirstate.add(dest)
                elif self._repo.dirstate[dest] in 'r':
                    self._repo.dirstate.normallookup(dest)
                self._repo.dirstate.copy(source, dest)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        r = self._repo

        # Only a case insensitive filesystem needs magic to translate user input
        # to actual case in the filesystem.
        icasefs = not util.fscasesensitive(r.root)
        return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
                              default, auditor=r.auditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn,
                              icasefs=icasefs)

    def _filtersuspectsymlink(self, files):
        if not files or self._repo.dirstate._checklink:
            return files

        # Symlink placeholders may get non-symlink-like contents
        # via user error or dereferencing by NFS or Samba servers,
        # so we filter out any placeholders that don't look like a
        # symlink
        sane = []
        for f in files:
            if self.flags(f) == 'l':
                d = self[f].data()
                if (d == '' or len(d) >= 1024 or '\n' in d
                    or stringutil.binary(d)):
                    self._repo.ui.debug('ignoring suspect symlink placeholder'
                                        ' "%s"\n' % f)
                    continue
            sane.append(f)
        return sane

    def _checklookup(self, files):
        # check for any possibly clean files
        if not files:
            return [], [], []

        modified = []
        deleted = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            try:
                # This will return True for a file that got replaced by a
                # directory in the interim, but fixing that is pretty hard.
                if (f not in pctx or self.flags(f) != pctx.flags(f)
                    or pctx[f].cmp(self[f])):
                    modified.append(f)
                else:
                    fixup.append(f)
            except (IOError, OSError):
                # A file became inaccessible in between? Mark it as deleted,
                # matching dirstate behavior (issue5584).
                # The dirstate has more complex behavior around whether a
                # missing file matches a directory, etc, but we don't need to
                # bother with that: if f has made it to this point, we're sure
                # it's in the dirstate.
                deleted.append(f)

        return modified, deleted, fixup

    def _poststatusfixup(self, status, fixup):
        """update dirstate for files that are actually clean"""
        poststatus = self._repo.postdsstatus()
        if fixup or poststatus:
            try:
                oldid = self._repo.dirstate.identity()

                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                with self._repo.wlock(False):
                    if self._repo.dirstate.identity() == oldid:
                        if fixup:
                            normal = self._repo.dirstate.normal
                            for f in fixup:
                                normal(f)
                            # write changes out explicitly, because nesting
                            # wlock at runtime may prevent 'wlock.release()'
                            # after this block from doing so for subsequent
                            # changing files
                            tr = self._repo.currenttransaction()
                            self._repo.dirstate.write(tr)

                        if poststatus:
                            for ps in poststatus:
                                ps(self, status)
                    else:
                        # in this case, writing changes out breaks
                        # consistency, because .hg/dirstate was
                        # already changed simultaneously after last
                        # caching (see also issue5584 for detail)
                        self._repo.ui.debug('skip updating dirstate: '
                                            'identity mismatch\n')
            except error.LockError:
                pass
            finally:
                # Even if the wlock couldn't be grabbed, clear out the list.
                self._repo.clearpostdsstatus()

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        '''Gets the status from the dirstate -- internal use only.'''
        subrepos = []
        if '.hgsub' in self:
            subrepos = sorted(self.substate)
        cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
                                            clean=clean, unknown=unknown)

        # check for any possibly clean files
        fixup = []
        if cmp:
            modified2, deleted2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)
            s.deleted.extend(deleted2)

            if fixup and clean:
                s.clean.extend(fixup)

        self._poststatusfixup(s, fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(s.modified, s.added, s.removed,
                                              s.deleted, [], [], [])
            else:
                self._status = s

        return s

    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status

        This reuses the file nodeid from the parent, but we use special node
        identifiers for added and modified files. This is used by the manifest
        merge to see that files are different and by the update logic to avoid
        deleting newly added files.
        """
        return self._buildstatusmanifest(self._status)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results."""
        parents = self.parents()

        man = parents[0].manifest().copy()

        ff = self._flagfunc
        for i, l in ((addednodeid, status.added),
                     (modifiednodeid, status.modified)):
            for f in l:
                man[f] = i
                try:
                    man.setflag(f, ff(f))
                except OSError:
                    pass

        for f in status.deleted + status.removed:
            if f in man:
                del man[f]

        return man

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo['.']:
            s = super(workingctx, self)._buildstatus(other, s, match,
                                                     listignored, listclean,
                                                     listunknown)
        return s

    def _matchstatus(self, other, match):
        """override the match method with a filter for directory patterns

        We use inheritance to customize the match.bad method only in cases of
        workingctx since it belongs only to the working directory when
        comparing against the parent changeset.

        If we aren't comparing against the working directory's parent, then we
        just use the default match object sent to us.
        """
        if other != self._repo['.']:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in other and not other.hasdir(f):
                    self._repo.ui.warn('%s: %s\n' %
                                       (self._repo.dirstate.pathto(f), msg))
            match.bad = bad
        return match

    def markcommitted(self, node):
        super(workingctx, self).markcommitted(node)

        sparse.aftercommit(self._repo, node)

class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""
    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def filenode(ctx, path):
            return ctx._manifest.get(path, nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        return [self._parentfilectx(p, fileid=n, filelog=l)
                for p, n, l in pl if n != nullid]

    def children(self):
        return []

class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""
    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        return workingctx(self._repo)

    def data(self):
        return self._repo.wread(self._path)
    def renamed(self):
        rp = self._repo.dirstate.copied(self._path)
        if not rp:
            return None
        return rp, self._changectx._parents[0]._manifest.get(rp, nullid)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size
    def date(self):
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def exists(self):
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        rmdir = self._repo.ui.configbool('experimental', 'removeemptydirs')
        self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing,
                                   rmdir=rmdir)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        """wraps repo.wwrite"""
        self._repo.wwrite(self._path, data, flags,
                          backgroundclose=backgroundclose,
                          **kwargs)

    def markcopied(self, src):
        """marks this file a copy of `src`"""
        if self._repo.dirstate[self._path] in "nma":
            self._repo.dirstate.copy(src, self._path)

    def clearunknown(self):
        """Removes conflicting items in the working directory so that
        ``write()`` can be called successfully.
        """
        wvfs = self._repo.wvfs
        f = self._path
        wvfs.audit(f)
        if self._repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
            # remove files under the directory as they should already be
            # warned and backed up
            if wvfs.isdir(f) and not wvfs.islink(f):
                wvfs.rmtree(f, forcibly=True)
            for p in reversed(list(util.finddirs(f))):
                if wvfs.isfileorlink(p):
                    wvfs.unlink(p)
                    break
        else:
            # don't remove files if path conflicts are not processed
            if wvfs.isdir(f) and not wvfs.islink(f):
                wvfs.removedirs(f)

    def setflags(self, l, x):
        self._repo.wvfs.setflags(self._path, l, x)

class overlayworkingctx(committablectx):
    """Wraps another mutable context with a write-back cache that can be
    converted into a commit context.

    self._cache[path] maps to a dict with keys: {
        'exists': bool?
        'date': date?
        'data': str?
        'flags': str?
        'copied': str? (path or None)
    }
    If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
    is `False`, the file was deleted.
    """

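    # Illustrative sketch (not from the original source): a dirty cache entry
    # for a modified, executable file might look like
    #
    #   self._cache['foo/bar.py'] = {
    #       'exists': True,
    #       'date': dateutil.makedate(),
    #       'data': 'new file contents',
    #       'flags': 'x',
    #       'copied': None,
    #   }
    #
    # while a deletion is recorded simply as 'exists': False.
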
    def __init__(self, repo):
        super(overlayworkingctx, self).__init__(repo)
        self.clean()

    def setbase(self, wrappedctx):
        self._wrappedctx = wrappedctx
        self._parents = [wrappedctx]
        # Drop old manifest cache as it is now out of date.
        # This is necessary when, e.g., rebasing several nodes with one
        # ``overlayworkingctx`` (e.g. with --collapse).
        util.clearcachedproperty(self, '_manifest')

    def data(self, path):
        if self.isdirty(path):
            if self._cache[path]['exists']:
                if self._cache[path]['data']:
                    return self._cache[path]['data']
                else:
                    # Must fallback here, too, because we only set flags.
                    return self._wrappedctx[path].data()
            else:
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        else:
            return self._wrappedctx[path].data()

    @propertycache
    def _manifest(self):
        parents = self.parents()
        man = parents[0].manifest().copy()

        flag = self._flagfunc
        for path in self.added():
            man[path] = addednodeid
            man.setflag(path, flag(path))
        for path in self.modified():
            man[path] = modifiednodeid
            man.setflag(path, flag(path))
        for path in self.removed():
            del man[path]
        return man

    @propertycache
    def _flagfunc(self):
        def f(path):
            return self._cache[path]['flags']
        return f

    def files(self):
        return sorted(self.added() + self.modified() + self.removed())

    def modified(self):
        return [f for f in self._cache.keys() if self._cache[f]['exists'] and
                self._existsinparent(f)]

    def added(self):
        return [f for f in self._cache.keys() if self._cache[f]['exists'] and
                not self._existsinparent(f)]

    def removed(self):
        return [f for f in self._cache.keys() if
                not self._cache[f]['exists'] and self._existsinparent(f)]

    def isinmemory(self):
        return True

    def filedate(self, path):
        if self.isdirty(path):
            return self._cache[path]['date']
        else:
            return self._wrappedctx[path].date()

    def markcopied(self, path, origin):
        if self.isdirty(path):
            self._cache[path]['copied'] = origin
        else:
            raise error.ProgrammingError('markcopied() called on clean context')

    def copydata(self, path):
        if self.isdirty(path):
            return self._cache[path]['copied']
        else:
            raise error.ProgrammingError('copydata() called on clean context')

1803 def flags(self, path):
1808 def flags(self, path):
1804 if self.isdirty(path):
1809 if self.isdirty(path):
1805 if self._cache[path]['exists']:
1810 if self._cache[path]['exists']:
1806 return self._cache[path]['flags']
1811 return self._cache[path]['flags']
1807 else:
1812 else:
1808 raise error.ProgrammingError("No such file or directory: %s" %
1813 raise error.ProgrammingError("No such file or directory: %s" %
1809 self._path)
1814 self._path)
1810 else:
1815 else:
1811 return self._wrappedctx[path].flags()
1816 return self._wrappedctx[path].flags()
1812
1817
    def _existsinparent(self, path):
        try:
            # ``commitctx`` raises a ``ManifestLookupError`` if a path does not
            # exist, unlike ``workingctx``, which returns a ``workingfilectx``
            # with an ``exists()`` function.
            self._wrappedctx[path]
            return True
        except error.ManifestLookupError:
            return False

    def _auditconflicts(self, path):
        """Replicates conflict checks done by wvfs.write().

        Since we never write to the filesystem and never call `applyupdates` in
        IMM, we'll never check that a path is actually writable -- e.g., because
        it adds `a/foo`, but `a` is actually a file in the other commit.
        """
        def fail(path, component):
            # p1() is the base and we're receiving "writes" for p2()'s
            # files.
            if 'l' in self.p1()[component].flags():
                raise error.Abort("error: %s conflicts with symlink %s "
                                  "in %s." % (path, component,
                                              self.p1().rev()))
            else:
                raise error.Abort("error: '%s' conflicts with file '%s' in "
                                  "%s." % (path, component,
                                           self.p1().rev()))

        # Test that each new directory to be created to write this path from p2
        # is not a file in p1.
        components = path.split('/')
        for i in pycompat.xrange(len(components)):
            component = "/".join(components[0:i])
            if component in self.p1() and self._cache[component]['exists']:
                fail(path, component)

        # Test the other direction -- that this path from p2 isn't a directory
        # in p1 (test that p1 doesn't have any paths matching `path/*`).
        match = matchmod.match('/', '', [path + '/'], default=b'relpath')
        matches = self.p1().manifest().matches(match)
        mfiles = matches.keys()
        if len(mfiles) > 0:
            if len(mfiles) == 1 and mfiles[0] == path:
                return
            # omit the files which are deleted in current IMM wctx
            mfiles = [m for m in mfiles if self._cache[m]['exists']]
            if not mfiles:
                return
            raise error.Abort("error: file '%s' cannot be written because "
                              " '%s/' is a folder in %s (containing %d "
                              "entries: %s)"
                              % (path, path, self.p1(), len(mfiles),
                                 ', '.join(mfiles)))
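
    # For example: if p1 tracks 'a' as a regular file, an in-memory
    # write('a/foo', ...) has to abort, since flushing it would require 'a'
    # to become a directory; conversely, writing 'a' aborts if p1 tracks
    # files under 'a/'. Both directions are checked above.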

    def write(self, path, data, flags='', **kwargs):
        if data is None:
            raise error.ProgrammingError("data must be non-None")
        self._auditconflicts(path)
        self._markdirty(path, exists=True, data=data, date=dateutil.makedate(),
                        flags=flags)

    def setflags(self, path, l, x):
        flag = ''
        if l:
            flag = 'l'
        elif x:
            flag = 'x'
        self._markdirty(path, exists=True, date=dateutil.makedate(),
                        flags=flag)

    def remove(self, path):
        self._markdirty(path, exists=False)

    def exists(self, path):
        """exists behaves like `lexists`, but needs to follow symlinks and
        return False if they are broken.
        """
        if self.isdirty(path):
            # If this path exists and is a symlink, "follow" it by calling
            # exists on the destination path.
            if (self._cache[path]['exists'] and
                'l' in self._cache[path]['flags']):
                return self.exists(self._cache[path]['data'].strip())
            else:
                return self._cache[path]['exists']

        return self._existsinparent(path)

    def lexists(self, path):
        """lexists returns True if the path exists"""
        if self.isdirty(path):
            return self._cache[path]['exists']

        return self._existsinparent(path)

    def size(self, path):
        if self.isdirty(path):
            if self._cache[path]['exists']:
                return len(self._cache[path]['data'])
            else:
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        return self._wrappedctx[path].size()

    def tomemctx(self, text, branch=None, extra=None, date=None, parents=None,
                 user=None, editor=None):
        """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
        committed.

        ``text`` is the commit message.
        ``parents`` (optional) are rev numbers.
        """
        # Default parents to the wrapped context's if not passed.
        if parents is None:
            parents = self._wrappedctx.parents()
            if len(parents) == 1:
                parents = (parents[0], None)

        # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
        if parents[1] is None:
            parents = (self._repo[parents[0]], None)
        else:
            parents = (self._repo[parents[0]], self._repo[parents[1]])

        files = self._cache.keys()
        def getfile(repo, memctx, path):
            if self._cache[path]['exists']:
                return memfilectx(repo, memctx, path,
                                  self._cache[path]['data'],
                                  'l' in self._cache[path]['flags'],
                                  'x' in self._cache[path]['flags'],
                                  self._cache[path]['copied'])
            else:
                # Returning None, but including the path in `files`, is
                # necessary for memctx to register a deletion.
                return None
        return memctx(self._repo, parents, text, files, getfile, date=date,
                      extra=extra, user=user, branch=branch, editor=editor)
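
    # A minimal sketch (assuming ``wctx`` is an already-populated
    # ``overlayworkingctx``, e.g. built by an in-memory rebase or merge):
    #
    #   wctx.write(b'foo', b'new contents\n')
    #   mctx = wctx.tomemctx(b'commit message', user=b'someone')
    #   newnode = mctx.commit()            # == repo.commitctx(mctx)
    #
    # ``parents`` defaults to the wrapped context's parents; pass revision
    # numbers explicitly to commit the cached changes somewhere else.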

    def isdirty(self, path):
        return path in self._cache

    def isempty(self):
        # We need to discard any keys that are actually clean before the empty
        # commit check.
        self._compact()
        return len(self._cache) == 0

    def clean(self):
        self._cache = {}

    def _compact(self):
        """Removes keys from the cache that are actually clean, by comparing
        them with the underlying context.

        This can occur during the merge process, e.g. by passing --tool :local
        to resolve a conflict.
        """
        keys = []
        for path in self._cache.keys():
            cache = self._cache[path]
            try:
                underlying = self._wrappedctx[path]
                if (underlying.data() == cache['data'] and
                    underlying.flags() == cache['flags']):
                    keys.append(path)
            except error.ManifestLookupError:
                # Path not in the underlying manifest (created).
                continue

        for path in keys:
            del self._cache[path]
        return keys

    def _markdirty(self, path, exists, data=None, date=None, flags=''):
        # data not provided, let's see if we already have some; if not, let's
        # grab it from our underlying context, so that we always have data if
        # the file is marked as existing.
        if exists and data is None:
            oldentry = self._cache.get(path) or {}
            data = oldentry.get('data') or self._wrappedctx[path].data()

        self._cache[path] = {
            'exists': exists,
            'data': data,
            'date': date,
            'flags': flags,
            'copied': None,
        }

    def filectx(self, path, filelog=None):
        return overlayworkingfilectx(self._repo, path, parent=self,
                                     filelog=filelog)

class overlayworkingfilectx(committablefilectx):
    """Wraps a ``workingfilectx`` but intercepts all writes into an in-memory
    cache, which can be flushed through later by calling ``flush()``."""

    def __init__(self, repo, path, filelog=None, parent=None):
        super(overlayworkingfilectx, self).__init__(repo, path, filelog,
                                                    parent)
        self._repo = repo
        self._parent = parent
        self._path = path

    def cmp(self, fctx):
        return self.data() != fctx.data()

    def changectx(self):
        return self._parent

    def data(self):
        return self._parent.data(self._path)

    def date(self):
        return self._parent.filedate(self._path)

    def exists(self):
        return self.lexists()

    def lexists(self):
        return self._parent.exists(self._path)

    def renamed(self):
        path = self._parent.copydata(self._path)
        if not path:
            return None
        return path, self._changectx._parents[0]._manifest.get(path, nullid)

    def size(self):
        return self._parent.size(self._path)

    def markcopied(self, origin):
        self._parent.markcopied(self._path, origin)

    def audit(self):
        pass

    def flags(self):
        return self._parent.flags(self._path)

    def setflags(self, islink, isexec):
        return self._parent.setflags(self._path, islink, isexec)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        return self._parent.write(self._path, data, flags, **kwargs)

    def remove(self, ignoremissing=False):
        return self._parent.remove(self._path)

    def clearunknown(self):
        pass

class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """
    def __init__(self, repo, changes,
                 text="", user=None, date=None, extra=None):
        super(workingcommitctx, self).__init__(repo, text, user, date, extra,
                                               changes)

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        if clean:
            clean = [f for f in self._manifest if f not in self._changedset]
        else:
            clean = []
        return scmutil.status([f for f in self._status.modified if match(f)],
                              [f for f in self._status.added if match(f)],
                              [f for f in self._status.removed if match(f)],
                              [], [], [], clean)

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        changed = set(self._status.modified)
        changed.update(self._status.added)
        changed.update(self._status.removed)
        return changed

def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    cache = {}

    def getfilectx(repo, memctx, path):
        if path not in cache:
            cache[path] = func(repo, memctx, path)
        return cache[path]

    return getfilectx

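# A minimal sketch (``myfilectxfn`` is a hypothetical callback following the
# filectxfn(repo, memctx, path) protocol described under memctx below):
#
#   cachedfn = makecachingfilectxfn(myfilectxfn)
#   fctx = cachedfn(repo, mctx, b'path/to/file')   # computed once
#   fctx = cachedfn(repo, mctx, b'path/to/file')   # served from the cache
#
# memctx.__init__ already wraps the filectxfn it receives with this helper,
# so callers normally don't need to apply it themselves.
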
def memfilefromctx(ctx):
    """Given a context return a memfilectx for ctx[path]

    This is a convenience method for building a memctx based on another
    context.
    """
    def getfilectx(repo, memctx, path):
        fctx = ctx[path]
        # this is weird but apparently we only keep track of one parent
        # (why not only store that instead of a tuple?)
        copied = fctx.renamed()
        if copied:
            copied = copied[0]
        return memfilectx(repo, memctx, path, fctx.data(),
                          islink=fctx.islink(), isexec=fctx.isexec(),
                          copied=copied)

    return getfilectx

def memfilefrompatch(patchstore):
    """Given a patch (e.g. patchstore object) return a memfilectx

    This is a convenience method for building a memctx based on a patchstore.
    """
    def getfilectx(repo, memctx, path):
        data, mode, copied = patchstore.getfile(path)
        if data is None:
            return None
        islink, isexec = mode
        return memfilectx(repo, memctx, path, data, islink=islink,
                          isexec=isexec, copied=copied)

    return getfilectx

class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time, while
    related file data is made available through a callback
    mechanism. 'repo' is the current localrepo, 'parents' is a
    sequence of two parent revision identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    the requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but call order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn returns None for recent
    Mercurial. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by dateutil.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, branch=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [self._repo[p] for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        if branch is not None:
            self._extra['branch'] = encoding.fromlocal(branch)
        self.substate = {}

        if isinstance(filectxfn, patch.filestore):
            filectxfn = memfilefrompatch(filectxfn)
        elif not callable(filectxfn):
            # if store is not callable, wrap it in a function
            filectxfn = memfilefromctx(filectxfn)

        # memoizing increases performance for e.g. vcs convert scenarios.
        self._filectxfn = makecachingfilectxfn(filectxfn)

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            man[f] = modifiednodeid

        for f in self._status.added:
            man[f] = addednodeid

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])

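# A minimal usage sketch (all names are illustrative): create a one-file
# commit entirely in memory, on top of the working directory parent.
#
#   def getfilectx(repo, memctx, path):
#       return memfilectx(repo, memctx, path, b'file contents\n')
#
#   p1 = repo['.'].node()
#   mctx = memctx(repo, (p1, None), b'commit message', [b'newfile'],
#                 getfilectx, user=b'someone <someone@example.com>')
#   newnode = mctx.commit()
#
# Returning None from getfilectx would instead mark the path as removed,
# as described in the class docstring above.
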
class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, changectx, path, data, islink=False,
                 isexec=False, copied=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, changectx)
        self._data = data
        if islink:
            self._flags = 'l'
        elif isexec:
            self._flags = 'x'
        else:
            self._flags = ''
        self._copied = None
        if copied:
            self._copied = (copied, nullid)

    def data(self):
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags, **kwargs):
        """wraps repo.wwrite"""
        self._data = data


class metadataonlyctx(committablectx):
    """Like memctx but it's reusing the manifest of a different commit.
    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time. 'repo' is the
    current localrepo, 'originalctx' is the original revision whose manifest
    we're reusing, 'parents' is a sequence of two parent revision identifiers
    (pass None for every missing parent), 'text' is the commit message.

    user receives the committer name and defaults to current repository
    username, date is the commit date in any format supported by
    dateutil.parsedate() and defaults to current date, extra is a dictionary of
    metadata or is left empty.
    """
    def __init__(self, repo, originalctx, parents=None, text=None, user=None,
                 date=None, extra=None, editor=False):
        if text is None:
            text = originalctx.description()
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        self._manifestnode = originalctx.manifestnode()
        if parents is None:
            parents = originalctx.parents()
        else:
            parents = [repo[p] for p in parents if p is not None]
        parents = parents[:]
        while len(parents) < 2:
            parents.append(repo[nullid])
        p1, p2 = self._parents = parents

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        mp1, mp2 = self.manifestctx().parents
        if p1 != nullid and p1.manifestnode() != mp1:
            raise RuntimeError(r"can't reuse the manifest: its p1 "
                               r"doesn't match the new ctx p1")
        if p2 != nullid and p2.manifestnode() != mp2:
            raise RuntimeError(r"can't reuse the manifest: "
                               r"its p2 doesn't match the new ctx p2")

        self._files = originalctx.files()
        self.substate = {}

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        return self._manifestnode

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the ``origctx``
        and parents manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "metadataonlyctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif f in self:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])

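# A minimal sketch (names are illustrative): rewrite only the metadata of the
# working directory parent, reusing its manifest unchanged.
#
#   ctx = repo['.']
#   newnode = metadataonlyctx(repo, ctx, text=b'reworded message',
#                             user=ctx.user(), date=ctx.date()).commit()
#
# Because the manifest is reused as-is, the parents passed in (or defaulted
# from ``originalctx``) must carry the same manifests as the original
# parents, which is what the sanity check in __init__ enforces.
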
class arbitraryfilectx(object):
    """Allows you to use filectx-like functions on a file in an arbitrary
    location on disk, possibly not in the working directory.
    """
    def __init__(self, path, repo=None):
        # Repo is optional because contrib/simplemerge uses this class.
        self._repo = repo
        self._path = path

    def cmp(self, fctx):
        # filecmp follows symlinks whereas `cmp` should not, so skip the fast
        # path if either side is a symlink.
        symlinks = ('l' in self.flags() or 'l' in fctx.flags())
        if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
            # Add a fast-path for merge if both sides are disk-backed.
            # Note that filecmp uses the opposite return values (True if same)
            # from our cmp functions (True if different).
            return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
        return self.data() != fctx.data()

    def path(self):
        return self._path

    def flags(self):
        return ''

    def data(self):
        return util.readfile(self._path)

    def decodeddata(self):
        with open(self._path, "rb") as f:
            return f.read()

    def remove(self):
        util.unlink(self._path)

    def write(self, data, flags, **kwargs):
        assert not flags
        with open(self._path, "wb") as f:
            f.write(data)
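
# A minimal sketch (paths are illustrative): arbitraryfilectx lets merge
# helpers compare an on-disk file that isn't tracked in the working directory
# against a regular file context.
#
#   backup = arbitraryfilectx('/tmp/foo.orig', repo=repo)
#   if backup.cmp(repo[None]['foo']):
#       ...  # contents differ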