##// END OF EJS Templates
context: spell out the logic around linkrev adjustment starting point...
Boris Feld -
r40728:f3f4d853 default
parent child Browse files
Show More
@@ -1,2435 +1,2442 b''
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import filecmp
11 import filecmp
12 import os
12 import os
13 import stat
13 import stat
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 addednodeid,
17 addednodeid,
18 hex,
18 hex,
19 modifiednodeid,
19 modifiednodeid,
20 nullid,
20 nullid,
21 nullrev,
21 nullrev,
22 short,
22 short,
23 wdirfilenodeids,
23 wdirfilenodeids,
24 wdirid,
24 wdirid,
25 )
25 )
26 from . import (
26 from . import (
27 dagop,
27 dagop,
28 encoding,
28 encoding,
29 error,
29 error,
30 fileset,
30 fileset,
31 match as matchmod,
31 match as matchmod,
32 obsolete as obsmod,
32 obsolete as obsmod,
33 patch,
33 patch,
34 pathutil,
34 pathutil,
35 phases,
35 phases,
36 pycompat,
36 pycompat,
37 repoview,
37 repoview,
38 scmutil,
38 scmutil,
39 sparse,
39 sparse,
40 subrepo,
40 subrepo,
41 subrepoutil,
41 subrepoutil,
42 util,
42 util,
43 )
43 )
44 from .utils import (
44 from .utils import (
45 dateutil,
45 dateutil,
46 stringutil,
46 stringutil,
47 )
47 )
48
48
49 propertycache = util.propertycache
49 propertycache = util.propertycache
50
50
class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
        be committed,
    memctx: a context that represents changes in-memory and can also
        be committed."""

    def __init__(self, repo):
        self._repo = repo

    def __bytes__(self):
        return short(self.node())

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return r"<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            # Objects without a ._rev (including non-contexts) never
            # compare equal.
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        return key in self._manifest

    def __getitem__(self, key):
        return self.filectx(key)

    def __iter__(self):
        return iter(self._manifest)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results, if this is
        a working copy context. For non-working copy contexts, it just returns
        the normal manifest."""
        return self.manifest()

    def _matchstatus(self, other, match):
        """This internal method provides a way for child objects to override the
        match operator.
        """
        return match

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in d.iteritems():
            if fn in deletedset:
                continue
            if value is None:
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in wdirfilenodeids:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [fn for fn in unknown if fn not in mf1 and
                       (not match or match(fn))]
            ignored = [fn for fn in ignored if fn not in mf1 and
                       (not match or match(fn))]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(modified, added, removed, deleted, unknown,
                              ignored, clean)

    @propertycache
    def substate(self):
        return subrepoutil.state(self, self._repo.ui)

    def subrev(self, subpath):
        return self.substate[subpath][1]

    def rev(self):
        return self._rev

    def node(self):
        return self._node

    def hex(self):
        return hex(self.node())

    def manifest(self):
        return self._manifest

    def manifestctx(self):
        return self._manifestctx

    def repo(self):
        return self._repo

    def phasestr(self):
        return phases.phasenames[self.phase()]

    def mutable(self):
        return self.phase() > phases.public

    def matchfileset(self, expr, badfn=None):
        return fileset.match(self, expr, badfn=badfn)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, 'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, 'extinct')

    def orphan(self):
        """True if the changeset is not obsolete, but its ancestor is"""
        return self.rev() in obsmod.getrevs(self._repo, 'orphan')

    def phasedivergent(self):
        """True if the changeset tries to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be phase-divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')

    def contentdivergent(self):
        """Is a successor of a changeset with multiple possible successor sets

        Only non-public and non-obsolete changesets may be content-divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')

    def isunstable(self):
        """True if the changeset is either orphan, phase-divergent or
        content-divergent"""
        return self.orphan() or self.phasedivergent() or self.contentdivergent()

    def instabilities(self):
        """return the list of instabilities affecting this changeset.

        Instabilities are returned as strings. possible values are:
        - orphan,
        - phase-divergent,
        - content-divergent.
        """
        instabilities = []
        if self.orphan():
            instabilities.append('orphan')
        if self.phasedivergent():
            instabilities.append('phase-divergent')
        if self.contentdivergent():
            instabilities.append('content-divergent')
        return instabilities

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        # Single-parent changesets get the null revision as second parent.
        return self._repo[nullrev]

    def _fileinfo(self, path):
        """Return (filenode, flags) for ``path``, using the cheapest
        already-cached source (full manifest, manifest delta, then the
        manifest log)."""
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(self._node, path,
                                                _('not found in manifest'))
        if r'_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (self._manifestdelta[path],
                        self._manifestdelta.flags(path))
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(self._node, path,
                                            _('not found in manifest'))

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return ''

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        '''return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        '''
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        r = self._repo
        return matchmod.match(r.root, r.getcwd(), pats,
                              include, exclude, default,
                              auditor=r.nofsauditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn)

    def diff(self, ctx2=None, match=None, changes=None, opts=None,
             losedatafn=None, prefix='', relroot='', copy=None,
             hunksfilterfn=None):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        return patch.diff(self._repo, ctx2, self, match=match, changes=changes,
                          opts=opts, losedatafn=losedatafn, prefix=prefix,
                          relroot=relroot, copy=copy,
                          hunksfilterfn=hunksfilterfn)

    def dirs(self):
        return self._manifest.dirs()

    def hasdir(self, dir):
        return self._manifest.hasdir(dir)

    def status(self, other=None, match=None, listignored=False,
               listclean=False, listunknown=False, listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        #
        # 'swapped' records whether the contexts were exchanged (the name
        # avoids shadowing the 'reversed' builtin).
        swapped = False
        if (not isinstance(ctx1, changectx)
            and isinstance(ctx2, changectx)):
            swapped = True
            ctx1, ctx2 = ctx2, ctx1

        match = self._repo.narrowmatch(match)
        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
                              listunknown)

        if swapped:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
                               r.clean)

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(rev2, match=submatch, ignored=listignored,
                               clean=listclean, unknown=listunknown,
                               listsubrepos=True)
                for rfiles, sfiles in zip(r, s):
                    rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)

        for l in r:
            l.sort()

        return r
378
378
class changectx(basectx):
    """A changecontext object makes access to data related to a particular
    changeset convenient. It represents a read-only context already present in
    the repo."""

    def __init__(self, repo, rev, node):
        super(changectx, self).__init__(repo)
        self._rev = rev
        self._node = node

    def __hash__(self):
        try:
            return hash(self._rev)
        except AttributeError:
            # No revision available yet: fall back to object identity.
            return id(self)

    def __nonzero__(self):
        return self._rev != nullrev

    __bool__ = __nonzero__

    @propertycache
    def _changeset(self):
        # Parsed changelog entry for this revision.
        return self._repo.changelog.changelogrevision(self.rev())

    @propertycache
    def _manifest(self):
        return self._manifestctx.read()

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._changeset.manifest]

    @propertycache
    def _manifestdelta(self):
        return self._manifestctx.readdelta()

    @propertycache
    def _parents(self):
        repo = self._repo
        first, second = repo.changelog.parentrevs(self._rev)
        if second == nullrev:
            return [repo[first]]
        return [repo[first], repo[second]]

    def changeset(self):
        """Return the raw changeset fields as a tuple."""
        rec = self._changeset
        return (
            rec.manifest,
            rec.user,
            rec.date,
            rec.files,
            rec.description,
            rec.extra,
        )

    def manifestnode(self):
        return self._changeset.manifest

    def user(self):
        return self._changeset.user

    def date(self):
        return self._changeset.date

    def files(self):
        return self._changeset.files

    def description(self):
        return self._changeset.description

    def branch(self):
        return encoding.tolocal(self._changeset.extra.get("branch"))

    def closesbranch(self):
        return 'close' in self._changeset.extra

    def extra(self):
        """Return a dict of extra information."""
        return self._changeset.extra

    def tags(self):
        """Return a list of byte tag names"""
        return self._repo.nodetags(self._node)

    def bookmarks(self):
        """Return a list of byte bookmark names."""
        return self._repo.nodebookmarks(self._node)

    def phase(self):
        return self._repo._phasecache.phase(self._repo, self._rev)

    def hidden(self):
        return self._rev in repoview.filterrevs(self._repo, 'visible')

    def isinmemory(self):
        return False

    def children(self):
        """return list of changectx contexts for each child changeset.

        This returns only the immediate child changesets. Use descendants() to
        recursively walk children.
        """
        childnodes = self._repo.changelog.children(self._node)
        return [self._repo[node] for node in childnodes]

    def ancestors(self):
        for rev in self._repo.changelog.ancestors([self._rev]):
            yield self._repo[rev]

    def descendants(self):
        """Recursively yield all children of the changeset.

        For just the immediate children, use children()
        """
        for rev in self._repo.changelog.descendants([self._rev]):
            yield self._repo[rev]

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(self._repo, path, fileid=fileid,
                       changectx=self, filelog=filelog)

    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            anc = nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            # experimental config: merge.preferancestor
            for r in self._repo.ui.configlist('merge', 'preferancestor'):
                try:
                    ctx = scmutil.revsymbol(self._repo, r)
                except error.RepoLookupError:
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                # No configured preference matched: use the revlog ancestor.
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (_("note: using %s as ancestor of %s and %s\n") %
                     (short(anc), short(self._node), short(n2))) +
                    ''.join(_(" alternatively, use --config "
                              "merge.preferancestor=%s\n") %
                            short(n) for n in sorted(cahs) if n != anc))
        return self._repo[anc]

    def isancestorof(self, other):
        """True if this changeset is an ancestor of other"""
        return self._repo.changelog.isancestorrev(self._rev, other._rev)

    def walk(self, match):
        '''Generates matching file names.'''

        # Wrap match.bad method to have message with nodeid
        def bad(fn, msg):
            # The manifest doesn't know about subrepos, so don't complain about
            # paths into valid subrepos.
            if any(fn == s or fn.startswith(s + '/')
                   for s in self.substate):
                return
            match.bad(fn, _('no such file in rev %s') % self)

        wrapped = matchmod.badmatch(self._repo.narrowmatch(match), bad)
        return self._manifest.walk(wrapped)

    def matches(self, match):
        return self.walk(match)
550
550
class basefilectx(object):
    """A filecontext object represents the common logic for its children:
    filectx: read-only access to a filerevision that is already present
             in the repo,
    workingfilectx: a filecontext that represents files from the working
                    directory,
    memfilectx: a filecontext that represents files in-memory,
    """
    @propertycache
    def _filelog(self):
        return self._repo.file(self._path)

    @propertycache
    def _changeid(self):
        if r'_changectx' in self.__dict__:
            return self._changectx.rev()
        elif r'_descendantrev' in self.__dict__:
            # this file context was created from a revision with a known
            # descendant, we can (lazily) correct for linkrev aliases
            return self._adjustlinkrev(self._descendantrev)
        else:
            return self._filelog.linkrev(self._filerev)

    @propertycache
    def _filenode(self):
        if r'_fileid' in self.__dict__:
            return self._filelog.lookup(self._fileid)
        else:
            return self._changectx.filenode(self._path)

    @propertycache
    def _filerev(self):
        return self._filelog.rev(self._filenode)

    @propertycache
    def _repopath(self):
        return self._path

    def __nonzero__(self):
        try:
            self._filenode
            return True
        except error.LookupError:
            # file is missing
            return False

    __bool__ = __nonzero__

    def __bytes__(self):
        try:
            return "%s@%s" % (self.path(), self._changectx)
        except error.LookupError:
            return "%s@???" % self.path()

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return r"<%s %s>" % (type(self).__name__, str(self))

    def __hash__(self):
        try:
            return hash((self._path, self._filenode))
        except AttributeError:
            return id(self)

    def __eq__(self, other):
        try:
            return (type(self) == type(other) and self._path == other._path
                    and self._filenode == other._filenode)
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def filerev(self):
        return self._filerev
    def filenode(self):
        return self._filenode
    @propertycache
    def _flags(self):
        return self._changectx.flags(self._path)
    def flags(self):
        return self._flags
    def filelog(self):
        return self._filelog
    def rev(self):
        return self._changeid
    def linkrev(self):
        return self._filelog.linkrev(self._filerev)
    def node(self):
        return self._changectx.node()
    def hex(self):
        return self._changectx.hex()
    def user(self):
        return self._changectx.user()
    def date(self):
        return self._changectx.date()
    def files(self):
        return self._changectx.files()
    def description(self):
        return self._changectx.description()
    def branch(self):
        return self._changectx.branch()
    def extra(self):
        return self._changectx.extra()
    def phase(self):
        return self._changectx.phase()
    def phasestr(self):
        return self._changectx.phasestr()
    def obsolete(self):
        return self._changectx.obsolete()
    def instabilities(self):
        return self._changectx.instabilities()
    def manifest(self):
        return self._changectx.manifest()
    def changectx(self):
        return self._changectx
    def renamed(self):
        return self._copied
    def repo(self):
        return self._repo
    def size(self):
        return len(self.data())

    def path(self):
        return self._path

    def isbinary(self):
        try:
            return stringutil.binary(self.data())
        except IOError:
            return False
    def isexec(self):
        return 'x' in self.flags()
    def islink(self):
        return 'l' in self.flags()

    def isabsent(self):
        """whether this filectx represents a file not in self._changectx

        This is mainly for merge code to detect change/delete conflicts. This is
        expected to be True for all subclasses of basectx."""
        return False

    _customcmp = False
    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        if fctx._customcmp:
            return fctx.cmp(self)

        if (fctx._filenode is None
            and (self._repo._encodefilterpats
                 # if file data starts with '\1\n', empty metadata block is
                 # prepended, which adds 4 bytes to filelog.size().
                 or self.size() - 4 == fctx.size())
            or self.size() == fctx.size()):
            return self._filelog.cmp(self._filenode, fctx.data())

        return True

    def _adjustlinkrev(self, srcrev, inclusive=False):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked
        """
        repo = self._repo
        cl = repo.unfiltered().changelog
        mfl = repo.manifestlog
        # fetch the linkrev
        lkr = self.linkrev()
        if srcrev == lkr:
            return lkr
        # hack to reuse ancestor computation when searching for renames
        memberanc = getattr(self, '_ancestrycontext', None)
        iteranc = None
        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True # we skipped the real (revless) source
        else:
            revs = [srcrev]
        if memberanc is None:
            memberanc = iteranc = cl.ancestors(revs, lkr,
                                               inclusive=inclusive)
        # check if this linkrev is an ancestor of srcrev
        if lkr not in memberanc:
            if iteranc is None:
                iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
            fnode = self._filenode
            path = self._path
            for a in iteranc:
                ac = cl.read(a) # get changeset data (we avoid object creation)
                if path in ac[3]: # checking the 'files' field.
                    # The file has been touched, check if the content is
                    # similar to the one we search for.
                    if fnode == mfl[ac[0]].readfast().get(path):
                        return a
            # In theory, we should never get out of that loop without a result.
            # But if manifest uses a buggy file revision (not children of the
            # one it replaces) we could. Such a buggy situation will likely
            # result is crash somewhere else at to some point.
        return lkr

    def introrev(self):
        """return the rev of the changeset which introduced this file revision

        This method is different from linkrev because it take into account the
        changeset the filectx was created from. It ensures the returned
        revision is one of its ancestors. This prevents bugs from
        'linkrev-shadowing' when a file revision is used by multiple
        changesets.
        """
        toprev = None
        attrs = vars(self)
        if r'_changeid' in attrs:
            # We have a cached value already
            toprev = self._changeid
        elif r'_changectx' in attrs:
            # We know which changelog entry we are coming from
            toprev = self._changectx.rev()

        if toprev is not None:
            return self._adjustlinkrev(toprev, inclusive=True)
        else:
            return self.linkrev()

    def introfilectx(self):
        """Return filectx having identical contents, but pointing to the
        changeset revision where this filectx was introduced"""
        introrev = self.introrev()
        if self.rev() == introrev:
            return self
        return self.filectx(self.filenode(), changeid=introrev)

    def _parentfilectx(self, path, fileid, filelog):
        """create parent filectx keeping ancestry info for _adjustlinkrev()"""
        fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
        if r'_changeid' in vars(self) or r'_changectx' in vars(self):
            # If self is associated with a changeset (probably explicitly
            # fed), ensure the created filectx is associated with a
            # changeset that is an ancestor of self.changectx.
            # This lets us later use _adjustlinkrev to get a correct link.
            fctx._descendantrev = self.rev()
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        elif r'_descendantrev' in vars(self):
            # Otherwise propagate _descendantrev if we have one associated.
            fctx._descendantrev = self._descendantrev
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        return fctx

    def parents(self):
        """Return parent filectxs, following copies if necessary."""
        _path = self._path
        fl = self._filelog
        parents = self._filelog.parents(self._filenode)
        pl = [(_path, node, fl) for node in parents if node != nullid]

        r = fl.renamed(self._filenode)
        if r:
            # - In the simple rename case, both parent are nullid, pl is empty.
            # - In case of merge, only one of the parent is null id and should
            # be replaced with the rename information. This parent is -always-
            # the first one.
            #
            # As null id have always been filtered out in the previous list
            # comprehension, inserting to 0 will always result in "replacing
            # first nullid parent with rename information.
            pl.insert(0, (r[0], r[1], self._repo.file(r[0])))

        return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]

    def p1(self):
        return self.parents()[0]

    def p2(self):
        p = self.parents()
        if len(p) == 2:
            return p[1]
        return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)

    def annotate(self, follow=False, skiprevs=None, diffopts=None):
        """Returns a list of annotateline objects for each line in the file

        - line.fctx is the filectx of the node where that line was last changed
        - line.lineno is the line number at the first appearance in the managed
          file
        - line.text is the data on that line (including newline character)
        """
        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            # Cut _descendantrev here to mitigate the penalty of lazy linkrev
            # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
            # from the topmost introrev (= srcrev) down to p.linkrev() if it
            # isn't an ancestor of the srcrev.
            f._changeid
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
            # from the cache to save time
            for p in pl:
                if not r'_filelog' in p.__dict__:
                    p._filelog = getlog(p.path())

            return pl

        # use linkrev to find the first changeset where self appeared
        base = self.introfilectx()
        if getattr(base, '_ancestrycontext', None) is None:
            cl = self._repo.changelog
            if base.rev() is None:
                # wctx is not inclusive, but works because _ancestrycontext
                # is used to test filelog revisions
                ac = cl.ancestors([p.rev() for p in base.parents()],
                                  inclusive=True)
            else:
                ac = cl.ancestors([base.rev()], inclusive=True)
            base._ancestrycontext = ac

        return dagop.annotate(base, parents, skiprevs=skiprevs,
                              diffopts=diffopts)

    def ancestors(self, followfirst=False):
        """Yield ancestor filectxs, newest (by linkrev) first."""
        visit = {}
        c = self
        if followfirst:
            cut = 1
        else:
            cut = None

        while True:
            for parent in c.parents()[:cut]:
                visit[(parent.linkrev(), parent.filenode())] = parent
            if not visit:
                break
            c = visit.pop(max(visit))
            yield c

    def decodeddata(self):
        """Returns `data()` after running repository decoding filters.

        This is often equivalent to how the data would be expressed on disk.
        """
        return self._repo.wwritedata(self.path(), self.data())
900
907
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid must be a revision number, if specified.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        assert (changeid is not None
                or fileid is not None
                or changectx is not None), \
                ("bad args: changeid=%r, fileid=%r, changectx=%r"
                 % (changeid, fileid, changectx))

        if filelog is not None:
            self._filelog = filelog

        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        try:
            return self._repo[self._changeid]
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to build
            # `changectx` for filtered revision. In such case we fallback to
            # creating `changectx` on the unfiltered version of the reposition.
            # This fallback should not be an issue because `changectx` from
            # `filectx` are not used in complex operations that care about
            # filtering.
            #
            # This fallback is a cheap and dirty fix that prevent several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better as "crash"
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when solving linkrev issue are on the table.
            return self._repo.unfiltered()[self._changeid]

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog, changeid=changeid)

    def rawdata(self):
        return self._filelog.revision(self._filenode, raw=True)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config("censor", "policy") == "ignore":
                return ""
            raise error.Abort(_("censored node: %s") % short(self._filenode),
                              hint=_("set censor.policy to ignore errors"))

    def size(self):
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return None

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]
1006
1013
1007 class committablectx(basectx):
1014 class committablectx(basectx):
1008 """A committablectx object provides common functionality for a context that
1015 """A committablectx object provides common functionality for a context that
1009 wants the ability to commit, e.g. workingctx or memctx."""
1016 wants the ability to commit, e.g. workingctx or memctx."""
1010 def __init__(self, repo, text="", user=None, date=None, extra=None,
1017 def __init__(self, repo, text="", user=None, date=None, extra=None,
1011 changes=None):
1018 changes=None):
1012 super(committablectx, self).__init__(repo)
1019 super(committablectx, self).__init__(repo)
1013 self._rev = None
1020 self._rev = None
1014 self._node = None
1021 self._node = None
1015 self._text = text
1022 self._text = text
1016 if date:
1023 if date:
1017 self._date = dateutil.parsedate(date)
1024 self._date = dateutil.parsedate(date)
1018 if user:
1025 if user:
1019 self._user = user
1026 self._user = user
1020 if changes:
1027 if changes:
1021 self._status = changes
1028 self._status = changes
1022
1029
1023 self._extra = {}
1030 self._extra = {}
1024 if extra:
1031 if extra:
1025 self._extra = extra.copy()
1032 self._extra = extra.copy()
1026 if 'branch' not in self._extra:
1033 if 'branch' not in self._extra:
1027 try:
1034 try:
1028 branch = encoding.fromlocal(self._repo.dirstate.branch())
1035 branch = encoding.fromlocal(self._repo.dirstate.branch())
1029 except UnicodeDecodeError:
1036 except UnicodeDecodeError:
1030 raise error.Abort(_('branch name not in UTF-8!'))
1037 raise error.Abort(_('branch name not in UTF-8!'))
1031 self._extra['branch'] = branch
1038 self._extra['branch'] = branch
1032 if self._extra['branch'] == '':
1039 if self._extra['branch'] == '':
1033 self._extra['branch'] = 'default'
1040 self._extra['branch'] = 'default'
1034
1041
1035 def __bytes__(self):
1042 def __bytes__(self):
1036 return bytes(self._parents[0]) + "+"
1043 return bytes(self._parents[0]) + "+"
1037
1044
1038 __str__ = encoding.strmethod(__bytes__)
1045 __str__ = encoding.strmethod(__bytes__)
1039
1046
1040 def __nonzero__(self):
1047 def __nonzero__(self):
1041 return True
1048 return True
1042
1049
1043 __bool__ = __nonzero__
1050 __bool__ = __nonzero__
1044
1051
1045 def _buildflagfunc(self):
1052 def _buildflagfunc(self):
1046 # Create a fallback function for getting file flags when the
1053 # Create a fallback function for getting file flags when the
1047 # filesystem doesn't support them
1054 # filesystem doesn't support them
1048
1055
1049 copiesget = self._repo.dirstate.copies().get
1056 copiesget = self._repo.dirstate.copies().get
1050 parents = self.parents()
1057 parents = self.parents()
1051 if len(parents) < 2:
1058 if len(parents) < 2:
1052 # when we have one parent, it's easy: copy from parent
1059 # when we have one parent, it's easy: copy from parent
1053 man = parents[0].manifest()
1060 man = parents[0].manifest()
1054 def func(f):
1061 def func(f):
1055 f = copiesget(f, f)
1062 f = copiesget(f, f)
1056 return man.flags(f)
1063 return man.flags(f)
1057 else:
1064 else:
1058 # merges are tricky: we try to reconstruct the unstored
1065 # merges are tricky: we try to reconstruct the unstored
1059 # result from the merge (issue1802)
1066 # result from the merge (issue1802)
1060 p1, p2 = parents
1067 p1, p2 = parents
1061 pa = p1.ancestor(p2)
1068 pa = p1.ancestor(p2)
1062 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1069 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1063
1070
1064 def func(f):
1071 def func(f):
1065 f = copiesget(f, f) # may be wrong for merges with copies
1072 f = copiesget(f, f) # may be wrong for merges with copies
1066 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1073 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1067 if fl1 == fl2:
1074 if fl1 == fl2:
1068 return fl1
1075 return fl1
1069 if fl1 == fla:
1076 if fl1 == fla:
1070 return fl2
1077 return fl2
1071 if fl2 == fla:
1078 if fl2 == fla:
1072 return fl1
1079 return fl1
1073 return '' # punt for conflicts
1080 return '' # punt for conflicts
1074
1081
1075 return func
1082 return func
1076
1083
1077 @propertycache
1084 @propertycache
1078 def _flagfunc(self):
1085 def _flagfunc(self):
1079 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1086 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1080
1087
1081 @propertycache
1088 @propertycache
1082 def _status(self):
1089 def _status(self):
1083 return self._repo.status()
1090 return self._repo.status()
1084
1091
1085 @propertycache
1092 @propertycache
1086 def _user(self):
1093 def _user(self):
1087 return self._repo.ui.username()
1094 return self._repo.ui.username()
1088
1095
1089 @propertycache
1096 @propertycache
1090 def _date(self):
1097 def _date(self):
1091 ui = self._repo.ui
1098 ui = self._repo.ui
1092 date = ui.configdate('devel', 'default-date')
1099 date = ui.configdate('devel', 'default-date')
1093 if date is None:
1100 if date is None:
1094 date = dateutil.makedate()
1101 date = dateutil.makedate()
1095 return date
1102 return date
1096
1103
1097 def subrev(self, subpath):
1104 def subrev(self, subpath):
1098 return None
1105 return None
1099
1106
1100 def manifestnode(self):
1107 def manifestnode(self):
1101 return None
1108 return None
1102 def user(self):
1109 def user(self):
1103 return self._user or self._repo.ui.username()
1110 return self._user or self._repo.ui.username()
1104 def date(self):
1111 def date(self):
1105 return self._date
1112 return self._date
1106 def description(self):
1113 def description(self):
1107 return self._text
1114 return self._text
1108 def files(self):
1115 def files(self):
1109 return sorted(self._status.modified + self._status.added +
1116 return sorted(self._status.modified + self._status.added +
1110 self._status.removed)
1117 self._status.removed)
1111
1118
1112 def modified(self):
1119 def modified(self):
1113 return self._status.modified
1120 return self._status.modified
1114 def added(self):
1121 def added(self):
1115 return self._status.added
1122 return self._status.added
1116 def removed(self):
1123 def removed(self):
1117 return self._status.removed
1124 return self._status.removed
1118 def deleted(self):
1125 def deleted(self):
1119 return self._status.deleted
1126 return self._status.deleted
1120 def branch(self):
1127 def branch(self):
1121 return encoding.tolocal(self._extra['branch'])
1128 return encoding.tolocal(self._extra['branch'])
1122 def closesbranch(self):
1129 def closesbranch(self):
1123 return 'close' in self._extra
1130 return 'close' in self._extra
1124 def extra(self):
1131 def extra(self):
1125 return self._extra
1132 return self._extra
1126
1133
1127 def isinmemory(self):
1134 def isinmemory(self):
1128 return False
1135 return False
1129
1136
1130 def tags(self):
1137 def tags(self):
1131 return []
1138 return []
1132
1139
1133 def bookmarks(self):
1140 def bookmarks(self):
1134 b = []
1141 b = []
1135 for p in self.parents():
1142 for p in self.parents():
1136 b.extend(p.bookmarks())
1143 b.extend(p.bookmarks())
1137 return b
1144 return b
1138
1145
1139 def phase(self):
1146 def phase(self):
1140 phase = phases.draft # default phase to draft
1147 phase = phases.draft # default phase to draft
1141 for p in self.parents():
1148 for p in self.parents():
1142 phase = max(phase, p.phase())
1149 phase = max(phase, p.phase())
1143 return phase
1150 return phase
1144
1151
1145 def hidden(self):
1152 def hidden(self):
1146 return False
1153 return False
1147
1154
1148 def children(self):
1155 def children(self):
1149 return []
1156 return []
1150
1157
1151 def flags(self, path):
1158 def flags(self, path):
1152 if r'_manifest' in self.__dict__:
1159 if r'_manifest' in self.__dict__:
1153 try:
1160 try:
1154 return self._manifest.flags(path)
1161 return self._manifest.flags(path)
1155 except KeyError:
1162 except KeyError:
1156 return ''
1163 return ''
1157
1164
1158 try:
1165 try:
1159 return self._flagfunc(path)
1166 return self._flagfunc(path)
1160 except OSError:
1167 except OSError:
1161 return ''
1168 return ''
1162
1169
1163 def ancestor(self, c2):
1170 def ancestor(self, c2):
1164 """return the "best" ancestor context of self and c2"""
1171 """return the "best" ancestor context of self and c2"""
1165 return self._parents[0].ancestor(c2) # punt on two parents for now
1172 return self._parents[0].ancestor(c2) # punt on two parents for now
1166
1173
1167 def walk(self, match):
1174 def walk(self, match):
1168 '''Generates matching file names.'''
1175 '''Generates matching file names.'''
1169 return sorted(self._repo.dirstate.walk(self._repo.narrowmatch(match),
1176 return sorted(self._repo.dirstate.walk(self._repo.narrowmatch(match),
1170 subrepos=sorted(self.substate),
1177 subrepos=sorted(self.substate),
1171 unknown=True, ignored=False))
1178 unknown=True, ignored=False))
1172
1179
1173 def matches(self, match):
1180 def matches(self, match):
1174 match = self._repo.narrowmatch(match)
1181 match = self._repo.narrowmatch(match)
1175 ds = self._repo.dirstate
1182 ds = self._repo.dirstate
1176 return sorted(f for f in ds.matches(match) if ds[f] != 'r')
1183 return sorted(f for f in ds.matches(match) if ds[f] != 'r')
1177
1184
1178 def ancestors(self):
1185 def ancestors(self):
1179 for p in self._parents:
1186 for p in self._parents:
1180 yield p
1187 yield p
1181 for a in self._repo.changelog.ancestors(
1188 for a in self._repo.changelog.ancestors(
1182 [p.rev() for p in self._parents]):
1189 [p.rev() for p in self._parents]):
1183 yield self._repo[a]
1190 yield self._repo[a]
1184
1191
1185 def markcommitted(self, node):
1192 def markcommitted(self, node):
1186 """Perform post-commit cleanup necessary after committing this ctx
1193 """Perform post-commit cleanup necessary after committing this ctx
1187
1194
1188 Specifically, this updates backing stores this working context
1195 Specifically, this updates backing stores this working context
1189 wraps to reflect the fact that the changes reflected by this
1196 wraps to reflect the fact that the changes reflected by this
1190 workingctx have been committed. For example, it marks
1197 workingctx have been committed. For example, it marks
1191 modified and added files as normal in the dirstate.
1198 modified and added files as normal in the dirstate.
1192
1199
1193 """
1200 """
1194
1201
1195 with self._repo.dirstate.parentchange():
1202 with self._repo.dirstate.parentchange():
1196 for f in self.modified() + self.added():
1203 for f in self.modified() + self.added():
1197 self._repo.dirstate.normal(f)
1204 self._repo.dirstate.normal(f)
1198 for f in self.removed():
1205 for f in self.removed():
1199 self._repo.dirstate.drop(f)
1206 self._repo.dirstate.drop(f)
1200 self._repo.dirstate.setparents(node)
1207 self._repo.dirstate.setparents(node)
1201
1208
1202 # write changes out explicitly, because nesting wlock at
1209 # write changes out explicitly, because nesting wlock at
1203 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1210 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1204 # from immediately doing so for subsequent changing files
1211 # from immediately doing so for subsequent changing files
1205 self._repo.dirstate.write(self._repo.currenttransaction())
1212 self._repo.dirstate.write(self._repo.currenttransaction())
1206
1213
1207 def dirty(self, missing=False, merge=True, branch=True):
1214 def dirty(self, missing=False, merge=True, branch=True):
1208 return False
1215 return False
1209
1216
1210 class workingctx(committablectx):
1217 class workingctx(committablectx):
1211 """A workingctx object makes access to data related to
1218 """A workingctx object makes access to data related to
1212 the current working directory convenient.
1219 the current working directory convenient.
1213 date - any valid date string or (unixtime, offset), or None.
1220 date - any valid date string or (unixtime, offset), or None.
1214 user - username string, or None.
1221 user - username string, or None.
1215 extra - a dictionary of extra values, or None.
1222 extra - a dictionary of extra values, or None.
1216 changes - a list of file lists as returned by localrepo.status()
1223 changes - a list of file lists as returned by localrepo.status()
1217 or None to use the repository status.
1224 or None to use the repository status.
1218 """
1225 """
1219 def __init__(self, repo, text="", user=None, date=None, extra=None,
1226 def __init__(self, repo, text="", user=None, date=None, extra=None,
1220 changes=None):
1227 changes=None):
1221 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1228 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1222
1229
1223 def __iter__(self):
1230 def __iter__(self):
1224 d = self._repo.dirstate
1231 d = self._repo.dirstate
1225 for f in d:
1232 for f in d:
1226 if d[f] != 'r':
1233 if d[f] != 'r':
1227 yield f
1234 yield f
1228
1235
1229 def __contains__(self, key):
1236 def __contains__(self, key):
1230 return self._repo.dirstate[key] not in "?r"
1237 return self._repo.dirstate[key] not in "?r"
1231
1238
1232 def hex(self):
1239 def hex(self):
1233 return hex(wdirid)
1240 return hex(wdirid)
1234
1241
1235 @propertycache
1242 @propertycache
1236 def _parents(self):
1243 def _parents(self):
1237 p = self._repo.dirstate.parents()
1244 p = self._repo.dirstate.parents()
1238 if p[1] == nullid:
1245 if p[1] == nullid:
1239 p = p[:-1]
1246 p = p[:-1]
1240 # use unfiltered repo to delay/avoid loading obsmarkers
1247 # use unfiltered repo to delay/avoid loading obsmarkers
1241 unfi = self._repo.unfiltered()
1248 unfi = self._repo.unfiltered()
1242 return [changectx(self._repo, unfi.changelog.rev(n), n) for n in p]
1249 return [changectx(self._repo, unfi.changelog.rev(n), n) for n in p]
1243
1250
1244 def _fileinfo(self, path):
1251 def _fileinfo(self, path):
1245 # populate __dict__['_manifest'] as workingctx has no _manifestdelta
1252 # populate __dict__['_manifest'] as workingctx has no _manifestdelta
1246 self._manifest
1253 self._manifest
1247 return super(workingctx, self)._fileinfo(path)
1254 return super(workingctx, self)._fileinfo(path)
1248
1255
1249 def filectx(self, path, filelog=None):
1256 def filectx(self, path, filelog=None):
1250 """get a file context from the working directory"""
1257 """get a file context from the working directory"""
1251 return workingfilectx(self._repo, path, workingctx=self,
1258 return workingfilectx(self._repo, path, workingctx=self,
1252 filelog=filelog)
1259 filelog=filelog)
1253
1260
1254 def dirty(self, missing=False, merge=True, branch=True):
1261 def dirty(self, missing=False, merge=True, branch=True):
1255 "check whether a working directory is modified"
1262 "check whether a working directory is modified"
1256 # check subrepos first
1263 # check subrepos first
1257 for s in sorted(self.substate):
1264 for s in sorted(self.substate):
1258 if self.sub(s).dirty(missing=missing):
1265 if self.sub(s).dirty(missing=missing):
1259 return True
1266 return True
1260 # check current working dir
1267 # check current working dir
1261 return ((merge and self.p2()) or
1268 return ((merge and self.p2()) or
1262 (branch and self.branch() != self.p1().branch()) or
1269 (branch and self.branch() != self.p1().branch()) or
1263 self.modified() or self.added() or self.removed() or
1270 self.modified() or self.added() or self.removed() or
1264 (missing and self.deleted()))
1271 (missing and self.deleted()))
1265
1272
1266 def add(self, list, prefix=""):
1273 def add(self, list, prefix=""):
1267 with self._repo.wlock():
1274 with self._repo.wlock():
1268 ui, ds = self._repo.ui, self._repo.dirstate
1275 ui, ds = self._repo.ui, self._repo.dirstate
1269 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1276 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1270 rejected = []
1277 rejected = []
1271 lstat = self._repo.wvfs.lstat
1278 lstat = self._repo.wvfs.lstat
1272 for f in list:
1279 for f in list:
1273 # ds.pathto() returns an absolute file when this is invoked from
1280 # ds.pathto() returns an absolute file when this is invoked from
1274 # the keyword extension. That gets flagged as non-portable on
1281 # the keyword extension. That gets flagged as non-portable on
1275 # Windows, since it contains the drive letter and colon.
1282 # Windows, since it contains the drive letter and colon.
1276 scmutil.checkportable(ui, os.path.join(prefix, f))
1283 scmutil.checkportable(ui, os.path.join(prefix, f))
1277 try:
1284 try:
1278 st = lstat(f)
1285 st = lstat(f)
1279 except OSError:
1286 except OSError:
1280 ui.warn(_("%s does not exist!\n") % uipath(f))
1287 ui.warn(_("%s does not exist!\n") % uipath(f))
1281 rejected.append(f)
1288 rejected.append(f)
1282 continue
1289 continue
1283 limit = ui.configbytes('ui', 'large-file-limit')
1290 limit = ui.configbytes('ui', 'large-file-limit')
1284 if limit != 0 and st.st_size > limit:
1291 if limit != 0 and st.st_size > limit:
1285 ui.warn(_("%s: up to %d MB of RAM may be required "
1292 ui.warn(_("%s: up to %d MB of RAM may be required "
1286 "to manage this file\n"
1293 "to manage this file\n"
1287 "(use 'hg revert %s' to cancel the "
1294 "(use 'hg revert %s' to cancel the "
1288 "pending addition)\n")
1295 "pending addition)\n")
1289 % (f, 3 * st.st_size // 1000000, uipath(f)))
1296 % (f, 3 * st.st_size // 1000000, uipath(f)))
1290 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1297 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1291 ui.warn(_("%s not added: only files and symlinks "
1298 ui.warn(_("%s not added: only files and symlinks "
1292 "supported currently\n") % uipath(f))
1299 "supported currently\n") % uipath(f))
1293 rejected.append(f)
1300 rejected.append(f)
1294 elif ds[f] in 'amn':
1301 elif ds[f] in 'amn':
1295 ui.warn(_("%s already tracked!\n") % uipath(f))
1302 ui.warn(_("%s already tracked!\n") % uipath(f))
1296 elif ds[f] == 'r':
1303 elif ds[f] == 'r':
1297 ds.normallookup(f)
1304 ds.normallookup(f)
1298 else:
1305 else:
1299 ds.add(f)
1306 ds.add(f)
1300 return rejected
1307 return rejected
1301
1308
1302 def forget(self, files, prefix=""):
1309 def forget(self, files, prefix=""):
1303 with self._repo.wlock():
1310 with self._repo.wlock():
1304 ds = self._repo.dirstate
1311 ds = self._repo.dirstate
1305 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1312 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1306 rejected = []
1313 rejected = []
1307 for f in files:
1314 for f in files:
1308 if f not in self._repo.dirstate:
1315 if f not in self._repo.dirstate:
1309 self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
1316 self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
1310 rejected.append(f)
1317 rejected.append(f)
1311 elif self._repo.dirstate[f] != 'a':
1318 elif self._repo.dirstate[f] != 'a':
1312 self._repo.dirstate.remove(f)
1319 self._repo.dirstate.remove(f)
1313 else:
1320 else:
1314 self._repo.dirstate.drop(f)
1321 self._repo.dirstate.drop(f)
1315 return rejected
1322 return rejected
1316
1323
1317 def undelete(self, list):
1324 def undelete(self, list):
1318 pctxs = self.parents()
1325 pctxs = self.parents()
1319 with self._repo.wlock():
1326 with self._repo.wlock():
1320 ds = self._repo.dirstate
1327 ds = self._repo.dirstate
1321 for f in list:
1328 for f in list:
1322 if self._repo.dirstate[f] != 'r':
1329 if self._repo.dirstate[f] != 'r':
1323 self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
1330 self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
1324 else:
1331 else:
1325 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1332 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1326 t = fctx.data()
1333 t = fctx.data()
1327 self._repo.wwrite(f, t, fctx.flags())
1334 self._repo.wwrite(f, t, fctx.flags())
1328 self._repo.dirstate.normal(f)
1335 self._repo.dirstate.normal(f)
1329
1336
1330 def copy(self, source, dest):
1337 def copy(self, source, dest):
1331 try:
1338 try:
1332 st = self._repo.wvfs.lstat(dest)
1339 st = self._repo.wvfs.lstat(dest)
1333 except OSError as err:
1340 except OSError as err:
1334 if err.errno != errno.ENOENT:
1341 if err.errno != errno.ENOENT:
1335 raise
1342 raise
1336 self._repo.ui.warn(_("%s does not exist!\n")
1343 self._repo.ui.warn(_("%s does not exist!\n")
1337 % self._repo.dirstate.pathto(dest))
1344 % self._repo.dirstate.pathto(dest))
1338 return
1345 return
1339 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1346 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1340 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1347 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1341 "symbolic link\n")
1348 "symbolic link\n")
1342 % self._repo.dirstate.pathto(dest))
1349 % self._repo.dirstate.pathto(dest))
1343 else:
1350 else:
1344 with self._repo.wlock():
1351 with self._repo.wlock():
1345 if self._repo.dirstate[dest] in '?':
1352 if self._repo.dirstate[dest] in '?':
1346 self._repo.dirstate.add(dest)
1353 self._repo.dirstate.add(dest)
1347 elif self._repo.dirstate[dest] in 'r':
1354 elif self._repo.dirstate[dest] in 'r':
1348 self._repo.dirstate.normallookup(dest)
1355 self._repo.dirstate.normallookup(dest)
1349 self._repo.dirstate.copy(source, dest)
1356 self._repo.dirstate.copy(source, dest)
1350
1357
1351 def match(self, pats=None, include=None, exclude=None, default='glob',
1358 def match(self, pats=None, include=None, exclude=None, default='glob',
1352 listsubrepos=False, badfn=None):
1359 listsubrepos=False, badfn=None):
1353 r = self._repo
1360 r = self._repo
1354
1361
1355 # Only a case insensitive filesystem needs magic to translate user input
1362 # Only a case insensitive filesystem needs magic to translate user input
1356 # to actual case in the filesystem.
1363 # to actual case in the filesystem.
1357 icasefs = not util.fscasesensitive(r.root)
1364 icasefs = not util.fscasesensitive(r.root)
1358 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1365 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1359 default, auditor=r.auditor, ctx=self,
1366 default, auditor=r.auditor, ctx=self,
1360 listsubrepos=listsubrepos, badfn=badfn,
1367 listsubrepos=listsubrepos, badfn=badfn,
1361 icasefs=icasefs)
1368 icasefs=icasefs)
1362
1369
1363 def _filtersuspectsymlink(self, files):
1370 def _filtersuspectsymlink(self, files):
1364 if not files or self._repo.dirstate._checklink:
1371 if not files or self._repo.dirstate._checklink:
1365 return files
1372 return files
1366
1373
1367 # Symlink placeholders may get non-symlink-like contents
1374 # Symlink placeholders may get non-symlink-like contents
1368 # via user error or dereferencing by NFS or Samba servers,
1375 # via user error or dereferencing by NFS or Samba servers,
1369 # so we filter out any placeholders that don't look like a
1376 # so we filter out any placeholders that don't look like a
1370 # symlink
1377 # symlink
1371 sane = []
1378 sane = []
1372 for f in files:
1379 for f in files:
1373 if self.flags(f) == 'l':
1380 if self.flags(f) == 'l':
1374 d = self[f].data()
1381 d = self[f].data()
1375 if (d == '' or len(d) >= 1024 or '\n' in d
1382 if (d == '' or len(d) >= 1024 or '\n' in d
1376 or stringutil.binary(d)):
1383 or stringutil.binary(d)):
1377 self._repo.ui.debug('ignoring suspect symlink placeholder'
1384 self._repo.ui.debug('ignoring suspect symlink placeholder'
1378 ' "%s"\n' % f)
1385 ' "%s"\n' % f)
1379 continue
1386 continue
1380 sane.append(f)
1387 sane.append(f)
1381 return sane
1388 return sane
1382
1389
1383 def _checklookup(self, files):
1390 def _checklookup(self, files):
1384 # check for any possibly clean files
1391 # check for any possibly clean files
1385 if not files:
1392 if not files:
1386 return [], [], []
1393 return [], [], []
1387
1394
1388 modified = []
1395 modified = []
1389 deleted = []
1396 deleted = []
1390 fixup = []
1397 fixup = []
1391 pctx = self._parents[0]
1398 pctx = self._parents[0]
1392 # do a full compare of any files that might have changed
1399 # do a full compare of any files that might have changed
1393 for f in sorted(files):
1400 for f in sorted(files):
1394 try:
1401 try:
1395 # This will return True for a file that got replaced by a
1402 # This will return True for a file that got replaced by a
1396 # directory in the interim, but fixing that is pretty hard.
1403 # directory in the interim, but fixing that is pretty hard.
1397 if (f not in pctx or self.flags(f) != pctx.flags(f)
1404 if (f not in pctx or self.flags(f) != pctx.flags(f)
1398 or pctx[f].cmp(self[f])):
1405 or pctx[f].cmp(self[f])):
1399 modified.append(f)
1406 modified.append(f)
1400 else:
1407 else:
1401 fixup.append(f)
1408 fixup.append(f)
1402 except (IOError, OSError):
1409 except (IOError, OSError):
1403 # A file become inaccessible in between? Mark it as deleted,
1410 # A file become inaccessible in between? Mark it as deleted,
1404 # matching dirstate behavior (issue5584).
1411 # matching dirstate behavior (issue5584).
1405 # The dirstate has more complex behavior around whether a
1412 # The dirstate has more complex behavior around whether a
1406 # missing file matches a directory, etc, but we don't need to
1413 # missing file matches a directory, etc, but we don't need to
1407 # bother with that: if f has made it to this point, we're sure
1414 # bother with that: if f has made it to this point, we're sure
1408 # it's in the dirstate.
1415 # it's in the dirstate.
1409 deleted.append(f)
1416 deleted.append(f)
1410
1417
1411 return modified, deleted, fixup
1418 return modified, deleted, fixup
1412
1419
1413 def _poststatusfixup(self, status, fixup):
1420 def _poststatusfixup(self, status, fixup):
1414 """update dirstate for files that are actually clean"""
1421 """update dirstate for files that are actually clean"""
1415 poststatus = self._repo.postdsstatus()
1422 poststatus = self._repo.postdsstatus()
1416 if fixup or poststatus:
1423 if fixup or poststatus:
1417 try:
1424 try:
1418 oldid = self._repo.dirstate.identity()
1425 oldid = self._repo.dirstate.identity()
1419
1426
1420 # updating the dirstate is optional
1427 # updating the dirstate is optional
1421 # so we don't wait on the lock
1428 # so we don't wait on the lock
1422 # wlock can invalidate the dirstate, so cache normal _after_
1429 # wlock can invalidate the dirstate, so cache normal _after_
1423 # taking the lock
1430 # taking the lock
1424 with self._repo.wlock(False):
1431 with self._repo.wlock(False):
1425 if self._repo.dirstate.identity() == oldid:
1432 if self._repo.dirstate.identity() == oldid:
1426 if fixup:
1433 if fixup:
1427 normal = self._repo.dirstate.normal
1434 normal = self._repo.dirstate.normal
1428 for f in fixup:
1435 for f in fixup:
1429 normal(f)
1436 normal(f)
1430 # write changes out explicitly, because nesting
1437 # write changes out explicitly, because nesting
1431 # wlock at runtime may prevent 'wlock.release()'
1438 # wlock at runtime may prevent 'wlock.release()'
1432 # after this block from doing so for subsequent
1439 # after this block from doing so for subsequent
1433 # changing files
1440 # changing files
1434 tr = self._repo.currenttransaction()
1441 tr = self._repo.currenttransaction()
1435 self._repo.dirstate.write(tr)
1442 self._repo.dirstate.write(tr)
1436
1443
1437 if poststatus:
1444 if poststatus:
1438 for ps in poststatus:
1445 for ps in poststatus:
1439 ps(self, status)
1446 ps(self, status)
1440 else:
1447 else:
1441 # in this case, writing changes out breaks
1448 # in this case, writing changes out breaks
1442 # consistency, because .hg/dirstate was
1449 # consistency, because .hg/dirstate was
1443 # already changed simultaneously after last
1450 # already changed simultaneously after last
1444 # caching (see also issue5584 for detail)
1451 # caching (see also issue5584 for detail)
1445 self._repo.ui.debug('skip updating dirstate: '
1452 self._repo.ui.debug('skip updating dirstate: '
1446 'identity mismatch\n')
1453 'identity mismatch\n')
1447 except error.LockError:
1454 except error.LockError:
1448 pass
1455 pass
1449 finally:
1456 finally:
1450 # Even if the wlock couldn't be grabbed, clear out the list.
1457 # Even if the wlock couldn't be grabbed, clear out the list.
1451 self._repo.clearpostdsstatus()
1458 self._repo.clearpostdsstatus()
1452
1459
1453 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1460 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1454 '''Gets the status from the dirstate -- internal use only.'''
1461 '''Gets the status from the dirstate -- internal use only.'''
1455 subrepos = []
1462 subrepos = []
1456 if '.hgsub' in self:
1463 if '.hgsub' in self:
1457 subrepos = sorted(self.substate)
1464 subrepos = sorted(self.substate)
1458 cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
1465 cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
1459 clean=clean, unknown=unknown)
1466 clean=clean, unknown=unknown)
1460
1467
1461 # check for any possibly clean files
1468 # check for any possibly clean files
1462 fixup = []
1469 fixup = []
1463 if cmp:
1470 if cmp:
1464 modified2, deleted2, fixup = self._checklookup(cmp)
1471 modified2, deleted2, fixup = self._checklookup(cmp)
1465 s.modified.extend(modified2)
1472 s.modified.extend(modified2)
1466 s.deleted.extend(deleted2)
1473 s.deleted.extend(deleted2)
1467
1474
1468 if fixup and clean:
1475 if fixup and clean:
1469 s.clean.extend(fixup)
1476 s.clean.extend(fixup)
1470
1477
1471 self._poststatusfixup(s, fixup)
1478 self._poststatusfixup(s, fixup)
1472
1479
1473 if match.always():
1480 if match.always():
1474 # cache for performance
1481 # cache for performance
1475 if s.unknown or s.ignored or s.clean:
1482 if s.unknown or s.ignored or s.clean:
1476 # "_status" is cached with list*=False in the normal route
1483 # "_status" is cached with list*=False in the normal route
1477 self._status = scmutil.status(s.modified, s.added, s.removed,
1484 self._status = scmutil.status(s.modified, s.added, s.removed,
1478 s.deleted, [], [], [])
1485 s.deleted, [], [], [])
1479 else:
1486 else:
1480 self._status = s
1487 self._status = s
1481
1488
1482 return s
1489 return s
1483
1490
1484 @propertycache
1491 @propertycache
1485 def _manifest(self):
1492 def _manifest(self):
1486 """generate a manifest corresponding to the values in self._status
1493 """generate a manifest corresponding to the values in self._status
1487
1494
1488 This reuse the file nodeid from parent, but we use special node
1495 This reuse the file nodeid from parent, but we use special node
1489 identifiers for added and modified files. This is used by manifests
1496 identifiers for added and modified files. This is used by manifests
1490 merge to see that files are different and by update logic to avoid
1497 merge to see that files are different and by update logic to avoid
1491 deleting newly added files.
1498 deleting newly added files.
1492 """
1499 """
1493 return self._buildstatusmanifest(self._status)
1500 return self._buildstatusmanifest(self._status)
1494
1501
1495 def _buildstatusmanifest(self, status):
1502 def _buildstatusmanifest(self, status):
1496 """Builds a manifest that includes the given status results."""
1503 """Builds a manifest that includes the given status results."""
1497 parents = self.parents()
1504 parents = self.parents()
1498
1505
1499 man = parents[0].manifest().copy()
1506 man = parents[0].manifest().copy()
1500
1507
1501 ff = self._flagfunc
1508 ff = self._flagfunc
1502 for i, l in ((addednodeid, status.added),
1509 for i, l in ((addednodeid, status.added),
1503 (modifiednodeid, status.modified)):
1510 (modifiednodeid, status.modified)):
1504 for f in l:
1511 for f in l:
1505 man[f] = i
1512 man[f] = i
1506 try:
1513 try:
1507 man.setflag(f, ff(f))
1514 man.setflag(f, ff(f))
1508 except OSError:
1515 except OSError:
1509 pass
1516 pass
1510
1517
1511 for f in status.deleted + status.removed:
1518 for f in status.deleted + status.removed:
1512 if f in man:
1519 if f in man:
1513 del man[f]
1520 del man[f]
1514
1521
1515 return man
1522 return man
1516
1523
1517 def _buildstatus(self, other, s, match, listignored, listclean,
1524 def _buildstatus(self, other, s, match, listignored, listclean,
1518 listunknown):
1525 listunknown):
1519 """build a status with respect to another context
1526 """build a status with respect to another context
1520
1527
1521 This includes logic for maintaining the fast path of status when
1528 This includes logic for maintaining the fast path of status when
1522 comparing the working directory against its parent, which is to skip
1529 comparing the working directory against its parent, which is to skip
1523 building a new manifest if self (working directory) is not comparing
1530 building a new manifest if self (working directory) is not comparing
1524 against its parent (repo['.']).
1531 against its parent (repo['.']).
1525 """
1532 """
1526 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1533 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1527 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1534 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1528 # might have accidentally ended up with the entire contents of the file
1535 # might have accidentally ended up with the entire contents of the file
1529 # they are supposed to be linking to.
1536 # they are supposed to be linking to.
1530 s.modified[:] = self._filtersuspectsymlink(s.modified)
1537 s.modified[:] = self._filtersuspectsymlink(s.modified)
1531 if other != self._repo['.']:
1538 if other != self._repo['.']:
1532 s = super(workingctx, self)._buildstatus(other, s, match,
1539 s = super(workingctx, self)._buildstatus(other, s, match,
1533 listignored, listclean,
1540 listignored, listclean,
1534 listunknown)
1541 listunknown)
1535 return s
1542 return s
1536
1543
1537 def _matchstatus(self, other, match):
1544 def _matchstatus(self, other, match):
1538 """override the match method with a filter for directory patterns
1545 """override the match method with a filter for directory patterns
1539
1546
1540 We use inheritance to customize the match.bad method only in cases of
1547 We use inheritance to customize the match.bad method only in cases of
1541 workingctx since it belongs only to the working directory when
1548 workingctx since it belongs only to the working directory when
1542 comparing against the parent changeset.
1549 comparing against the parent changeset.
1543
1550
1544 If we aren't comparing against the working directory's parent, then we
1551 If we aren't comparing against the working directory's parent, then we
1545 just use the default match object sent to us.
1552 just use the default match object sent to us.
1546 """
1553 """
1547 if other != self._repo['.']:
1554 if other != self._repo['.']:
1548 def bad(f, msg):
1555 def bad(f, msg):
1549 # 'f' may be a directory pattern from 'match.files()',
1556 # 'f' may be a directory pattern from 'match.files()',
1550 # so 'f not in ctx1' is not enough
1557 # so 'f not in ctx1' is not enough
1551 if f not in other and not other.hasdir(f):
1558 if f not in other and not other.hasdir(f):
1552 self._repo.ui.warn('%s: %s\n' %
1559 self._repo.ui.warn('%s: %s\n' %
1553 (self._repo.dirstate.pathto(f), msg))
1560 (self._repo.dirstate.pathto(f), msg))
1554 match.bad = bad
1561 match.bad = bad
1555 return match
1562 return match
1556
1563
1557 def markcommitted(self, node):
1564 def markcommitted(self, node):
1558 super(workingctx, self).markcommitted(node)
1565 super(workingctx, self).markcommitted(node)
1559
1566
1560 sparse.aftercommit(self._repo, node)
1567 sparse.aftercommit(self._repo, node)
1561
1568
1562 class committablefilectx(basefilectx):
1569 class committablefilectx(basefilectx):
1563 """A committablefilectx provides common functionality for a file context
1570 """A committablefilectx provides common functionality for a file context
1564 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1571 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1565 def __init__(self, repo, path, filelog=None, ctx=None):
1572 def __init__(self, repo, path, filelog=None, ctx=None):
1566 self._repo = repo
1573 self._repo = repo
1567 self._path = path
1574 self._path = path
1568 self._changeid = None
1575 self._changeid = None
1569 self._filerev = self._filenode = None
1576 self._filerev = self._filenode = None
1570
1577
1571 if filelog is not None:
1578 if filelog is not None:
1572 self._filelog = filelog
1579 self._filelog = filelog
1573 if ctx:
1580 if ctx:
1574 self._changectx = ctx
1581 self._changectx = ctx
1575
1582
1576 def __nonzero__(self):
1583 def __nonzero__(self):
1577 return True
1584 return True
1578
1585
1579 __bool__ = __nonzero__
1586 __bool__ = __nonzero__
1580
1587
1581 def linkrev(self):
1588 def linkrev(self):
1582 # linked to self._changectx no matter if file is modified or not
1589 # linked to self._changectx no matter if file is modified or not
1583 return self.rev()
1590 return self.rev()
1584
1591
1585 def parents(self):
1592 def parents(self):
1586 '''return parent filectxs, following copies if necessary'''
1593 '''return parent filectxs, following copies if necessary'''
1587 def filenode(ctx, path):
1594 def filenode(ctx, path):
1588 return ctx._manifest.get(path, nullid)
1595 return ctx._manifest.get(path, nullid)
1589
1596
1590 path = self._path
1597 path = self._path
1591 fl = self._filelog
1598 fl = self._filelog
1592 pcl = self._changectx._parents
1599 pcl = self._changectx._parents
1593 renamed = self.renamed()
1600 renamed = self.renamed()
1594
1601
1595 if renamed:
1602 if renamed:
1596 pl = [renamed + (None,)]
1603 pl = [renamed + (None,)]
1597 else:
1604 else:
1598 pl = [(path, filenode(pcl[0], path), fl)]
1605 pl = [(path, filenode(pcl[0], path), fl)]
1599
1606
1600 for pc in pcl[1:]:
1607 for pc in pcl[1:]:
1601 pl.append((path, filenode(pc, path), fl))
1608 pl.append((path, filenode(pc, path), fl))
1602
1609
1603 return [self._parentfilectx(p, fileid=n, filelog=l)
1610 return [self._parentfilectx(p, fileid=n, filelog=l)
1604 for p, n, l in pl if n != nullid]
1611 for p, n, l in pl if n != nullid]
1605
1612
1606 def children(self):
1613 def children(self):
1607 return []
1614 return []
1608
1615
1609 class workingfilectx(committablefilectx):
1616 class workingfilectx(committablefilectx):
1610 """A workingfilectx object makes access to data related to a particular
1617 """A workingfilectx object makes access to data related to a particular
1611 file in the working directory convenient."""
1618 file in the working directory convenient."""
1612 def __init__(self, repo, path, filelog=None, workingctx=None):
1619 def __init__(self, repo, path, filelog=None, workingctx=None):
1613 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1620 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1614
1621
1615 @propertycache
1622 @propertycache
1616 def _changectx(self):
1623 def _changectx(self):
1617 return workingctx(self._repo)
1624 return workingctx(self._repo)
1618
1625
1619 def data(self):
1626 def data(self):
1620 return self._repo.wread(self._path)
1627 return self._repo.wread(self._path)
1621 def renamed(self):
1628 def renamed(self):
1622 rp = self._repo.dirstate.copied(self._path)
1629 rp = self._repo.dirstate.copied(self._path)
1623 if not rp:
1630 if not rp:
1624 return None
1631 return None
1625 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1632 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1626
1633
1627 def size(self):
1634 def size(self):
1628 return self._repo.wvfs.lstat(self._path).st_size
1635 return self._repo.wvfs.lstat(self._path).st_size
1629 def date(self):
1636 def date(self):
1630 t, tz = self._changectx.date()
1637 t, tz = self._changectx.date()
1631 try:
1638 try:
1632 return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
1639 return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
1633 except OSError as err:
1640 except OSError as err:
1634 if err.errno != errno.ENOENT:
1641 if err.errno != errno.ENOENT:
1635 raise
1642 raise
1636 return (t, tz)
1643 return (t, tz)
1637
1644
1638 def exists(self):
1645 def exists(self):
1639 return self._repo.wvfs.exists(self._path)
1646 return self._repo.wvfs.exists(self._path)
1640
1647
1641 def lexists(self):
1648 def lexists(self):
1642 return self._repo.wvfs.lexists(self._path)
1649 return self._repo.wvfs.lexists(self._path)
1643
1650
1644 def audit(self):
1651 def audit(self):
1645 return self._repo.wvfs.audit(self._path)
1652 return self._repo.wvfs.audit(self._path)
1646
1653
1647 def cmp(self, fctx):
1654 def cmp(self, fctx):
1648 """compare with other file context
1655 """compare with other file context
1649
1656
1650 returns True if different than fctx.
1657 returns True if different than fctx.
1651 """
1658 """
1652 # fctx should be a filectx (not a workingfilectx)
1659 # fctx should be a filectx (not a workingfilectx)
1653 # invert comparison to reuse the same code path
1660 # invert comparison to reuse the same code path
1654 return fctx.cmp(self)
1661 return fctx.cmp(self)
1655
1662
1656 def remove(self, ignoremissing=False):
1663 def remove(self, ignoremissing=False):
1657 """wraps unlink for a repo's working directory"""
1664 """wraps unlink for a repo's working directory"""
1658 rmdir = self._repo.ui.configbool('experimental', 'removeemptydirs')
1665 rmdir = self._repo.ui.configbool('experimental', 'removeemptydirs')
1659 self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing,
1666 self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing,
1660 rmdir=rmdir)
1667 rmdir=rmdir)
1661
1668
1662 def write(self, data, flags, backgroundclose=False, **kwargs):
1669 def write(self, data, flags, backgroundclose=False, **kwargs):
1663 """wraps repo.wwrite"""
1670 """wraps repo.wwrite"""
1664 self._repo.wwrite(self._path, data, flags,
1671 self._repo.wwrite(self._path, data, flags,
1665 backgroundclose=backgroundclose,
1672 backgroundclose=backgroundclose,
1666 **kwargs)
1673 **kwargs)
1667
1674
1668 def markcopied(self, src):
1675 def markcopied(self, src):
1669 """marks this file a copy of `src`"""
1676 """marks this file a copy of `src`"""
1670 if self._repo.dirstate[self._path] in "nma":
1677 if self._repo.dirstate[self._path] in "nma":
1671 self._repo.dirstate.copy(src, self._path)
1678 self._repo.dirstate.copy(src, self._path)
1672
1679
1673 def clearunknown(self):
1680 def clearunknown(self):
1674 """Removes conflicting items in the working directory so that
1681 """Removes conflicting items in the working directory so that
1675 ``write()`` can be called successfully.
1682 ``write()`` can be called successfully.
1676 """
1683 """
1677 wvfs = self._repo.wvfs
1684 wvfs = self._repo.wvfs
1678 f = self._path
1685 f = self._path
1679 wvfs.audit(f)
1686 wvfs.audit(f)
1680 if self._repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
1687 if self._repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
1681 # remove files under the directory as they should already be
1688 # remove files under the directory as they should already be
1682 # warned and backed up
1689 # warned and backed up
1683 if wvfs.isdir(f) and not wvfs.islink(f):
1690 if wvfs.isdir(f) and not wvfs.islink(f):
1684 wvfs.rmtree(f, forcibly=True)
1691 wvfs.rmtree(f, forcibly=True)
1685 for p in reversed(list(util.finddirs(f))):
1692 for p in reversed(list(util.finddirs(f))):
1686 if wvfs.isfileorlink(p):
1693 if wvfs.isfileorlink(p):
1687 wvfs.unlink(p)
1694 wvfs.unlink(p)
1688 break
1695 break
1689 else:
1696 else:
1690 # don't remove files if path conflicts are not processed
1697 # don't remove files if path conflicts are not processed
1691 if wvfs.isdir(f) and not wvfs.islink(f):
1698 if wvfs.isdir(f) and not wvfs.islink(f):
1692 wvfs.removedirs(f)
1699 wvfs.removedirs(f)
1693
1700
1694 def setflags(self, l, x):
1701 def setflags(self, l, x):
1695 self._repo.wvfs.setflags(self._path, l, x)
1702 self._repo.wvfs.setflags(self._path, l, x)
1696
1703
1697 class overlayworkingctx(committablectx):
1704 class overlayworkingctx(committablectx):
1698 """Wraps another mutable context with a write-back cache that can be
1705 """Wraps another mutable context with a write-back cache that can be
1699 converted into a commit context.
1706 converted into a commit context.
1700
1707
1701 self._cache[path] maps to a dict with keys: {
1708 self._cache[path] maps to a dict with keys: {
1702 'exists': bool?
1709 'exists': bool?
1703 'date': date?
1710 'date': date?
1704 'data': str?
1711 'data': str?
1705 'flags': str?
1712 'flags': str?
1706 'copied': str? (path or None)
1713 'copied': str? (path or None)
1707 }
1714 }
1708 If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
1715 If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
1709 is `False`, the file was deleted.
1716 is `False`, the file was deleted.
1710 """
1717 """
1711
1718
1712 def __init__(self, repo):
1719 def __init__(self, repo):
1713 super(overlayworkingctx, self).__init__(repo)
1720 super(overlayworkingctx, self).__init__(repo)
1714 self.clean()
1721 self.clean()
1715
1722
1716 def setbase(self, wrappedctx):
1723 def setbase(self, wrappedctx):
1717 self._wrappedctx = wrappedctx
1724 self._wrappedctx = wrappedctx
1718 self._parents = [wrappedctx]
1725 self._parents = [wrappedctx]
1719 # Drop old manifest cache as it is now out of date.
1726 # Drop old manifest cache as it is now out of date.
1720 # This is necessary when, e.g., rebasing several nodes with one
1727 # This is necessary when, e.g., rebasing several nodes with one
1721 # ``overlayworkingctx`` (e.g. with --collapse).
1728 # ``overlayworkingctx`` (e.g. with --collapse).
1722 util.clearcachedproperty(self, '_manifest')
1729 util.clearcachedproperty(self, '_manifest')
1723
1730
1724 def data(self, path):
1731 def data(self, path):
1725 if self.isdirty(path):
1732 if self.isdirty(path):
1726 if self._cache[path]['exists']:
1733 if self._cache[path]['exists']:
1727 if self._cache[path]['data']:
1734 if self._cache[path]['data']:
1728 return self._cache[path]['data']
1735 return self._cache[path]['data']
1729 else:
1736 else:
1730 # Must fallback here, too, because we only set flags.
1737 # Must fallback here, too, because we only set flags.
1731 return self._wrappedctx[path].data()
1738 return self._wrappedctx[path].data()
1732 else:
1739 else:
1733 raise error.ProgrammingError("No such file or directory: %s" %
1740 raise error.ProgrammingError("No such file or directory: %s" %
1734 path)
1741 path)
1735 else:
1742 else:
1736 return self._wrappedctx[path].data()
1743 return self._wrappedctx[path].data()
1737
1744
1738 @propertycache
1745 @propertycache
1739 def _manifest(self):
1746 def _manifest(self):
1740 parents = self.parents()
1747 parents = self.parents()
1741 man = parents[0].manifest().copy()
1748 man = parents[0].manifest().copy()
1742
1749
1743 flag = self._flagfunc
1750 flag = self._flagfunc
1744 for path in self.added():
1751 for path in self.added():
1745 man[path] = addednodeid
1752 man[path] = addednodeid
1746 man.setflag(path, flag(path))
1753 man.setflag(path, flag(path))
1747 for path in self.modified():
1754 for path in self.modified():
1748 man[path] = modifiednodeid
1755 man[path] = modifiednodeid
1749 man.setflag(path, flag(path))
1756 man.setflag(path, flag(path))
1750 for path in self.removed():
1757 for path in self.removed():
1751 del man[path]
1758 del man[path]
1752 return man
1759 return man
1753
1760
1754 @propertycache
1761 @propertycache
1755 def _flagfunc(self):
1762 def _flagfunc(self):
1756 def f(path):
1763 def f(path):
1757 return self._cache[path]['flags']
1764 return self._cache[path]['flags']
1758 return f
1765 return f
1759
1766
1760 def files(self):
1767 def files(self):
1761 return sorted(self.added() + self.modified() + self.removed())
1768 return sorted(self.added() + self.modified() + self.removed())
1762
1769
1763 def modified(self):
1770 def modified(self):
1764 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1771 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1765 self._existsinparent(f)]
1772 self._existsinparent(f)]
1766
1773
1767 def added(self):
1774 def added(self):
1768 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1775 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1769 not self._existsinparent(f)]
1776 not self._existsinparent(f)]
1770
1777
1771 def removed(self):
1778 def removed(self):
1772 return [f for f in self._cache.keys() if
1779 return [f for f in self._cache.keys() if
1773 not self._cache[f]['exists'] and self._existsinparent(f)]
1780 not self._cache[f]['exists'] and self._existsinparent(f)]
1774
1781
1775 def isinmemory(self):
1782 def isinmemory(self):
1776 return True
1783 return True
1777
1784
1778 def filedate(self, path):
1785 def filedate(self, path):
1779 if self.isdirty(path):
1786 if self.isdirty(path):
1780 return self._cache[path]['date']
1787 return self._cache[path]['date']
1781 else:
1788 else:
1782 return self._wrappedctx[path].date()
1789 return self._wrappedctx[path].date()
1783
1790
1784 def markcopied(self, path, origin):
1791 def markcopied(self, path, origin):
1785 if self.isdirty(path):
1792 if self.isdirty(path):
1786 self._cache[path]['copied'] = origin
1793 self._cache[path]['copied'] = origin
1787 else:
1794 else:
1788 raise error.ProgrammingError('markcopied() called on clean context')
1795 raise error.ProgrammingError('markcopied() called on clean context')
1789
1796
1790 def copydata(self, path):
1797 def copydata(self, path):
1791 if self.isdirty(path):
1798 if self.isdirty(path):
1792 return self._cache[path]['copied']
1799 return self._cache[path]['copied']
1793 else:
1800 else:
1794 raise error.ProgrammingError('copydata() called on clean context')
1801 raise error.ProgrammingError('copydata() called on clean context')
1795
1802
1796 def flags(self, path):
1803 def flags(self, path):
1797 if self.isdirty(path):
1804 if self.isdirty(path):
1798 if self._cache[path]['exists']:
1805 if self._cache[path]['exists']:
1799 return self._cache[path]['flags']
1806 return self._cache[path]['flags']
1800 else:
1807 else:
1801 raise error.ProgrammingError("No such file or directory: %s" %
1808 raise error.ProgrammingError("No such file or directory: %s" %
1802 self._path)
1809 self._path)
1803 else:
1810 else:
1804 return self._wrappedctx[path].flags()
1811 return self._wrappedctx[path].flags()
1805
1812
1806 def _existsinparent(self, path):
1813 def _existsinparent(self, path):
1807 try:
1814 try:
1808 # ``commitctx` raises a ``ManifestLookupError`` if a path does not
1815 # ``commitctx` raises a ``ManifestLookupError`` if a path does not
1809 # exist, unlike ``workingctx``, which returns a ``workingfilectx``
1816 # exist, unlike ``workingctx``, which returns a ``workingfilectx``
1810 # with an ``exists()`` function.
1817 # with an ``exists()`` function.
1811 self._wrappedctx[path]
1818 self._wrappedctx[path]
1812 return True
1819 return True
1813 except error.ManifestLookupError:
1820 except error.ManifestLookupError:
1814 return False
1821 return False
1815
1822
1816 def _auditconflicts(self, path):
1823 def _auditconflicts(self, path):
1817 """Replicates conflict checks done by wvfs.write().
1824 """Replicates conflict checks done by wvfs.write().
1818
1825
1819 Since we never write to the filesystem and never call `applyupdates` in
1826 Since we never write to the filesystem and never call `applyupdates` in
1820 IMM, we'll never check that a path is actually writable -- e.g., because
1827 IMM, we'll never check that a path is actually writable -- e.g., because
1821 it adds `a/foo`, but `a` is actually a file in the other commit.
1828 it adds `a/foo`, but `a` is actually a file in the other commit.
1822 """
1829 """
1823 def fail(path, component):
1830 def fail(path, component):
1824 # p1() is the base and we're receiving "writes" for p2()'s
1831 # p1() is the base and we're receiving "writes" for p2()'s
1825 # files.
1832 # files.
1826 if 'l' in self.p1()[component].flags():
1833 if 'l' in self.p1()[component].flags():
1827 raise error.Abort("error: %s conflicts with symlink %s "
1834 raise error.Abort("error: %s conflicts with symlink %s "
1828 "in %s." % (path, component,
1835 "in %s." % (path, component,
1829 self.p1().rev()))
1836 self.p1().rev()))
1830 else:
1837 else:
1831 raise error.Abort("error: '%s' conflicts with file '%s' in "
1838 raise error.Abort("error: '%s' conflicts with file '%s' in "
1832 "%s." % (path, component,
1839 "%s." % (path, component,
1833 self.p1().rev()))
1840 self.p1().rev()))
1834
1841
1835 # Test that each new directory to be created to write this path from p2
1842 # Test that each new directory to be created to write this path from p2
1836 # is not a file in p1.
1843 # is not a file in p1.
1837 components = path.split('/')
1844 components = path.split('/')
1838 for i in pycompat.xrange(len(components)):
1845 for i in pycompat.xrange(len(components)):
1839 component = "/".join(components[0:i])
1846 component = "/".join(components[0:i])
1840 if component in self.p1() and self._cache[component]['exists']:
1847 if component in self.p1() and self._cache[component]['exists']:
1841 fail(path, component)
1848 fail(path, component)
1842
1849
1843 # Test the other direction -- that this path from p2 isn't a directory
1850 # Test the other direction -- that this path from p2 isn't a directory
1844 # in p1 (test that p1 doesn't any paths matching `path/*`).
1851 # in p1 (test that p1 doesn't any paths matching `path/*`).
1845 match = matchmod.match('/', '', [path + '/'], default=b'relpath')
1852 match = matchmod.match('/', '', [path + '/'], default=b'relpath')
1846 matches = self.p1().manifest().matches(match)
1853 matches = self.p1().manifest().matches(match)
1847 mfiles = matches.keys()
1854 mfiles = matches.keys()
1848 if len(mfiles) > 0:
1855 if len(mfiles) > 0:
1849 if len(mfiles) == 1 and mfiles[0] == path:
1856 if len(mfiles) == 1 and mfiles[0] == path:
1850 return
1857 return
1851 # omit the files which are deleted in current IMM wctx
1858 # omit the files which are deleted in current IMM wctx
1852 mfiles = [m for m in mfiles if self._cache[m]['exists']]
1859 mfiles = [m for m in mfiles if self._cache[m]['exists']]
1853 if not mfiles:
1860 if not mfiles:
1854 return
1861 return
1855 raise error.Abort("error: file '%s' cannot be written because "
1862 raise error.Abort("error: file '%s' cannot be written because "
1856 " '%s/' is a folder in %s (containing %d "
1863 " '%s/' is a folder in %s (containing %d "
1857 "entries: %s)"
1864 "entries: %s)"
1858 % (path, path, self.p1(), len(mfiles),
1865 % (path, path, self.p1(), len(mfiles),
1859 ', '.join(mfiles)))
1866 ', '.join(mfiles)))
1860
1867
1861 def write(self, path, data, flags='', **kwargs):
1868 def write(self, path, data, flags='', **kwargs):
1862 if data is None:
1869 if data is None:
1863 raise error.ProgrammingError("data must be non-None")
1870 raise error.ProgrammingError("data must be non-None")
1864 self._auditconflicts(path)
1871 self._auditconflicts(path)
1865 self._markdirty(path, exists=True, data=data, date=dateutil.makedate(),
1872 self._markdirty(path, exists=True, data=data, date=dateutil.makedate(),
1866 flags=flags)
1873 flags=flags)
1867
1874
1868 def setflags(self, path, l, x):
1875 def setflags(self, path, l, x):
1869 flag = ''
1876 flag = ''
1870 if l:
1877 if l:
1871 flag = 'l'
1878 flag = 'l'
1872 elif x:
1879 elif x:
1873 flag = 'x'
1880 flag = 'x'
1874 self._markdirty(path, exists=True, date=dateutil.makedate(),
1881 self._markdirty(path, exists=True, date=dateutil.makedate(),
1875 flags=flag)
1882 flags=flag)
1876
1883
1877 def remove(self, path):
1884 def remove(self, path):
1878 self._markdirty(path, exists=False)
1885 self._markdirty(path, exists=False)
1879
1886
1880 def exists(self, path):
1887 def exists(self, path):
1881 """exists behaves like `lexists`, but needs to follow symlinks and
1888 """exists behaves like `lexists`, but needs to follow symlinks and
1882 return False if they are broken.
1889 return False if they are broken.
1883 """
1890 """
1884 if self.isdirty(path):
1891 if self.isdirty(path):
1885 # If this path exists and is a symlink, "follow" it by calling
1892 # If this path exists and is a symlink, "follow" it by calling
1886 # exists on the destination path.
1893 # exists on the destination path.
1887 if (self._cache[path]['exists'] and
1894 if (self._cache[path]['exists'] and
1888 'l' in self._cache[path]['flags']):
1895 'l' in self._cache[path]['flags']):
1889 return self.exists(self._cache[path]['data'].strip())
1896 return self.exists(self._cache[path]['data'].strip())
1890 else:
1897 else:
1891 return self._cache[path]['exists']
1898 return self._cache[path]['exists']
1892
1899
1893 return self._existsinparent(path)
1900 return self._existsinparent(path)
1894
1901
1895 def lexists(self, path):
1902 def lexists(self, path):
1896 """lexists returns True if the path exists"""
1903 """lexists returns True if the path exists"""
1897 if self.isdirty(path):
1904 if self.isdirty(path):
1898 return self._cache[path]['exists']
1905 return self._cache[path]['exists']
1899
1906
1900 return self._existsinparent(path)
1907 return self._existsinparent(path)
1901
1908
1902 def size(self, path):
1909 def size(self, path):
1903 if self.isdirty(path):
1910 if self.isdirty(path):
1904 if self._cache[path]['exists']:
1911 if self._cache[path]['exists']:
1905 return len(self._cache[path]['data'])
1912 return len(self._cache[path]['data'])
1906 else:
1913 else:
1907 raise error.ProgrammingError("No such file or directory: %s" %
1914 raise error.ProgrammingError("No such file or directory: %s" %
1908 self._path)
1915 self._path)
1909 return self._wrappedctx[path].size()
1916 return self._wrappedctx[path].size()
1910
1917
1911 def tomemctx(self, text, branch=None, extra=None, date=None, parents=None,
1918 def tomemctx(self, text, branch=None, extra=None, date=None, parents=None,
1912 user=None, editor=None):
1919 user=None, editor=None):
1913 """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
1920 """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
1914 committed.
1921 committed.
1915
1922
1916 ``text`` is the commit message.
1923 ``text`` is the commit message.
1917 ``parents`` (optional) are rev numbers.
1924 ``parents`` (optional) are rev numbers.
1918 """
1925 """
1919 # Default parents to the wrapped contexts' if not passed.
1926 # Default parents to the wrapped contexts' if not passed.
1920 if parents is None:
1927 if parents is None:
1921 parents = self._wrappedctx.parents()
1928 parents = self._wrappedctx.parents()
1922 if len(parents) == 1:
1929 if len(parents) == 1:
1923 parents = (parents[0], None)
1930 parents = (parents[0], None)
1924
1931
1925 # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
1932 # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
1926 if parents[1] is None:
1933 if parents[1] is None:
1927 parents = (self._repo[parents[0]], None)
1934 parents = (self._repo[parents[0]], None)
1928 else:
1935 else:
1929 parents = (self._repo[parents[0]], self._repo[parents[1]])
1936 parents = (self._repo[parents[0]], self._repo[parents[1]])
1930
1937
1931 files = self._cache.keys()
1938 files = self._cache.keys()
1932 def getfile(repo, memctx, path):
1939 def getfile(repo, memctx, path):
1933 if self._cache[path]['exists']:
1940 if self._cache[path]['exists']:
1934 return memfilectx(repo, memctx, path,
1941 return memfilectx(repo, memctx, path,
1935 self._cache[path]['data'],
1942 self._cache[path]['data'],
1936 'l' in self._cache[path]['flags'],
1943 'l' in self._cache[path]['flags'],
1937 'x' in self._cache[path]['flags'],
1944 'x' in self._cache[path]['flags'],
1938 self._cache[path]['copied'])
1945 self._cache[path]['copied'])
1939 else:
1946 else:
1940 # Returning None, but including the path in `files`, is
1947 # Returning None, but including the path in `files`, is
1941 # necessary for memctx to register a deletion.
1948 # necessary for memctx to register a deletion.
1942 return None
1949 return None
1943 return memctx(self._repo, parents, text, files, getfile, date=date,
1950 return memctx(self._repo, parents, text, files, getfile, date=date,
1944 extra=extra, user=user, branch=branch, editor=editor)
1951 extra=extra, user=user, branch=branch, editor=editor)
1945
1952
1946 def isdirty(self, path):
1953 def isdirty(self, path):
1947 return path in self._cache
1954 return path in self._cache
1948
1955
1949 def isempty(self):
1956 def isempty(self):
1950 # We need to discard any keys that are actually clean before the empty
1957 # We need to discard any keys that are actually clean before the empty
1951 # commit check.
1958 # commit check.
1952 self._compact()
1959 self._compact()
1953 return len(self._cache) == 0
1960 return len(self._cache) == 0
1954
1961
1955 def clean(self):
1962 def clean(self):
1956 self._cache = {}
1963 self._cache = {}
1957
1964
1958 def _compact(self):
1965 def _compact(self):
1959 """Removes keys from the cache that are actually clean, by comparing
1966 """Removes keys from the cache that are actually clean, by comparing
1960 them with the underlying context.
1967 them with the underlying context.
1961
1968
1962 This can occur during the merge process, e.g. by passing --tool :local
1969 This can occur during the merge process, e.g. by passing --tool :local
1963 to resolve a conflict.
1970 to resolve a conflict.
1964 """
1971 """
1965 keys = []
1972 keys = []
1966 for path in self._cache.keys():
1973 for path in self._cache.keys():
1967 cache = self._cache[path]
1974 cache = self._cache[path]
1968 try:
1975 try:
1969 underlying = self._wrappedctx[path]
1976 underlying = self._wrappedctx[path]
1970 if (underlying.data() == cache['data'] and
1977 if (underlying.data() == cache['data'] and
1971 underlying.flags() == cache['flags']):
1978 underlying.flags() == cache['flags']):
1972 keys.append(path)
1979 keys.append(path)
1973 except error.ManifestLookupError:
1980 except error.ManifestLookupError:
1974 # Path not in the underlying manifest (created).
1981 # Path not in the underlying manifest (created).
1975 continue
1982 continue
1976
1983
1977 for path in keys:
1984 for path in keys:
1978 del self._cache[path]
1985 del self._cache[path]
1979 return keys
1986 return keys
1980
1987
1981 def _markdirty(self, path, exists, data=None, date=None, flags=''):
1988 def _markdirty(self, path, exists, data=None, date=None, flags=''):
1982 # data not provided, let's see if we already have some; if not, let's
1989 # data not provided, let's see if we already have some; if not, let's
1983 # grab it from our underlying context, so that we always have data if
1990 # grab it from our underlying context, so that we always have data if
1984 # the file is marked as existing.
1991 # the file is marked as existing.
1985 if exists and data is None:
1992 if exists and data is None:
1986 oldentry = self._cache.get(path) or {}
1993 oldentry = self._cache.get(path) or {}
1987 data = oldentry.get('data') or self._wrappedctx[path].data()
1994 data = oldentry.get('data') or self._wrappedctx[path].data()
1988
1995
1989 self._cache[path] = {
1996 self._cache[path] = {
1990 'exists': exists,
1997 'exists': exists,
1991 'data': data,
1998 'data': data,
1992 'date': date,
1999 'date': date,
1993 'flags': flags,
2000 'flags': flags,
1994 'copied': None,
2001 'copied': None,
1995 }
2002 }
1996
2003
1997 def filectx(self, path, filelog=None):
2004 def filectx(self, path, filelog=None):
1998 return overlayworkingfilectx(self._repo, path, parent=self,
2005 return overlayworkingfilectx(self._repo, path, parent=self,
1999 filelog=filelog)
2006 filelog=filelog)
2000
2007
2001 class overlayworkingfilectx(committablefilectx):
2008 class overlayworkingfilectx(committablefilectx):
2002 """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
2009 """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
2003 cache, which can be flushed through later by calling ``flush()``."""
2010 cache, which can be flushed through later by calling ``flush()``."""
2004
2011
2005 def __init__(self, repo, path, filelog=None, parent=None):
2012 def __init__(self, repo, path, filelog=None, parent=None):
2006 super(overlayworkingfilectx, self).__init__(repo, path, filelog,
2013 super(overlayworkingfilectx, self).__init__(repo, path, filelog,
2007 parent)
2014 parent)
2008 self._repo = repo
2015 self._repo = repo
2009 self._parent = parent
2016 self._parent = parent
2010 self._path = path
2017 self._path = path
2011
2018
2012 def cmp(self, fctx):
2019 def cmp(self, fctx):
2013 return self.data() != fctx.data()
2020 return self.data() != fctx.data()
2014
2021
2015 def changectx(self):
2022 def changectx(self):
2016 return self._parent
2023 return self._parent
2017
2024
2018 def data(self):
2025 def data(self):
2019 return self._parent.data(self._path)
2026 return self._parent.data(self._path)
2020
2027
2021 def date(self):
2028 def date(self):
2022 return self._parent.filedate(self._path)
2029 return self._parent.filedate(self._path)
2023
2030
2024 def exists(self):
2031 def exists(self):
2025 return self.lexists()
2032 return self.lexists()
2026
2033
2027 def lexists(self):
2034 def lexists(self):
2028 return self._parent.exists(self._path)
2035 return self._parent.exists(self._path)
2029
2036
2030 def renamed(self):
2037 def renamed(self):
2031 path = self._parent.copydata(self._path)
2038 path = self._parent.copydata(self._path)
2032 if not path:
2039 if not path:
2033 return None
2040 return None
2034 return path, self._changectx._parents[0]._manifest.get(path, nullid)
2041 return path, self._changectx._parents[0]._manifest.get(path, nullid)
2035
2042
2036 def size(self):
2043 def size(self):
2037 return self._parent.size(self._path)
2044 return self._parent.size(self._path)
2038
2045
2039 def markcopied(self, origin):
2046 def markcopied(self, origin):
2040 self._parent.markcopied(self._path, origin)
2047 self._parent.markcopied(self._path, origin)
2041
2048
2042 def audit(self):
2049 def audit(self):
2043 pass
2050 pass
2044
2051
2045 def flags(self):
2052 def flags(self):
2046 return self._parent.flags(self._path)
2053 return self._parent.flags(self._path)
2047
2054
2048 def setflags(self, islink, isexec):
2055 def setflags(self, islink, isexec):
2049 return self._parent.setflags(self._path, islink, isexec)
2056 return self._parent.setflags(self._path, islink, isexec)
2050
2057
2051 def write(self, data, flags, backgroundclose=False, **kwargs):
2058 def write(self, data, flags, backgroundclose=False, **kwargs):
2052 return self._parent.write(self._path, data, flags, **kwargs)
2059 return self._parent.write(self._path, data, flags, **kwargs)
2053
2060
2054 def remove(self, ignoremissing=False):
2061 def remove(self, ignoremissing=False):
2055 return self._parent.remove(self._path)
2062 return self._parent.remove(self._path)
2056
2063
2057 def clearunknown(self):
2064 def clearunknown(self):
2058 pass
2065 pass
2059
2066
2060 class workingcommitctx(workingctx):
2067 class workingcommitctx(workingctx):
2061 """A workingcommitctx object makes access to data related to
2068 """A workingcommitctx object makes access to data related to
2062 the revision being committed convenient.
2069 the revision being committed convenient.
2063
2070
2064 This hides changes in the working directory, if they aren't
2071 This hides changes in the working directory, if they aren't
2065 committed in this context.
2072 committed in this context.
2066 """
2073 """
2067 def __init__(self, repo, changes,
2074 def __init__(self, repo, changes,
2068 text="", user=None, date=None, extra=None):
2075 text="", user=None, date=None, extra=None):
2069 super(workingcommitctx, self).__init__(repo, text, user, date, extra,
2076 super(workingcommitctx, self).__init__(repo, text, user, date, extra,
2070 changes)
2077 changes)
2071
2078
2072 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
2079 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
2073 """Return matched files only in ``self._status``
2080 """Return matched files only in ``self._status``
2074
2081
2075 Uncommitted files appear "clean" via this context, even if
2082 Uncommitted files appear "clean" via this context, even if
2076 they aren't actually so in the working directory.
2083 they aren't actually so in the working directory.
2077 """
2084 """
2078 if clean:
2085 if clean:
2079 clean = [f for f in self._manifest if f not in self._changedset]
2086 clean = [f for f in self._manifest if f not in self._changedset]
2080 else:
2087 else:
2081 clean = []
2088 clean = []
2082 return scmutil.status([f for f in self._status.modified if match(f)],
2089 return scmutil.status([f for f in self._status.modified if match(f)],
2083 [f for f in self._status.added if match(f)],
2090 [f for f in self._status.added if match(f)],
2084 [f for f in self._status.removed if match(f)],
2091 [f for f in self._status.removed if match(f)],
2085 [], [], [], clean)
2092 [], [], [], clean)
2086
2093
2087 @propertycache
2094 @propertycache
2088 def _changedset(self):
2095 def _changedset(self):
2089 """Return the set of files changed in this context
2096 """Return the set of files changed in this context
2090 """
2097 """
2091 changed = set(self._status.modified)
2098 changed = set(self._status.modified)
2092 changed.update(self._status.added)
2099 changed.update(self._status.added)
2093 changed.update(self._status.removed)
2100 changed.update(self._status.removed)
2094 return changed
2101 return changed
2095
2102
2096 def makecachingfilectxfn(func):
2103 def makecachingfilectxfn(func):
2097 """Create a filectxfn that caches based on the path.
2104 """Create a filectxfn that caches based on the path.
2098
2105
2099 We can't use util.cachefunc because it uses all arguments as the cache
2106 We can't use util.cachefunc because it uses all arguments as the cache
2100 key and this creates a cycle since the arguments include the repo and
2107 key and this creates a cycle since the arguments include the repo and
2101 memctx.
2108 memctx.
2102 """
2109 """
2103 cache = {}
2110 cache = {}
2104
2111
2105 def getfilectx(repo, memctx, path):
2112 def getfilectx(repo, memctx, path):
2106 if path not in cache:
2113 if path not in cache:
2107 cache[path] = func(repo, memctx, path)
2114 cache[path] = func(repo, memctx, path)
2108 return cache[path]
2115 return cache[path]
2109
2116
2110 return getfilectx
2117 return getfilectx
2111
2118
2112 def memfilefromctx(ctx):
2119 def memfilefromctx(ctx):
2113 """Given a context return a memfilectx for ctx[path]
2120 """Given a context return a memfilectx for ctx[path]
2114
2121
2115 This is a convenience method for building a memctx based on another
2122 This is a convenience method for building a memctx based on another
2116 context.
2123 context.
2117 """
2124 """
2118 def getfilectx(repo, memctx, path):
2125 def getfilectx(repo, memctx, path):
2119 fctx = ctx[path]
2126 fctx = ctx[path]
2120 # this is weird but apparently we only keep track of one parent
2127 # this is weird but apparently we only keep track of one parent
2121 # (why not only store that instead of a tuple?)
2128 # (why not only store that instead of a tuple?)
2122 copied = fctx.renamed()
2129 copied = fctx.renamed()
2123 if copied:
2130 if copied:
2124 copied = copied[0]
2131 copied = copied[0]
2125 return memfilectx(repo, memctx, path, fctx.data(),
2132 return memfilectx(repo, memctx, path, fctx.data(),
2126 islink=fctx.islink(), isexec=fctx.isexec(),
2133 islink=fctx.islink(), isexec=fctx.isexec(),
2127 copied=copied)
2134 copied=copied)
2128
2135
2129 return getfilectx
2136 return getfilectx
2130
2137
2131 def memfilefrompatch(patchstore):
2138 def memfilefrompatch(patchstore):
2132 """Given a patch (e.g. patchstore object) return a memfilectx
2139 """Given a patch (e.g. patchstore object) return a memfilectx
2133
2140
2134 This is a convenience method for building a memctx based on a patchstore.
2141 This is a convenience method for building a memctx based on a patchstore.
2135 """
2142 """
2136 def getfilectx(repo, memctx, path):
2143 def getfilectx(repo, memctx, path):
2137 data, mode, copied = patchstore.getfile(path)
2144 data, mode, copied = patchstore.getfile(path)
2138 if data is None:
2145 if data is None:
2139 return None
2146 return None
2140 islink, isexec = mode
2147 islink, isexec = mode
2141 return memfilectx(repo, memctx, path, data, islink=islink,
2148 return memfilectx(repo, memctx, path, data, islink=islink,
2142 isexec=isexec, copied=copied)
2149 isexec=isexec, copied=copied)
2143
2150
2144 return getfilectx
2151 return getfilectx
2145
2152
2146 class memctx(committablectx):
2153 class memctx(committablectx):
2147 """Use memctx to perform in-memory commits via localrepo.commitctx().
2154 """Use memctx to perform in-memory commits via localrepo.commitctx().
2148
2155
2149 Revision information is supplied at initialization time while
2156 Revision information is supplied at initialization time while
2150 related files data and is made available through a callback
2157 related files data and is made available through a callback
2151 mechanism. 'repo' is the current localrepo, 'parents' is a
2158 mechanism. 'repo' is the current localrepo, 'parents' is a
2152 sequence of two parent revisions identifiers (pass None for every
2159 sequence of two parent revisions identifiers (pass None for every
2153 missing parent), 'text' is the commit message and 'files' lists
2160 missing parent), 'text' is the commit message and 'files' lists
2154 names of files touched by the revision (normalized and relative to
2161 names of files touched by the revision (normalized and relative to
2155 repository root).
2162 repository root).
2156
2163
2157 filectxfn(repo, memctx, path) is a callable receiving the
2164 filectxfn(repo, memctx, path) is a callable receiving the
2158 repository, the current memctx object and the normalized path of
2165 repository, the current memctx object and the normalized path of
2159 requested file, relative to repository root. It is fired by the
2166 requested file, relative to repository root. It is fired by the
2160 commit function for every file in 'files', but calls order is
2167 commit function for every file in 'files', but calls order is
2161 undefined. If the file is available in the revision being
2168 undefined. If the file is available in the revision being
2162 committed (updated or added), filectxfn returns a memfilectx
2169 committed (updated or added), filectxfn returns a memfilectx
2163 object. If the file was removed, filectxfn return None for recent
2170 object. If the file was removed, filectxfn return None for recent
2164 Mercurial. Moved files are represented by marking the source file
2171 Mercurial. Moved files are represented by marking the source file
2165 removed and the new file added with copy information (see
2172 removed and the new file added with copy information (see
2166 memfilectx).
2173 memfilectx).
2167
2174
2168 user receives the committer name and defaults to current
2175 user receives the committer name and defaults to current
2169 repository username, date is the commit date in any format
2176 repository username, date is the commit date in any format
2170 supported by dateutil.parsedate() and defaults to current date, extra
2177 supported by dateutil.parsedate() and defaults to current date, extra
2171 is a dictionary of metadata or is left empty.
2178 is a dictionary of metadata or is left empty.
2172 """
2179 """
2173
2180
2174 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2181 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2175 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2182 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2176 # this field to determine what to do in filectxfn.
2183 # this field to determine what to do in filectxfn.
2177 _returnnoneformissingfiles = True
2184 _returnnoneformissingfiles = True
2178
2185
2179 def __init__(self, repo, parents, text, files, filectxfn, user=None,
2186 def __init__(self, repo, parents, text, files, filectxfn, user=None,
2180 date=None, extra=None, branch=None, editor=False):
2187 date=None, extra=None, branch=None, editor=False):
2181 super(memctx, self).__init__(repo, text, user, date, extra)
2188 super(memctx, self).__init__(repo, text, user, date, extra)
2182 self._rev = None
2189 self._rev = None
2183 self._node = None
2190 self._node = None
2184 parents = [(p or nullid) for p in parents]
2191 parents = [(p or nullid) for p in parents]
2185 p1, p2 = parents
2192 p1, p2 = parents
2186 self._parents = [self._repo[p] for p in (p1, p2)]
2193 self._parents = [self._repo[p] for p in (p1, p2)]
2187 files = sorted(set(files))
2194 files = sorted(set(files))
2188 self._files = files
2195 self._files = files
2189 if branch is not None:
2196 if branch is not None:
2190 self._extra['branch'] = encoding.fromlocal(branch)
2197 self._extra['branch'] = encoding.fromlocal(branch)
2191 self.substate = {}
2198 self.substate = {}
2192
2199
2193 if isinstance(filectxfn, patch.filestore):
2200 if isinstance(filectxfn, patch.filestore):
2194 filectxfn = memfilefrompatch(filectxfn)
2201 filectxfn = memfilefrompatch(filectxfn)
2195 elif not callable(filectxfn):
2202 elif not callable(filectxfn):
2196 # if store is not callable, wrap it in a function
2203 # if store is not callable, wrap it in a function
2197 filectxfn = memfilefromctx(filectxfn)
2204 filectxfn = memfilefromctx(filectxfn)
2198
2205
2199 # memoizing increases performance for e.g. vcs convert scenarios.
2206 # memoizing increases performance for e.g. vcs convert scenarios.
2200 self._filectxfn = makecachingfilectxfn(filectxfn)
2207 self._filectxfn = makecachingfilectxfn(filectxfn)
2201
2208
2202 if editor:
2209 if editor:
2203 self._text = editor(self._repo, self, [])
2210 self._text = editor(self._repo, self, [])
2204 self._repo.savecommitmessage(self._text)
2211 self._repo.savecommitmessage(self._text)
2205
2212
2206 def filectx(self, path, filelog=None):
2213 def filectx(self, path, filelog=None):
2207 """get a file context from the working directory
2214 """get a file context from the working directory
2208
2215
2209 Returns None if file doesn't exist and should be removed."""
2216 Returns None if file doesn't exist and should be removed."""
2210 return self._filectxfn(self._repo, self, path)
2217 return self._filectxfn(self._repo, self, path)
2211
2218
2212 def commit(self):
2219 def commit(self):
2213 """commit context to the repo"""
2220 """commit context to the repo"""
2214 return self._repo.commitctx(self)
2221 return self._repo.commitctx(self)
2215
2222
2216 @propertycache
2223 @propertycache
2217 def _manifest(self):
2224 def _manifest(self):
2218 """generate a manifest based on the return values of filectxfn"""
2225 """generate a manifest based on the return values of filectxfn"""
2219
2226
2220 # keep this simple for now; just worry about p1
2227 # keep this simple for now; just worry about p1
2221 pctx = self._parents[0]
2228 pctx = self._parents[0]
2222 man = pctx.manifest().copy()
2229 man = pctx.manifest().copy()
2223
2230
2224 for f in self._status.modified:
2231 for f in self._status.modified:
2225 man[f] = modifiednodeid
2232 man[f] = modifiednodeid
2226
2233
2227 for f in self._status.added:
2234 for f in self._status.added:
2228 man[f] = addednodeid
2235 man[f] = addednodeid
2229
2236
2230 for f in self._status.removed:
2237 for f in self._status.removed:
2231 if f in man:
2238 if f in man:
2232 del man[f]
2239 del man[f]
2233
2240
2234 return man
2241 return man
2235
2242
2236 @propertycache
2243 @propertycache
2237 def _status(self):
2244 def _status(self):
2238 """Calculate exact status from ``files`` specified at construction
2245 """Calculate exact status from ``files`` specified at construction
2239 """
2246 """
2240 man1 = self.p1().manifest()
2247 man1 = self.p1().manifest()
2241 p2 = self._parents[1]
2248 p2 = self._parents[1]
2242 # "1 < len(self._parents)" can't be used for checking
2249 # "1 < len(self._parents)" can't be used for checking
2243 # existence of the 2nd parent, because "memctx._parents" is
2250 # existence of the 2nd parent, because "memctx._parents" is
2244 # explicitly initialized by the list, of which length is 2.
2251 # explicitly initialized by the list, of which length is 2.
2245 if p2.node() != nullid:
2252 if p2.node() != nullid:
2246 man2 = p2.manifest()
2253 man2 = p2.manifest()
2247 managing = lambda f: f in man1 or f in man2
2254 managing = lambda f: f in man1 or f in man2
2248 else:
2255 else:
2249 managing = lambda f: f in man1
2256 managing = lambda f: f in man1
2250
2257
2251 modified, added, removed = [], [], []
2258 modified, added, removed = [], [], []
2252 for f in self._files:
2259 for f in self._files:
2253 if not managing(f):
2260 if not managing(f):
2254 added.append(f)
2261 added.append(f)
2255 elif self[f]:
2262 elif self[f]:
2256 modified.append(f)
2263 modified.append(f)
2257 else:
2264 else:
2258 removed.append(f)
2265 removed.append(f)
2259
2266
2260 return scmutil.status(modified, added, removed, [], [], [], [])
2267 return scmutil.status(modified, added, removed, [], [], [], [])
2261
2268
2262 class memfilectx(committablefilectx):
2269 class memfilectx(committablefilectx):
2263 """memfilectx represents an in-memory file to commit.
2270 """memfilectx represents an in-memory file to commit.
2264
2271
2265 See memctx and committablefilectx for more details.
2272 See memctx and committablefilectx for more details.
2266 """
2273 """
2267 def __init__(self, repo, changectx, path, data, islink=False,
2274 def __init__(self, repo, changectx, path, data, islink=False,
2268 isexec=False, copied=None):
2275 isexec=False, copied=None):
2269 """
2276 """
2270 path is the normalized file path relative to repository root.
2277 path is the normalized file path relative to repository root.
2271 data is the file content as a string.
2278 data is the file content as a string.
2272 islink is True if the file is a symbolic link.
2279 islink is True if the file is a symbolic link.
2273 isexec is True if the file is executable.
2280 isexec is True if the file is executable.
2274 copied is the source file path if current file was copied in the
2281 copied is the source file path if current file was copied in the
2275 revision being committed, or None."""
2282 revision being committed, or None."""
2276 super(memfilectx, self).__init__(repo, path, None, changectx)
2283 super(memfilectx, self).__init__(repo, path, None, changectx)
2277 self._data = data
2284 self._data = data
2278 if islink:
2285 if islink:
2279 self._flags = 'l'
2286 self._flags = 'l'
2280 elif isexec:
2287 elif isexec:
2281 self._flags = 'x'
2288 self._flags = 'x'
2282 else:
2289 else:
2283 self._flags = ''
2290 self._flags = ''
2284 self._copied = None
2291 self._copied = None
2285 if copied:
2292 if copied:
2286 self._copied = (copied, nullid)
2293 self._copied = (copied, nullid)
2287
2294
2288 def data(self):
2295 def data(self):
2289 return self._data
2296 return self._data
2290
2297
2291 def remove(self, ignoremissing=False):
2298 def remove(self, ignoremissing=False):
2292 """wraps unlink for a repo's working directory"""
2299 """wraps unlink for a repo's working directory"""
2293 # need to figure out what to do here
2300 # need to figure out what to do here
2294 del self._changectx[self._path]
2301 del self._changectx[self._path]
2295
2302
2296 def write(self, data, flags, **kwargs):
2303 def write(self, data, flags, **kwargs):
2297 """wraps repo.wwrite"""
2304 """wraps repo.wwrite"""
2298 self._data = data
2305 self._data = data
2299
2306
2300
2307
2301 class metadataonlyctx(committablectx):
2308 class metadataonlyctx(committablectx):
2302 """Like memctx but it's reusing the manifest of different commit.
2309 """Like memctx but it's reusing the manifest of different commit.
2303 Intended to be used by lightweight operations that are creating
2310 Intended to be used by lightweight operations that are creating
2304 metadata-only changes.
2311 metadata-only changes.
2305
2312
2306 Revision information is supplied at initialization time. 'repo' is the
2313 Revision information is supplied at initialization time. 'repo' is the
2307 current localrepo, 'ctx' is original revision which manifest we're reuisng
2314 current localrepo, 'ctx' is original revision which manifest we're reuisng
2308 'parents' is a sequence of two parent revisions identifiers (pass None for
2315 'parents' is a sequence of two parent revisions identifiers (pass None for
2309 every missing parent), 'text' is the commit.
2316 every missing parent), 'text' is the commit.
2310
2317
2311 user receives the committer name and defaults to current repository
2318 user receives the committer name and defaults to current repository
2312 username, date is the commit date in any format supported by
2319 username, date is the commit date in any format supported by
2313 dateutil.parsedate() and defaults to current date, extra is a dictionary of
2320 dateutil.parsedate() and defaults to current date, extra is a dictionary of
2314 metadata or is left empty.
2321 metadata or is left empty.
2315 """
2322 """
2316 def __init__(self, repo, originalctx, parents=None, text=None, user=None,
2323 def __init__(self, repo, originalctx, parents=None, text=None, user=None,
2317 date=None, extra=None, editor=False):
2324 date=None, extra=None, editor=False):
2318 if text is None:
2325 if text is None:
2319 text = originalctx.description()
2326 text = originalctx.description()
2320 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2327 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2321 self._rev = None
2328 self._rev = None
2322 self._node = None
2329 self._node = None
2323 self._originalctx = originalctx
2330 self._originalctx = originalctx
2324 self._manifestnode = originalctx.manifestnode()
2331 self._manifestnode = originalctx.manifestnode()
2325 if parents is None:
2332 if parents is None:
2326 parents = originalctx.parents()
2333 parents = originalctx.parents()
2327 else:
2334 else:
2328 parents = [repo[p] for p in parents if p is not None]
2335 parents = [repo[p] for p in parents if p is not None]
2329 parents = parents[:]
2336 parents = parents[:]
2330 while len(parents) < 2:
2337 while len(parents) < 2:
2331 parents.append(repo[nullid])
2338 parents.append(repo[nullid])
2332 p1, p2 = self._parents = parents
2339 p1, p2 = self._parents = parents
2333
2340
2334 # sanity check to ensure that the reused manifest parents are
2341 # sanity check to ensure that the reused manifest parents are
2335 # manifests of our commit parents
2342 # manifests of our commit parents
2336 mp1, mp2 = self.manifestctx().parents
2343 mp1, mp2 = self.manifestctx().parents
2337 if p1 != nullid and p1.manifestnode() != mp1:
2344 if p1 != nullid and p1.manifestnode() != mp1:
2338 raise RuntimeError(r"can't reuse the manifest: its p1 "
2345 raise RuntimeError(r"can't reuse the manifest: its p1 "
2339 r"doesn't match the new ctx p1")
2346 r"doesn't match the new ctx p1")
2340 if p2 != nullid and p2.manifestnode() != mp2:
2347 if p2 != nullid and p2.manifestnode() != mp2:
2341 raise RuntimeError(r"can't reuse the manifest: "
2348 raise RuntimeError(r"can't reuse the manifest: "
2342 r"its p2 doesn't match the new ctx p2")
2349 r"its p2 doesn't match the new ctx p2")
2343
2350
2344 self._files = originalctx.files()
2351 self._files = originalctx.files()
2345 self.substate = {}
2352 self.substate = {}
2346
2353
2347 if editor:
2354 if editor:
2348 self._text = editor(self._repo, self, [])
2355 self._text = editor(self._repo, self, [])
2349 self._repo.savecommitmessage(self._text)
2356 self._repo.savecommitmessage(self._text)
2350
2357
2351 def manifestnode(self):
2358 def manifestnode(self):
2352 return self._manifestnode
2359 return self._manifestnode
2353
2360
2354 @property
2361 @property
2355 def _manifestctx(self):
2362 def _manifestctx(self):
2356 return self._repo.manifestlog[self._manifestnode]
2363 return self._repo.manifestlog[self._manifestnode]
2357
2364
2358 def filectx(self, path, filelog=None):
2365 def filectx(self, path, filelog=None):
2359 return self._originalctx.filectx(path, filelog=filelog)
2366 return self._originalctx.filectx(path, filelog=filelog)
2360
2367
2361 def commit(self):
2368 def commit(self):
2362 """commit context to the repo"""
2369 """commit context to the repo"""
2363 return self._repo.commitctx(self)
2370 return self._repo.commitctx(self)
2364
2371
2365 @property
2372 @property
2366 def _manifest(self):
2373 def _manifest(self):
2367 return self._originalctx.manifest()
2374 return self._originalctx.manifest()
2368
2375
2369 @propertycache
2376 @propertycache
2370 def _status(self):
2377 def _status(self):
2371 """Calculate exact status from ``files`` specified in the ``origctx``
2378 """Calculate exact status from ``files`` specified in the ``origctx``
2372 and parents manifests.
2379 and parents manifests.
2373 """
2380 """
2374 man1 = self.p1().manifest()
2381 man1 = self.p1().manifest()
2375 p2 = self._parents[1]
2382 p2 = self._parents[1]
2376 # "1 < len(self._parents)" can't be used for checking
2383 # "1 < len(self._parents)" can't be used for checking
2377 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2384 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2378 # explicitly initialized by the list, of which length is 2.
2385 # explicitly initialized by the list, of which length is 2.
2379 if p2.node() != nullid:
2386 if p2.node() != nullid:
2380 man2 = p2.manifest()
2387 man2 = p2.manifest()
2381 managing = lambda f: f in man1 or f in man2
2388 managing = lambda f: f in man1 or f in man2
2382 else:
2389 else:
2383 managing = lambda f: f in man1
2390 managing = lambda f: f in man1
2384
2391
2385 modified, added, removed = [], [], []
2392 modified, added, removed = [], [], []
2386 for f in self._files:
2393 for f in self._files:
2387 if not managing(f):
2394 if not managing(f):
2388 added.append(f)
2395 added.append(f)
2389 elif f in self:
2396 elif f in self:
2390 modified.append(f)
2397 modified.append(f)
2391 else:
2398 else:
2392 removed.append(f)
2399 removed.append(f)
2393
2400
2394 return scmutil.status(modified, added, removed, [], [], [], [])
2401 return scmutil.status(modified, added, removed, [], [], [], [])
2395
2402
2396 class arbitraryfilectx(object):
2403 class arbitraryfilectx(object):
2397 """Allows you to use filectx-like functions on a file in an arbitrary
2404 """Allows you to use filectx-like functions on a file in an arbitrary
2398 location on disk, possibly not in the working directory.
2405 location on disk, possibly not in the working directory.
2399 """
2406 """
2400 def __init__(self, path, repo=None):
2407 def __init__(self, path, repo=None):
2401 # Repo is optional because contrib/simplemerge uses this class.
2408 # Repo is optional because contrib/simplemerge uses this class.
2402 self._repo = repo
2409 self._repo = repo
2403 self._path = path
2410 self._path = path
2404
2411
2405 def cmp(self, fctx):
2412 def cmp(self, fctx):
2406 # filecmp follows symlinks whereas `cmp` should not, so skip the fast
2413 # filecmp follows symlinks whereas `cmp` should not, so skip the fast
2407 # path if either side is a symlink.
2414 # path if either side is a symlink.
2408 symlinks = ('l' in self.flags() or 'l' in fctx.flags())
2415 symlinks = ('l' in self.flags() or 'l' in fctx.flags())
2409 if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
2416 if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
2410 # Add a fast-path for merge if both sides are disk-backed.
2417 # Add a fast-path for merge if both sides are disk-backed.
2411 # Note that filecmp uses the opposite return values (True if same)
2418 # Note that filecmp uses the opposite return values (True if same)
2412 # from our cmp functions (True if different).
2419 # from our cmp functions (True if different).
2413 return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
2420 return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
2414 return self.data() != fctx.data()
2421 return self.data() != fctx.data()
2415
2422
2416 def path(self):
2423 def path(self):
2417 return self._path
2424 return self._path
2418
2425
2419 def flags(self):
2426 def flags(self):
2420 return ''
2427 return ''
2421
2428
2422 def data(self):
2429 def data(self):
2423 return util.readfile(self._path)
2430 return util.readfile(self._path)
2424
2431
2425 def decodeddata(self):
2432 def decodeddata(self):
2426 with open(self._path, "rb") as f:
2433 with open(self._path, "rb") as f:
2427 return f.read()
2434 return f.read()
2428
2435
2429 def remove(self):
2436 def remove(self):
2430 util.unlink(self._path)
2437 util.unlink(self._path)
2431
2438
2432 def write(self, data, flags, **kwargs):
2439 def write(self, data, flags, **kwargs):
2433 assert not flags
2440 assert not flags
2434 with open(self._path, "wb") as f:
2441 with open(self._path, "wb") as f:
2435 f.write(data)
2442 f.write(data)
General Comments 0
You need to be logged in to leave comments. Login now