context: remove unneeded alias of diffopts
Yuya Nishihara
r38601:7f4bf811 default
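The patch itself is small: basectx.diff() used to copy its `opts` argument into a local `diffopts` name before handing it to patch.diff(); the alias added nothing, so `opts` is now forwarded directly. For orientation, a minimal usage sketch of the code path that ends in this method, assuming a local repository in the current directory and this release's internal Python API (the setup below is illustrative, not part of the patch):

    from mercurial import hg, patch, ui as uimod

    ui = uimod.ui.load()              # load the usual configuration files
    repo = hg.repository(ui, '.')     # assumption: an hg repository in the cwd
    ctx = repo['.']                   # changectx for the working directory parent

    # basectx.diff() hands `opts` (an mdiff.diffopts object) straight to
    # patch.diff(); before this change it went through the removed alias.
    diffopts = patch.diffallopts(ui, {'git': True})
    for chunk in ctx.diff(opts=diffopts):
        ui.write(chunk)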
@@ -1,2542 +1,2540
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import filecmp
11 import filecmp
12 import os
12 import os
13 import stat
13 import stat
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 addednodeid,
17 addednodeid,
18 bin,
18 bin,
19 hex,
19 hex,
20 modifiednodeid,
20 modifiednodeid,
21 nullid,
21 nullid,
22 nullrev,
22 nullrev,
23 short,
23 short,
24 wdirfilenodeids,
24 wdirfilenodeids,
25 wdirid,
25 wdirid,
26 )
26 )
27 from . import (
27 from . import (
28 dagop,
28 dagop,
29 encoding,
29 encoding,
30 error,
30 error,
31 fileset,
31 fileset,
32 match as matchmod,
32 match as matchmod,
33 obsolete as obsmod,
33 obsolete as obsmod,
34 patch,
34 patch,
35 pathutil,
35 pathutil,
36 phases,
36 phases,
37 pycompat,
37 pycompat,
38 repoview,
38 repoview,
39 revlog,
39 revlog,
40 scmutil,
40 scmutil,
41 sparse,
41 sparse,
42 subrepo,
42 subrepo,
43 subrepoutil,
43 subrepoutil,
44 util,
44 util,
45 )
45 )
46 from .utils import (
46 from .utils import (
47 dateutil,
47 dateutil,
48 stringutil,
48 stringutil,
49 )
49 )
50
50
51 propertycache = util.propertycache
51 propertycache = util.propertycache
52
52
53 class basectx(object):
53 class basectx(object):
54 """A basectx object represents the common logic for its children:
54 """A basectx object represents the common logic for its children:
55 changectx: read-only context that is already present in the repo,
55 changectx: read-only context that is already present in the repo,
56 workingctx: a context that represents the working directory and can
56 workingctx: a context that represents the working directory and can
57 be committed,
57 be committed,
58 memctx: a context that represents changes in-memory and can also
58 memctx: a context that represents changes in-memory and can also
59 be committed."""
59 be committed."""
60
60
61 def __init__(self, repo):
61 def __init__(self, repo):
62 self._repo = repo
62 self._repo = repo
63
63
64 def __bytes__(self):
64 def __bytes__(self):
65 return short(self.node())
65 return short(self.node())
66
66
67 __str__ = encoding.strmethod(__bytes__)
67 __str__ = encoding.strmethod(__bytes__)
68
68
69 def __repr__(self):
69 def __repr__(self):
70 return r"<%s %s>" % (type(self).__name__, str(self))
70 return r"<%s %s>" % (type(self).__name__, str(self))
71
71
72 def __eq__(self, other):
72 def __eq__(self, other):
73 try:
73 try:
74 return type(self) == type(other) and self._rev == other._rev
74 return type(self) == type(other) and self._rev == other._rev
75 except AttributeError:
75 except AttributeError:
76 return False
76 return False
77
77
78 def __ne__(self, other):
78 def __ne__(self, other):
79 return not (self == other)
79 return not (self == other)
80
80
81 def __contains__(self, key):
81 def __contains__(self, key):
82 return key in self._manifest
82 return key in self._manifest
83
83
84 def __getitem__(self, key):
84 def __getitem__(self, key):
85 return self.filectx(key)
85 return self.filectx(key)
86
86
87 def __iter__(self):
87 def __iter__(self):
88 return iter(self._manifest)
88 return iter(self._manifest)
89
89
90 def _buildstatusmanifest(self, status):
90 def _buildstatusmanifest(self, status):
91 """Builds a manifest that includes the given status results, if this is
91 """Builds a manifest that includes the given status results, if this is
92 a working copy context. For non-working copy contexts, it just returns
92 a working copy context. For non-working copy contexts, it just returns
93 the normal manifest."""
93 the normal manifest."""
94 return self.manifest()
94 return self.manifest()
95
95
96 def _matchstatus(self, other, match):
96 def _matchstatus(self, other, match):
97 """This internal method provides a way for child objects to override the
97 """This internal method provides a way for child objects to override the
98 match operator.
98 match operator.
99 """
99 """
100 return match
100 return match
101
101
102 def _buildstatus(self, other, s, match, listignored, listclean,
102 def _buildstatus(self, other, s, match, listignored, listclean,
103 listunknown):
103 listunknown):
104 """build a status with respect to another context"""
104 """build a status with respect to another context"""
105 # Load earliest manifest first for caching reasons. More specifically,
105 # Load earliest manifest first for caching reasons. More specifically,
106 # if you have revisions 1000 and 1001, 1001 is probably stored as a
106 # if you have revisions 1000 and 1001, 1001 is probably stored as a
107 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
107 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
108 # 1000 and cache it so that when you read 1001, we just need to apply a
108 # 1000 and cache it so that when you read 1001, we just need to apply a
109 # delta to what's in the cache. So that's one full reconstruction + one
109 # delta to what's in the cache. So that's one full reconstruction + one
110 # delta application.
110 # delta application.
111 mf2 = None
111 mf2 = None
112 if self.rev() is not None and self.rev() < other.rev():
112 if self.rev() is not None and self.rev() < other.rev():
113 mf2 = self._buildstatusmanifest(s)
113 mf2 = self._buildstatusmanifest(s)
114 mf1 = other._buildstatusmanifest(s)
114 mf1 = other._buildstatusmanifest(s)
115 if mf2 is None:
115 if mf2 is None:
116 mf2 = self._buildstatusmanifest(s)
116 mf2 = self._buildstatusmanifest(s)
117
117
118 modified, added = [], []
118 modified, added = [], []
119 removed = []
119 removed = []
120 clean = []
120 clean = []
121 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
121 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
122 deletedset = set(deleted)
122 deletedset = set(deleted)
123 d = mf1.diff(mf2, match=match, clean=listclean)
123 d = mf1.diff(mf2, match=match, clean=listclean)
124 for fn, value in d.iteritems():
124 for fn, value in d.iteritems():
125 if fn in deletedset:
125 if fn in deletedset:
126 continue
126 continue
127 if value is None:
127 if value is None:
128 clean.append(fn)
128 clean.append(fn)
129 continue
129 continue
130 (node1, flag1), (node2, flag2) = value
130 (node1, flag1), (node2, flag2) = value
131 if node1 is None:
131 if node1 is None:
132 added.append(fn)
132 added.append(fn)
133 elif node2 is None:
133 elif node2 is None:
134 removed.append(fn)
134 removed.append(fn)
135 elif flag1 != flag2:
135 elif flag1 != flag2:
136 modified.append(fn)
136 modified.append(fn)
137 elif node2 not in wdirfilenodeids:
137 elif node2 not in wdirfilenodeids:
138 # When comparing files between two commits, we save time by
138 # When comparing files between two commits, we save time by
139 # not comparing the file contents when the nodeids differ.
139 # not comparing the file contents when the nodeids differ.
140 # Note that this means we incorrectly report a reverted change
140 # Note that this means we incorrectly report a reverted change
141 # to a file as a modification.
141 # to a file as a modification.
142 modified.append(fn)
142 modified.append(fn)
143 elif self[fn].cmp(other[fn]):
143 elif self[fn].cmp(other[fn]):
144 modified.append(fn)
144 modified.append(fn)
145 else:
145 else:
146 clean.append(fn)
146 clean.append(fn)
147
147
148 if removed:
148 if removed:
149 # need to filter files if they are already reported as removed
149 # need to filter files if they are already reported as removed
150 unknown = [fn for fn in unknown if fn not in mf1 and
150 unknown = [fn for fn in unknown if fn not in mf1 and
151 (not match or match(fn))]
151 (not match or match(fn))]
152 ignored = [fn for fn in ignored if fn not in mf1 and
152 ignored = [fn for fn in ignored if fn not in mf1 and
153 (not match or match(fn))]
153 (not match or match(fn))]
154 # if they're deleted, don't report them as removed
154 # if they're deleted, don't report them as removed
155 removed = [fn for fn in removed if fn not in deletedset]
155 removed = [fn for fn in removed if fn not in deletedset]
156
156
157 return scmutil.status(modified, added, removed, deleted, unknown,
157 return scmutil.status(modified, added, removed, deleted, unknown,
158 ignored, clean)
158 ignored, clean)
159
159
160 @propertycache
160 @propertycache
161 def substate(self):
161 def substate(self):
162 return subrepoutil.state(self, self._repo.ui)
162 return subrepoutil.state(self, self._repo.ui)
163
163
164 def subrev(self, subpath):
164 def subrev(self, subpath):
165 return self.substate[subpath][1]
165 return self.substate[subpath][1]
166
166
167 def rev(self):
167 def rev(self):
168 return self._rev
168 return self._rev
169 def node(self):
169 def node(self):
170 return self._node
170 return self._node
171 def hex(self):
171 def hex(self):
172 return hex(self.node())
172 return hex(self.node())
173 def manifest(self):
173 def manifest(self):
174 return self._manifest
174 return self._manifest
175 def manifestctx(self):
175 def manifestctx(self):
176 return self._manifestctx
176 return self._manifestctx
177 def repo(self):
177 def repo(self):
178 return self._repo
178 return self._repo
179 def phasestr(self):
179 def phasestr(self):
180 return phases.phasenames[self.phase()]
180 return phases.phasenames[self.phase()]
181 def mutable(self):
181 def mutable(self):
182 return self.phase() > phases.public
182 return self.phase() > phases.public
183
183
184 def getfileset(self, expr):
184 def getfileset(self, expr):
185 return fileset.getfileset(self, expr)
185 return fileset.getfileset(self, expr)
186
186
187 def obsolete(self):
187 def obsolete(self):
188 """True if the changeset is obsolete"""
188 """True if the changeset is obsolete"""
189 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
189 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
190
190
191 def extinct(self):
191 def extinct(self):
192 """True if the changeset is extinct"""
192 """True if the changeset is extinct"""
193 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
193 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
194
194
195 def orphan(self):
195 def orphan(self):
196 """True if the changeset is not obsolete but it's ancestor are"""
196 """True if the changeset is not obsolete but it's ancestor are"""
197 return self.rev() in obsmod.getrevs(self._repo, 'orphan')
197 return self.rev() in obsmod.getrevs(self._repo, 'orphan')
198
198
199 def phasedivergent(self):
199 def phasedivergent(self):
200 """True if the changeset try to be a successor of a public changeset
200 """True if the changeset try to be a successor of a public changeset
201
201
202 Only non-public and non-obsolete changesets may be bumped.
202 Only non-public and non-obsolete changesets may be bumped.
203 """
203 """
204 return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')
204 return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')
205
205
206 def contentdivergent(self):
206 def contentdivergent(self):
207 """Is a successors of a changeset with multiple possible successors set
207 """Is a successors of a changeset with multiple possible successors set
208
208
209 Only non-public and non-obsolete changesets may be divergent.
209 Only non-public and non-obsolete changesets may be divergent.
210 """
210 """
211 return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')
211 return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')
212
212
213 def isunstable(self):
213 def isunstable(self):
214 """True if the changeset is either unstable, bumped or divergent"""
214 """True if the changeset is either unstable, bumped or divergent"""
215 return self.orphan() or self.phasedivergent() or self.contentdivergent()
215 return self.orphan() or self.phasedivergent() or self.contentdivergent()
216
216
217 def instabilities(self):
217 def instabilities(self):
218 """return the list of instabilities affecting this changeset.
218 """return the list of instabilities affecting this changeset.
219
219
220 Instabilities are returned as strings. possible values are:
220 Instabilities are returned as strings. possible values are:
221 - orphan,
221 - orphan,
222 - phase-divergent,
222 - phase-divergent,
223 - content-divergent.
223 - content-divergent.
224 """
224 """
225 instabilities = []
225 instabilities = []
226 if self.orphan():
226 if self.orphan():
227 instabilities.append('orphan')
227 instabilities.append('orphan')
228 if self.phasedivergent():
228 if self.phasedivergent():
229 instabilities.append('phase-divergent')
229 instabilities.append('phase-divergent')
230 if self.contentdivergent():
230 if self.contentdivergent():
231 instabilities.append('content-divergent')
231 instabilities.append('content-divergent')
232 return instabilities
232 return instabilities
233
233
234 def parents(self):
234 def parents(self):
235 """return contexts for each parent changeset"""
235 """return contexts for each parent changeset"""
236 return self._parents
236 return self._parents
237
237
238 def p1(self):
238 def p1(self):
239 return self._parents[0]
239 return self._parents[0]
240
240
241 def p2(self):
241 def p2(self):
242 parents = self._parents
242 parents = self._parents
243 if len(parents) == 2:
243 if len(parents) == 2:
244 return parents[1]
244 return parents[1]
245 return changectx(self._repo, nullrev)
245 return changectx(self._repo, nullrev)
246
246
247 def _fileinfo(self, path):
247 def _fileinfo(self, path):
248 if r'_manifest' in self.__dict__:
248 if r'_manifest' in self.__dict__:
249 try:
249 try:
250 return self._manifest[path], self._manifest.flags(path)
250 return self._manifest[path], self._manifest.flags(path)
251 except KeyError:
251 except KeyError:
252 raise error.ManifestLookupError(self._node, path,
252 raise error.ManifestLookupError(self._node, path,
253 _('not found in manifest'))
253 _('not found in manifest'))
254 if r'_manifestdelta' in self.__dict__ or path in self.files():
254 if r'_manifestdelta' in self.__dict__ or path in self.files():
255 if path in self._manifestdelta:
255 if path in self._manifestdelta:
256 return (self._manifestdelta[path],
256 return (self._manifestdelta[path],
257 self._manifestdelta.flags(path))
257 self._manifestdelta.flags(path))
258 mfl = self._repo.manifestlog
258 mfl = self._repo.manifestlog
259 try:
259 try:
260 node, flag = mfl[self._changeset.manifest].find(path)
260 node, flag = mfl[self._changeset.manifest].find(path)
261 except KeyError:
261 except KeyError:
262 raise error.ManifestLookupError(self._node, path,
262 raise error.ManifestLookupError(self._node, path,
263 _('not found in manifest'))
263 _('not found in manifest'))
264
264
265 return node, flag
265 return node, flag
266
266
267 def filenode(self, path):
267 def filenode(self, path):
268 return self._fileinfo(path)[0]
268 return self._fileinfo(path)[0]
269
269
270 def flags(self, path):
270 def flags(self, path):
271 try:
271 try:
272 return self._fileinfo(path)[1]
272 return self._fileinfo(path)[1]
273 except error.LookupError:
273 except error.LookupError:
274 return ''
274 return ''
275
275
276 def sub(self, path, allowcreate=True):
276 def sub(self, path, allowcreate=True):
277 '''return a subrepo for the stored revision of path, never wdir()'''
277 '''return a subrepo for the stored revision of path, never wdir()'''
278 return subrepo.subrepo(self, path, allowcreate=allowcreate)
278 return subrepo.subrepo(self, path, allowcreate=allowcreate)
279
279
280 def nullsub(self, path, pctx):
280 def nullsub(self, path, pctx):
281 return subrepo.nullsubrepo(self, path, pctx)
281 return subrepo.nullsubrepo(self, path, pctx)
282
282
283 def workingsub(self, path):
283 def workingsub(self, path):
284 '''return a subrepo for the stored revision, or wdir if this is a wdir
284 '''return a subrepo for the stored revision, or wdir if this is a wdir
285 context.
285 context.
286 '''
286 '''
287 return subrepo.subrepo(self, path, allowwdir=True)
287 return subrepo.subrepo(self, path, allowwdir=True)
288
288
289 def match(self, pats=None, include=None, exclude=None, default='glob',
289 def match(self, pats=None, include=None, exclude=None, default='glob',
290 listsubrepos=False, badfn=None):
290 listsubrepos=False, badfn=None):
291 r = self._repo
291 r = self._repo
292 return matchmod.match(r.root, r.getcwd(), pats,
292 return matchmod.match(r.root, r.getcwd(), pats,
293 include, exclude, default,
293 include, exclude, default,
294 auditor=r.nofsauditor, ctx=self,
294 auditor=r.nofsauditor, ctx=self,
295 listsubrepos=listsubrepos, badfn=badfn)
295 listsubrepos=listsubrepos, badfn=badfn)
296
296
297 def diff(self, ctx2=None, match=None, changes=None, opts=None,
297 def diff(self, ctx2=None, match=None, changes=None, opts=None,
298 losedatafn=None, prefix='', relroot='', copy=None,
298 losedatafn=None, prefix='', relroot='', copy=None,
299 hunksfilterfn=None):
299 hunksfilterfn=None):
300 """Returns a diff generator for the given contexts and matcher"""
300 """Returns a diff generator for the given contexts and matcher"""
301 if ctx2 is None:
301 if ctx2 is None:
302 ctx2 = self.p1()
302 ctx2 = self.p1()
303 if ctx2 is not None:
303 if ctx2 is not None:
304 ctx2 = self._repo[ctx2]
304 ctx2 = self._repo[ctx2]
- 305
- 306 diffopts = opts
307 return patch.diff(self._repo, ctx2, self, match=match, changes=changes,
305 return patch.diff(self._repo, ctx2, self, match=match, changes=changes,
- 308 opts=diffopts, losedatafn=losedatafn, prefix=prefix,
+ 306 opts=opts, losedatafn=losedatafn, prefix=prefix,
309 relroot=relroot, copy=copy,
307 relroot=relroot, copy=copy,
310 hunksfilterfn=hunksfilterfn)
308 hunksfilterfn=hunksfilterfn)
311
309
312 def dirs(self):
310 def dirs(self):
313 return self._manifest.dirs()
311 return self._manifest.dirs()
314
312
315 def hasdir(self, dir):
313 def hasdir(self, dir):
316 return self._manifest.hasdir(dir)
314 return self._manifest.hasdir(dir)
317
315
318 def status(self, other=None, match=None, listignored=False,
316 def status(self, other=None, match=None, listignored=False,
319 listclean=False, listunknown=False, listsubrepos=False):
317 listclean=False, listunknown=False, listsubrepos=False):
320 """return status of files between two nodes or node and working
318 """return status of files between two nodes or node and working
321 directory.
319 directory.
322
320
323 If other is None, compare this node with working directory.
321 If other is None, compare this node with working directory.
324
322
325 returns (modified, added, removed, deleted, unknown, ignored, clean)
323 returns (modified, added, removed, deleted, unknown, ignored, clean)
326 """
324 """
327
325
328 ctx1 = self
326 ctx1 = self
329 ctx2 = self._repo[other]
327 ctx2 = self._repo[other]
330
328
331 # This next code block is, admittedly, fragile logic that tests for
329 # This next code block is, admittedly, fragile logic that tests for
332 # reversing the contexts and wouldn't need to exist if it weren't for
330 # reversing the contexts and wouldn't need to exist if it weren't for
333 # the fast (and common) code path of comparing the working directory
331 # the fast (and common) code path of comparing the working directory
334 # with its first parent.
332 # with its first parent.
335 #
333 #
336 # What we're aiming for here is the ability to call:
334 # What we're aiming for here is the ability to call:
337 #
335 #
338 # workingctx.status(parentctx)
336 # workingctx.status(parentctx)
339 #
337 #
340 # If we always built the manifest for each context and compared those,
338 # If we always built the manifest for each context and compared those,
341 # then we'd be done. But the special case of the above call means we
339 # then we'd be done. But the special case of the above call means we
342 # just copy the manifest of the parent.
340 # just copy the manifest of the parent.
343 reversed = False
341 reversed = False
344 if (not isinstance(ctx1, changectx)
342 if (not isinstance(ctx1, changectx)
345 and isinstance(ctx2, changectx)):
343 and isinstance(ctx2, changectx)):
346 reversed = True
344 reversed = True
347 ctx1, ctx2 = ctx2, ctx1
345 ctx1, ctx2 = ctx2, ctx1
348
346
349 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
347 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
350 match = ctx2._matchstatus(ctx1, match)
348 match = ctx2._matchstatus(ctx1, match)
351 r = scmutil.status([], [], [], [], [], [], [])
349 r = scmutil.status([], [], [], [], [], [], [])
352 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
350 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
353 listunknown)
351 listunknown)
354
352
355 if reversed:
353 if reversed:
356 # Reverse added and removed. Clear deleted, unknown and ignored as
354 # Reverse added and removed. Clear deleted, unknown and ignored as
357 # these make no sense to reverse.
355 # these make no sense to reverse.
358 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
356 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
359 r.clean)
357 r.clean)
360
358
361 if listsubrepos:
359 if listsubrepos:
362 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
360 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
363 try:
361 try:
364 rev2 = ctx2.subrev(subpath)
362 rev2 = ctx2.subrev(subpath)
365 except KeyError:
363 except KeyError:
366 # A subrepo that existed in node1 was deleted between
364 # A subrepo that existed in node1 was deleted between
367 # node1 and node2 (inclusive). Thus, ctx2's substate
365 # node1 and node2 (inclusive). Thus, ctx2's substate
368 # won't contain that subpath. The best we can do is ignore it.
366 # won't contain that subpath. The best we can do is ignore it.
369 rev2 = None
367 rev2 = None
370 submatch = matchmod.subdirmatcher(subpath, match)
368 submatch = matchmod.subdirmatcher(subpath, match)
371 s = sub.status(rev2, match=submatch, ignored=listignored,
369 s = sub.status(rev2, match=submatch, ignored=listignored,
372 clean=listclean, unknown=listunknown,
370 clean=listclean, unknown=listunknown,
373 listsubrepos=True)
371 listsubrepos=True)
374 for rfiles, sfiles in zip(r, s):
372 for rfiles, sfiles in zip(r, s):
375 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
373 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
376
374
377 for l in r:
375 for l in r:
378 l.sort()
376 l.sort()
379
377
380 return r
378 return r
381
379
382 class changectx(basectx):
380 class changectx(basectx):
383 """A changecontext object makes access to data related to a particular
381 """A changecontext object makes access to data related to a particular
384 changeset convenient. It represents a read-only context already present in
382 changeset convenient. It represents a read-only context already present in
385 the repo."""
383 the repo."""
386 def __init__(self, repo, changeid='.'):
384 def __init__(self, repo, changeid='.'):
387 """changeid is a revision number, node, or tag"""
385 """changeid is a revision number, node, or tag"""
388 super(changectx, self).__init__(repo)
386 super(changectx, self).__init__(repo)
389
387
390 try:
388 try:
391 if isinstance(changeid, int):
389 if isinstance(changeid, int):
392 self._node = repo.changelog.node(changeid)
390 self._node = repo.changelog.node(changeid)
393 self._rev = changeid
391 self._rev = changeid
394 return
392 return
395 elif changeid == 'null':
393 elif changeid == 'null':
396 self._node = nullid
394 self._node = nullid
397 self._rev = nullrev
395 self._rev = nullrev
398 return
396 return
399 elif changeid == 'tip':
397 elif changeid == 'tip':
400 self._node = repo.changelog.tip()
398 self._node = repo.changelog.tip()
401 self._rev = repo.changelog.rev(self._node)
399 self._rev = repo.changelog.rev(self._node)
402 return
400 return
403 elif (changeid == '.'
401 elif (changeid == '.'
404 or repo.local() and changeid == repo.dirstate.p1()):
402 or repo.local() and changeid == repo.dirstate.p1()):
405 # this is a hack to delay/avoid loading obsmarkers
403 # this is a hack to delay/avoid loading obsmarkers
406 # when we know that '.' won't be hidden
404 # when we know that '.' won't be hidden
407 self._node = repo.dirstate.p1()
405 self._node = repo.dirstate.p1()
408 self._rev = repo.unfiltered().changelog.rev(self._node)
406 self._rev = repo.unfiltered().changelog.rev(self._node)
409 return
407 return
410 elif len(changeid) == 20:
408 elif len(changeid) == 20:
411 try:
409 try:
412 self._node = changeid
410 self._node = changeid
413 self._rev = repo.changelog.rev(changeid)
411 self._rev = repo.changelog.rev(changeid)
414 return
412 return
415 except error.FilteredLookupError:
413 except error.FilteredLookupError:
416 raise
414 raise
417 except LookupError:
415 except LookupError:
418 # check if it might have come from damaged dirstate
416 # check if it might have come from damaged dirstate
419 #
417 #
420 # XXX we could avoid the unfiltered if we had a recognizable
418 # XXX we could avoid the unfiltered if we had a recognizable
421 # exception for filtered changeset access
419 # exception for filtered changeset access
422 if (repo.local()
420 if (repo.local()
423 and changeid in repo.unfiltered().dirstate.parents()):
421 and changeid in repo.unfiltered().dirstate.parents()):
424 msg = _("working directory has unknown parent '%s'!")
422 msg = _("working directory has unknown parent '%s'!")
425 raise error.Abort(msg % short(changeid))
423 raise error.Abort(msg % short(changeid))
426 changeid = hex(changeid) # for the error message
424 changeid = hex(changeid) # for the error message
427
425
428 elif len(changeid) == 40:
426 elif len(changeid) == 40:
429 try:
427 try:
430 self._node = bin(changeid)
428 self._node = bin(changeid)
431 self._rev = repo.changelog.rev(self._node)
429 self._rev = repo.changelog.rev(self._node)
432 return
430 return
433 except error.FilteredLookupError:
431 except error.FilteredLookupError:
434 raise
432 raise
435 except (TypeError, LookupError):
433 except (TypeError, LookupError):
436 pass
434 pass
437
435
438 # lookup failed
436 # lookup failed
439 except (error.FilteredIndexError, error.FilteredLookupError):
437 except (error.FilteredIndexError, error.FilteredLookupError):
440 raise error.FilteredRepoLookupError(_("filtered revision '%s'")
438 raise error.FilteredRepoLookupError(_("filtered revision '%s'")
441 % pycompat.bytestr(changeid))
439 % pycompat.bytestr(changeid))
442 except error.FilteredRepoLookupError:
440 except error.FilteredRepoLookupError:
443 raise
441 raise
444 except IndexError:
442 except IndexError:
445 pass
443 pass
446 raise error.RepoLookupError(
444 raise error.RepoLookupError(
447 _("unknown revision '%s'") % changeid)
445 _("unknown revision '%s'") % changeid)
448
446
449 def __hash__(self):
447 def __hash__(self):
450 try:
448 try:
451 return hash(self._rev)
449 return hash(self._rev)
452 except AttributeError:
450 except AttributeError:
453 return id(self)
451 return id(self)
454
452
455 def __nonzero__(self):
453 def __nonzero__(self):
456 return self._rev != nullrev
454 return self._rev != nullrev
457
455
458 __bool__ = __nonzero__
456 __bool__ = __nonzero__
459
457
460 @propertycache
458 @propertycache
461 def _changeset(self):
459 def _changeset(self):
462 return self._repo.changelog.changelogrevision(self.rev())
460 return self._repo.changelog.changelogrevision(self.rev())
463
461
464 @propertycache
462 @propertycache
465 def _manifest(self):
463 def _manifest(self):
466 return self._manifestctx.read()
464 return self._manifestctx.read()
467
465
468 @property
466 @property
469 def _manifestctx(self):
467 def _manifestctx(self):
470 return self._repo.manifestlog[self._changeset.manifest]
468 return self._repo.manifestlog[self._changeset.manifest]
471
469
472 @propertycache
470 @propertycache
473 def _manifestdelta(self):
471 def _manifestdelta(self):
474 return self._manifestctx.readdelta()
472 return self._manifestctx.readdelta()
475
473
476 @propertycache
474 @propertycache
477 def _parents(self):
475 def _parents(self):
478 repo = self._repo
476 repo = self._repo
479 p1, p2 = repo.changelog.parentrevs(self._rev)
477 p1, p2 = repo.changelog.parentrevs(self._rev)
480 if p2 == nullrev:
478 if p2 == nullrev:
481 return [changectx(repo, p1)]
479 return [changectx(repo, p1)]
482 return [changectx(repo, p1), changectx(repo, p2)]
480 return [changectx(repo, p1), changectx(repo, p2)]
483
481
484 def changeset(self):
482 def changeset(self):
485 c = self._changeset
483 c = self._changeset
486 return (
484 return (
487 c.manifest,
485 c.manifest,
488 c.user,
486 c.user,
489 c.date,
487 c.date,
490 c.files,
488 c.files,
491 c.description,
489 c.description,
492 c.extra,
490 c.extra,
493 )
491 )
494 def manifestnode(self):
492 def manifestnode(self):
495 return self._changeset.manifest
493 return self._changeset.manifest
496
494
497 def user(self):
495 def user(self):
498 return self._changeset.user
496 return self._changeset.user
499 def date(self):
497 def date(self):
500 return self._changeset.date
498 return self._changeset.date
501 def files(self):
499 def files(self):
502 return self._changeset.files
500 return self._changeset.files
503 def description(self):
501 def description(self):
504 return self._changeset.description
502 return self._changeset.description
505 def branch(self):
503 def branch(self):
506 return encoding.tolocal(self._changeset.extra.get("branch"))
504 return encoding.tolocal(self._changeset.extra.get("branch"))
507 def closesbranch(self):
505 def closesbranch(self):
508 return 'close' in self._changeset.extra
506 return 'close' in self._changeset.extra
509 def extra(self):
507 def extra(self):
510 """Return a dict of extra information."""
508 """Return a dict of extra information."""
511 return self._changeset.extra
509 return self._changeset.extra
512 def tags(self):
510 def tags(self):
513 """Return a list of byte tag names"""
511 """Return a list of byte tag names"""
514 return self._repo.nodetags(self._node)
512 return self._repo.nodetags(self._node)
515 def bookmarks(self):
513 def bookmarks(self):
516 """Return a list of byte bookmark names."""
514 """Return a list of byte bookmark names."""
517 return self._repo.nodebookmarks(self._node)
515 return self._repo.nodebookmarks(self._node)
518 def phase(self):
516 def phase(self):
519 return self._repo._phasecache.phase(self._repo, self._rev)
517 return self._repo._phasecache.phase(self._repo, self._rev)
520 def hidden(self):
518 def hidden(self):
521 return self._rev in repoview.filterrevs(self._repo, 'visible')
519 return self._rev in repoview.filterrevs(self._repo, 'visible')
522
520
523 def isinmemory(self):
521 def isinmemory(self):
524 return False
522 return False
525
523
526 def children(self):
524 def children(self):
527 """return list of changectx contexts for each child changeset.
525 """return list of changectx contexts for each child changeset.
528
526
529 This returns only the immediate child changesets. Use descendants() to
527 This returns only the immediate child changesets. Use descendants() to
530 recursively walk children.
528 recursively walk children.
531 """
529 """
532 c = self._repo.changelog.children(self._node)
530 c = self._repo.changelog.children(self._node)
533 return [changectx(self._repo, x) for x in c]
531 return [changectx(self._repo, x) for x in c]
534
532
535 def ancestors(self):
533 def ancestors(self):
536 for a in self._repo.changelog.ancestors([self._rev]):
534 for a in self._repo.changelog.ancestors([self._rev]):
537 yield changectx(self._repo, a)
535 yield changectx(self._repo, a)
538
536
539 def descendants(self):
537 def descendants(self):
540 """Recursively yield all children of the changeset.
538 """Recursively yield all children of the changeset.
541
539
542 For just the immediate children, use children()
540 For just the immediate children, use children()
543 """
541 """
544 for d in self._repo.changelog.descendants([self._rev]):
542 for d in self._repo.changelog.descendants([self._rev]):
545 yield changectx(self._repo, d)
543 yield changectx(self._repo, d)
546
544
547 def filectx(self, path, fileid=None, filelog=None):
545 def filectx(self, path, fileid=None, filelog=None):
548 """get a file context from this changeset"""
546 """get a file context from this changeset"""
549 if fileid is None:
547 if fileid is None:
550 fileid = self.filenode(path)
548 fileid = self.filenode(path)
551 return filectx(self._repo, path, fileid=fileid,
549 return filectx(self._repo, path, fileid=fileid,
552 changectx=self, filelog=filelog)
550 changectx=self, filelog=filelog)
553
551
554 def ancestor(self, c2, warn=False):
552 def ancestor(self, c2, warn=False):
555 """return the "best" ancestor context of self and c2
553 """return the "best" ancestor context of self and c2
556
554
557 If there are multiple candidates, it will show a message and check
555 If there are multiple candidates, it will show a message and check
558 merge.preferancestor configuration before falling back to the
556 merge.preferancestor configuration before falling back to the
559 revlog ancestor."""
557 revlog ancestor."""
560 # deal with workingctxs
558 # deal with workingctxs
561 n2 = c2._node
559 n2 = c2._node
562 if n2 is None:
560 if n2 is None:
563 n2 = c2._parents[0]._node
561 n2 = c2._parents[0]._node
564 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
562 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
565 if not cahs:
563 if not cahs:
566 anc = nullid
564 anc = nullid
567 elif len(cahs) == 1:
565 elif len(cahs) == 1:
568 anc = cahs[0]
566 anc = cahs[0]
569 else:
567 else:
570 # experimental config: merge.preferancestor
568 # experimental config: merge.preferancestor
571 for r in self._repo.ui.configlist('merge', 'preferancestor'):
569 for r in self._repo.ui.configlist('merge', 'preferancestor'):
572 try:
570 try:
573 ctx = scmutil.revsymbol(self._repo, r)
571 ctx = scmutil.revsymbol(self._repo, r)
574 except error.RepoLookupError:
572 except error.RepoLookupError:
575 continue
573 continue
576 anc = ctx.node()
574 anc = ctx.node()
577 if anc in cahs:
575 if anc in cahs:
578 break
576 break
579 else:
577 else:
580 anc = self._repo.changelog.ancestor(self._node, n2)
578 anc = self._repo.changelog.ancestor(self._node, n2)
581 if warn:
579 if warn:
582 self._repo.ui.status(
580 self._repo.ui.status(
583 (_("note: using %s as ancestor of %s and %s\n") %
581 (_("note: using %s as ancestor of %s and %s\n") %
584 (short(anc), short(self._node), short(n2))) +
582 (short(anc), short(self._node), short(n2))) +
585 ''.join(_(" alternatively, use --config "
583 ''.join(_(" alternatively, use --config "
586 "merge.preferancestor=%s\n") %
584 "merge.preferancestor=%s\n") %
587 short(n) for n in sorted(cahs) if n != anc))
585 short(n) for n in sorted(cahs) if n != anc))
588 return changectx(self._repo, anc)
586 return changectx(self._repo, anc)
589
587
590 def descendant(self, other):
588 def descendant(self, other):
591 """True if other is descendant of this changeset"""
589 """True if other is descendant of this changeset"""
592 return self._repo.changelog.descendant(self._rev, other._rev)
590 return self._repo.changelog.descendant(self._rev, other._rev)
593
591
594 def walk(self, match):
592 def walk(self, match):
595 '''Generates matching file names.'''
593 '''Generates matching file names.'''
596
594
597 # Wrap match.bad method to have message with nodeid
595 # Wrap match.bad method to have message with nodeid
598 def bad(fn, msg):
596 def bad(fn, msg):
599 # The manifest doesn't know about subrepos, so don't complain about
597 # The manifest doesn't know about subrepos, so don't complain about
600 # paths into valid subrepos.
598 # paths into valid subrepos.
601 if any(fn == s or fn.startswith(s + '/')
599 if any(fn == s or fn.startswith(s + '/')
602 for s in self.substate):
600 for s in self.substate):
603 return
601 return
604 match.bad(fn, _('no such file in rev %s') % self)
602 match.bad(fn, _('no such file in rev %s') % self)
605
603
606 m = matchmod.badmatch(match, bad)
604 m = matchmod.badmatch(match, bad)
607 return self._manifest.walk(m)
605 return self._manifest.walk(m)
608
606
609 def matches(self, match):
607 def matches(self, match):
610 return self.walk(match)
608 return self.walk(match)
611
609
612 class basefilectx(object):
610 class basefilectx(object):
613 """A filecontext object represents the common logic for its children:
611 """A filecontext object represents the common logic for its children:
614 filectx: read-only access to a filerevision that is already present
612 filectx: read-only access to a filerevision that is already present
615 in the repo,
613 in the repo,
616 workingfilectx: a filecontext that represents files from the working
614 workingfilectx: a filecontext that represents files from the working
617 directory,
615 directory,
618 memfilectx: a filecontext that represents files in-memory,
616 memfilectx: a filecontext that represents files in-memory,
619 overlayfilectx: duplicate another filecontext with some fields overridden.
617 overlayfilectx: duplicate another filecontext with some fields overridden.
620 """
618 """
621 @propertycache
619 @propertycache
622 def _filelog(self):
620 def _filelog(self):
623 return self._repo.file(self._path)
621 return self._repo.file(self._path)
624
622
625 @propertycache
623 @propertycache
626 def _changeid(self):
624 def _changeid(self):
627 if r'_changeid' in self.__dict__:
625 if r'_changeid' in self.__dict__:
628 return self._changeid
626 return self._changeid
629 elif r'_changectx' in self.__dict__:
627 elif r'_changectx' in self.__dict__:
630 return self._changectx.rev()
628 return self._changectx.rev()
631 elif r'_descendantrev' in self.__dict__:
629 elif r'_descendantrev' in self.__dict__:
632 # this file context was created from a revision with a known
630 # this file context was created from a revision with a known
633 # descendant, we can (lazily) correct for linkrev aliases
631 # descendant, we can (lazily) correct for linkrev aliases
634 return self._adjustlinkrev(self._descendantrev)
632 return self._adjustlinkrev(self._descendantrev)
635 else:
633 else:
636 return self._filelog.linkrev(self._filerev)
634 return self._filelog.linkrev(self._filerev)
637
635
638 @propertycache
636 @propertycache
639 def _filenode(self):
637 def _filenode(self):
640 if r'_fileid' in self.__dict__:
638 if r'_fileid' in self.__dict__:
641 return self._filelog.lookup(self._fileid)
639 return self._filelog.lookup(self._fileid)
642 else:
640 else:
643 return self._changectx.filenode(self._path)
641 return self._changectx.filenode(self._path)
644
642
645 @propertycache
643 @propertycache
646 def _filerev(self):
644 def _filerev(self):
647 return self._filelog.rev(self._filenode)
645 return self._filelog.rev(self._filenode)
648
646
649 @propertycache
647 @propertycache
650 def _repopath(self):
648 def _repopath(self):
651 return self._path
649 return self._path
652
650
653 def __nonzero__(self):
651 def __nonzero__(self):
654 try:
652 try:
655 self._filenode
653 self._filenode
656 return True
654 return True
657 except error.LookupError:
655 except error.LookupError:
658 # file is missing
656 # file is missing
659 return False
657 return False
660
658
661 __bool__ = __nonzero__
659 __bool__ = __nonzero__
662
660
663 def __bytes__(self):
661 def __bytes__(self):
664 try:
662 try:
665 return "%s@%s" % (self.path(), self._changectx)
663 return "%s@%s" % (self.path(), self._changectx)
666 except error.LookupError:
664 except error.LookupError:
667 return "%s@???" % self.path()
665 return "%s@???" % self.path()
668
666
669 __str__ = encoding.strmethod(__bytes__)
667 __str__ = encoding.strmethod(__bytes__)
670
668
671 def __repr__(self):
669 def __repr__(self):
672 return r"<%s %s>" % (type(self).__name__, str(self))
670 return r"<%s %s>" % (type(self).__name__, str(self))
673
671
674 def __hash__(self):
672 def __hash__(self):
675 try:
673 try:
676 return hash((self._path, self._filenode))
674 return hash((self._path, self._filenode))
677 except AttributeError:
675 except AttributeError:
678 return id(self)
676 return id(self)
679
677
680 def __eq__(self, other):
678 def __eq__(self, other):
681 try:
679 try:
682 return (type(self) == type(other) and self._path == other._path
680 return (type(self) == type(other) and self._path == other._path
683 and self._filenode == other._filenode)
681 and self._filenode == other._filenode)
684 except AttributeError:
682 except AttributeError:
685 return False
683 return False
686
684
687 def __ne__(self, other):
685 def __ne__(self, other):
688 return not (self == other)
686 return not (self == other)
689
687
690 def filerev(self):
688 def filerev(self):
691 return self._filerev
689 return self._filerev
692 def filenode(self):
690 def filenode(self):
693 return self._filenode
691 return self._filenode
694 @propertycache
692 @propertycache
695 def _flags(self):
693 def _flags(self):
696 return self._changectx.flags(self._path)
694 return self._changectx.flags(self._path)
697 def flags(self):
695 def flags(self):
698 return self._flags
696 return self._flags
699 def filelog(self):
697 def filelog(self):
700 return self._filelog
698 return self._filelog
701 def rev(self):
699 def rev(self):
702 return self._changeid
700 return self._changeid
703 def linkrev(self):
701 def linkrev(self):
704 return self._filelog.linkrev(self._filerev)
702 return self._filelog.linkrev(self._filerev)
705 def node(self):
703 def node(self):
706 return self._changectx.node()
704 return self._changectx.node()
707 def hex(self):
705 def hex(self):
708 return self._changectx.hex()
706 return self._changectx.hex()
709 def user(self):
707 def user(self):
710 return self._changectx.user()
708 return self._changectx.user()
711 def date(self):
709 def date(self):
712 return self._changectx.date()
710 return self._changectx.date()
713 def files(self):
711 def files(self):
714 return self._changectx.files()
712 return self._changectx.files()
715 def description(self):
713 def description(self):
716 return self._changectx.description()
714 return self._changectx.description()
717 def branch(self):
715 def branch(self):
718 return self._changectx.branch()
716 return self._changectx.branch()
719 def extra(self):
717 def extra(self):
720 return self._changectx.extra()
718 return self._changectx.extra()
721 def phase(self):
719 def phase(self):
722 return self._changectx.phase()
720 return self._changectx.phase()
723 def phasestr(self):
721 def phasestr(self):
724 return self._changectx.phasestr()
722 return self._changectx.phasestr()
725 def obsolete(self):
723 def obsolete(self):
726 return self._changectx.obsolete()
724 return self._changectx.obsolete()
727 def instabilities(self):
725 def instabilities(self):
728 return self._changectx.instabilities()
726 return self._changectx.instabilities()
729 def manifest(self):
727 def manifest(self):
730 return self._changectx.manifest()
728 return self._changectx.manifest()
731 def changectx(self):
729 def changectx(self):
732 return self._changectx
730 return self._changectx
733 def renamed(self):
731 def renamed(self):
734 return self._copied
732 return self._copied
735 def repo(self):
733 def repo(self):
736 return self._repo
734 return self._repo
737 def size(self):
735 def size(self):
738 return len(self.data())
736 return len(self.data())
739
737
740 def path(self):
738 def path(self):
741 return self._path
739 return self._path
742
740
743 def isbinary(self):
741 def isbinary(self):
744 try:
742 try:
745 return stringutil.binary(self.data())
743 return stringutil.binary(self.data())
746 except IOError:
744 except IOError:
747 return False
745 return False
748 def isexec(self):
746 def isexec(self):
749 return 'x' in self.flags()
747 return 'x' in self.flags()
750 def islink(self):
748 def islink(self):
751 return 'l' in self.flags()
749 return 'l' in self.flags()
752
750
753 def isabsent(self):
751 def isabsent(self):
754 """whether this filectx represents a file not in self._changectx
752 """whether this filectx represents a file not in self._changectx
755
753
756 This is mainly for merge code to detect change/delete conflicts. This is
754 This is mainly for merge code to detect change/delete conflicts. This is
757 expected to be True for all subclasses of basectx."""
755 expected to be True for all subclasses of basectx."""
758 return False
756 return False
759
757
760 _customcmp = False
758 _customcmp = False
761 def cmp(self, fctx):
759 def cmp(self, fctx):
762 """compare with other file context
760 """compare with other file context
763
761
764 returns True if different than fctx.
762 returns True if different than fctx.
765 """
763 """
766 if fctx._customcmp:
764 if fctx._customcmp:
767 return fctx.cmp(self)
765 return fctx.cmp(self)
768
766
769 if (fctx._filenode is None
767 if (fctx._filenode is None
770 and (self._repo._encodefilterpats
768 and (self._repo._encodefilterpats
771 # if file data starts with '\1\n', empty metadata block is
769 # if file data starts with '\1\n', empty metadata block is
772 # prepended, which adds 4 bytes to filelog.size().
770 # prepended, which adds 4 bytes to filelog.size().
773 or self.size() - 4 == fctx.size())
771 or self.size() - 4 == fctx.size())
774 or self.size() == fctx.size()):
772 or self.size() == fctx.size()):
775 return self._filelog.cmp(self._filenode, fctx.data())
773 return self._filelog.cmp(self._filenode, fctx.data())
776
774
777 return True
775 return True
778
776
779 def _adjustlinkrev(self, srcrev, inclusive=False):
777 def _adjustlinkrev(self, srcrev, inclusive=False):
780 """return the first ancestor of <srcrev> introducing <fnode>
778 """return the first ancestor of <srcrev> introducing <fnode>
781
779
782 If the linkrev of the file revision does not point to an ancestor of
780 If the linkrev of the file revision does not point to an ancestor of
783 srcrev, we'll walk down the ancestors until we find one introducing
781 srcrev, we'll walk down the ancestors until we find one introducing
784 this file revision.
782 this file revision.
785
783
786 :srcrev: the changeset revision we search ancestors from
784 :srcrev: the changeset revision we search ancestors from
787 :inclusive: if true, the src revision will also be checked
785 :inclusive: if true, the src revision will also be checked
788 """
786 """
789 repo = self._repo
787 repo = self._repo
790 cl = repo.unfiltered().changelog
788 cl = repo.unfiltered().changelog
791 mfl = repo.manifestlog
789 mfl = repo.manifestlog
792 # fetch the linkrev
790 # fetch the linkrev
793 lkr = self.linkrev()
791 lkr = self.linkrev()
794 # hack to reuse ancestor computation when searching for renames
792 # hack to reuse ancestor computation when searching for renames
795 memberanc = getattr(self, '_ancestrycontext', None)
793 memberanc = getattr(self, '_ancestrycontext', None)
796 iteranc = None
794 iteranc = None
797 if srcrev is None:
795 if srcrev is None:
798 # wctx case, used by workingfilectx during mergecopy
796 # wctx case, used by workingfilectx during mergecopy
799 revs = [p.rev() for p in self._repo[None].parents()]
797 revs = [p.rev() for p in self._repo[None].parents()]
800 inclusive = True # we skipped the real (revless) source
798 inclusive = True # we skipped the real (revless) source
801 else:
799 else:
802 revs = [srcrev]
800 revs = [srcrev]
803 if memberanc is None:
801 if memberanc is None:
804 memberanc = iteranc = cl.ancestors(revs, lkr,
802 memberanc = iteranc = cl.ancestors(revs, lkr,
805 inclusive=inclusive)
803 inclusive=inclusive)
806 # check if this linkrev is an ancestor of srcrev
804 # check if this linkrev is an ancestor of srcrev
807 if lkr not in memberanc:
805 if lkr not in memberanc:
808 if iteranc is None:
806 if iteranc is None:
809 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
807 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
810 fnode = self._filenode
808 fnode = self._filenode
811 path = self._path
809 path = self._path
812 for a in iteranc:
810 for a in iteranc:
813 ac = cl.read(a) # get changeset data (we avoid object creation)
811 ac = cl.read(a) # get changeset data (we avoid object creation)
814 if path in ac[3]: # checking the 'files' field.
812 if path in ac[3]: # checking the 'files' field.
815 # The file has been touched, check if the content is
813 # The file has been touched, check if the content is
816 # similar to the one we search for.
814 # similar to the one we search for.
817 if fnode == mfl[ac[0]].readfast().get(path):
815 if fnode == mfl[ac[0]].readfast().get(path):
818 return a
816 return a
819 # In theory, we should never get out of that loop without a result.
817 # In theory, we should never get out of that loop without a result.
820 # But if manifest uses a buggy file revision (not children of the
818 # But if manifest uses a buggy file revision (not children of the
821 # one it replaces) we could. Such a buggy situation will likely
819 # one it replaces) we could. Such a buggy situation will likely
822 # result in a crash somewhere else at some point.
820 # result in a crash somewhere else at some point.
823 return lkr
821 return lkr
824
822
825 def introrev(self):
823 def introrev(self):
826 """return the rev of the changeset which introduced this file revision
824 """return the rev of the changeset which introduced this file revision
827
825
828 This method is different from linkrev because it takes into account the
826 This method is different from linkrev because it takes into account the
829 changeset the filectx was created from. It ensures the returned
827 changeset the filectx was created from. It ensures the returned
830 revision is one of its ancestors. This prevents bugs from
828 revision is one of its ancestors. This prevents bugs from
831 'linkrev-shadowing' when a file revision is used by multiple
829 'linkrev-shadowing' when a file revision is used by multiple
832 changesets.
830 changesets.
833 """
831 """
834 lkr = self.linkrev()
832 lkr = self.linkrev()
835 attrs = vars(self)
833 attrs = vars(self)
836 noctx = not (r'_changeid' in attrs or r'_changectx' in attrs)
834 noctx = not (r'_changeid' in attrs or r'_changectx' in attrs)
837 if noctx or self.rev() == lkr:
835 if noctx or self.rev() == lkr:
838 return self.linkrev()
836 return self.linkrev()
839 return self._adjustlinkrev(self.rev(), inclusive=True)
837 return self._adjustlinkrev(self.rev(), inclusive=True)
840
838
841 def introfilectx(self):
839 def introfilectx(self):
842 """Return filectx having identical contents, but pointing to the
840 """Return filectx having identical contents, but pointing to the
843 changeset revision where this filectx was introduced"""
841 changeset revision where this filectx was introduced"""
844 introrev = self.introrev()
842 introrev = self.introrev()
845 if self.rev() == introrev:
843 if self.rev() == introrev:
846 return self
844 return self
847 return self.filectx(self.filenode(), changeid=introrev)
845 return self.filectx(self.filenode(), changeid=introrev)
848
846
849 def _parentfilectx(self, path, fileid, filelog):
847 def _parentfilectx(self, path, fileid, filelog):
850 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
848 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
851 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
849 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
852 if r'_changeid' in vars(self) or r'_changectx' in vars(self):
850 if r'_changeid' in vars(self) or r'_changectx' in vars(self):
853 # If self is associated with a changeset (probably explicitly
851 # If self is associated with a changeset (probably explicitly
854 # fed), ensure the created filectx is associated with a
852 # fed), ensure the created filectx is associated with a
855 # changeset that is an ancestor of self.changectx.
853 # changeset that is an ancestor of self.changectx.
856 # This lets us later use _adjustlinkrev to get a correct link.
854 # This lets us later use _adjustlinkrev to get a correct link.
857 fctx._descendantrev = self.rev()
855 fctx._descendantrev = self.rev()
858 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
856 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
859 elif r'_descendantrev' in vars(self):
857 elif r'_descendantrev' in vars(self):
860 # Otherwise propagate _descendantrev if we have one associated.
858 # Otherwise propagate _descendantrev if we have one associated.
861 fctx._descendantrev = self._descendantrev
859 fctx._descendantrev = self._descendantrev
862 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
860 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
863 return fctx
861 return fctx
864
862
865 def parents(self):
863 def parents(self):
866 _path = self._path
864 _path = self._path
867 fl = self._filelog
865 fl = self._filelog
868 parents = self._filelog.parents(self._filenode)
866 parents = self._filelog.parents(self._filenode)
869 pl = [(_path, node, fl) for node in parents if node != nullid]
867 pl = [(_path, node, fl) for node in parents if node != nullid]
870
868
871 r = fl.renamed(self._filenode)
869 r = fl.renamed(self._filenode)
872 if r:
870 if r:
873 # - In the simple rename case, both parents are nullid, pl is empty.
871 # - In the simple rename case, both parents are nullid, pl is empty.
874 # - In case of merge, only one of the parents is nullid and should
872 # - In case of merge, only one of the parents is nullid and should
875 # be replaced with the rename information. This parent is -always-
873 # be replaced with the rename information. This parent is -always-
876 # the first one.
874 # the first one.
877 #
875 #
878 # As nullid has always been filtered out in the previous list
876 # As nullid has always been filtered out in the previous list
879 # comprehension, inserting at 0 will always result in "replacing the
877 # comprehension, inserting at 0 will always result in "replacing the
880 # first nullid parent" with rename information.
878 # first nullid parent" with rename information.
881 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
879 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
882
880
883 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
881 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
884
882
885 def p1(self):
883 def p1(self):
886 return self.parents()[0]
884 return self.parents()[0]
887
885
888 def p2(self):
886 def p2(self):
889 p = self.parents()
887 p = self.parents()
890 if len(p) == 2:
888 if len(p) == 2:
891 return p[1]
889 return p[1]
892 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
890 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
893
891
894 def annotate(self, follow=False, skiprevs=None, diffopts=None):
892 def annotate(self, follow=False, skiprevs=None, diffopts=None):
895 """Returns a list of annotateline objects for each line in the file
893 """Returns a list of annotateline objects for each line in the file
896
894
897 - line.fctx is the filectx of the node where that line was last changed
895 - line.fctx is the filectx of the node where that line was last changed
898 - line.lineno is the line number at the first appearance in the managed
896 - line.lineno is the line number at the first appearance in the managed
899 file
897 file
900 - line.text is the data on that line (including newline character)
898 - line.text is the data on that line (including newline character)
901 """
899 """
902 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
900 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
903
901
904 def parents(f):
902 def parents(f):
905 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
903 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
906 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
904 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
907 # from the topmost introrev (= srcrev) down to p.linkrev() if it
905 # from the topmost introrev (= srcrev) down to p.linkrev() if it
908 # isn't an ancestor of the srcrev.
906 # isn't an ancestor of the srcrev.
909 f._changeid
907 f._changeid
910 pl = f.parents()
908 pl = f.parents()
911
909
912 # Don't return renamed parents if we aren't following.
910 # Don't return renamed parents if we aren't following.
913 if not follow:
911 if not follow:
914 pl = [p for p in pl if p.path() == f.path()]
912 pl = [p for p in pl if p.path() == f.path()]
915
913
916 # renamed filectx won't have a filelog yet, so set it
914 # renamed filectx won't have a filelog yet, so set it
917 # from the cache to save time
915 # from the cache to save time
918 for p in pl:
916 for p in pl:
919 if not r'_filelog' in p.__dict__:
917 if not r'_filelog' in p.__dict__:
920 p._filelog = getlog(p.path())
918 p._filelog = getlog(p.path())
921
919
922 return pl
920 return pl
923
921
924 # use linkrev to find the first changeset where self appeared
922 # use linkrev to find the first changeset where self appeared
925 base = self.introfilectx()
923 base = self.introfilectx()
926 if getattr(base, '_ancestrycontext', None) is None:
924 if getattr(base, '_ancestrycontext', None) is None:
927 cl = self._repo.changelog
925 cl = self._repo.changelog
928 if base.rev() is None:
926 if base.rev() is None:
929 # wctx is not inclusive, but works because _ancestrycontext
927 # wctx is not inclusive, but works because _ancestrycontext
930 # is used to test filelog revisions
928 # is used to test filelog revisions
931 ac = cl.ancestors([p.rev() for p in base.parents()],
929 ac = cl.ancestors([p.rev() for p in base.parents()],
932 inclusive=True)
930 inclusive=True)
933 else:
931 else:
934 ac = cl.ancestors([base.rev()], inclusive=True)
932 ac = cl.ancestors([base.rev()], inclusive=True)
935 base._ancestrycontext = ac
933 base._ancestrycontext = ac
936
934
937 return dagop.annotate(base, parents, skiprevs=skiprevs,
935 return dagop.annotate(base, parents, skiprevs=skiprevs,
938 diffopts=diffopts)
936 diffopts=diffopts)
939
937
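# --- Illustrative sketch (editor addition, not part of this module) ---
# A hedged example of how the annotate() API above might be consumed from
# client code. The repository path '.' and the file name b'README' are
# hypothetical placeholders; the attribute names follow the docstring above.
from mercurial import hg, ui as uimod

def print_blame(path=b'README'):
    repo = hg.repository(uimod.ui.load(), b'.')
    fctx = repo[b'tip'][path]
    for line in fctx.annotate(follow=True):
        # each annotateline carries the introducing filectx, the line number
        # at first appearance, and the line text itself
        print(b'%d:%d: %s' % (line.fctx.rev(), line.lineno,
                              line.text.rstrip(b'\n')))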
940 def ancestors(self, followfirst=False):
938 def ancestors(self, followfirst=False):
941 visit = {}
939 visit = {}
942 c = self
940 c = self
943 if followfirst:
941 if followfirst:
944 cut = 1
942 cut = 1
945 else:
943 else:
946 cut = None
944 cut = None
947
945
948 while True:
946 while True:
949 for parent in c.parents()[:cut]:
947 for parent in c.parents()[:cut]:
950 visit[(parent.linkrev(), parent.filenode())] = parent
948 visit[(parent.linkrev(), parent.filenode())] = parent
951 if not visit:
949 if not visit:
952 break
950 break
953 c = visit.pop(max(visit))
951 c = visit.pop(max(visit))
954 yield c
952 yield c
955
953
956 def decodeddata(self):
954 def decodeddata(self):
957 """Returns `data()` after running repository decoding filters.
955 """Returns `data()` after running repository decoding filters.
958
956
959 This is often equivalent to how the data would be expressed on disk.
957 This is often equivalent to how the data would be expressed on disk.
960 """
958 """
961 return self._repo.wwritedata(self.path(), self.data())
959 return self._repo.wwritedata(self.path(), self.data())
962
960
963 class filectx(basefilectx):
961 class filectx(basefilectx):
964 """A filecontext object makes access to data related to a particular
962 """A filecontext object makes access to data related to a particular
965 filerevision convenient."""
963 filerevision convenient."""
966 def __init__(self, repo, path, changeid=None, fileid=None,
964 def __init__(self, repo, path, changeid=None, fileid=None,
967 filelog=None, changectx=None):
965 filelog=None, changectx=None):
968 """changeid can be a changeset revision, node, or tag.
966 """changeid can be a changeset revision, node, or tag.
969 fileid can be a file revision or node."""
967 fileid can be a file revision or node."""
970 self._repo = repo
968 self._repo = repo
971 self._path = path
969 self._path = path
972
970
973 assert (changeid is not None
971 assert (changeid is not None
974 or fileid is not None
972 or fileid is not None
975 or changectx is not None), \
973 or changectx is not None), \
976 ("bad args: changeid=%r, fileid=%r, changectx=%r"
974 ("bad args: changeid=%r, fileid=%r, changectx=%r"
977 % (changeid, fileid, changectx))
975 % (changeid, fileid, changectx))
978
976
979 if filelog is not None:
977 if filelog is not None:
980 self._filelog = filelog
978 self._filelog = filelog
981
979
982 if changeid is not None:
980 if changeid is not None:
983 self._changeid = changeid
981 self._changeid = changeid
984 if changectx is not None:
982 if changectx is not None:
985 self._changectx = changectx
983 self._changectx = changectx
986 if fileid is not None:
984 if fileid is not None:
987 self._fileid = fileid
985 self._fileid = fileid
988
986
989 @propertycache
987 @propertycache
990 def _changectx(self):
988 def _changectx(self):
991 try:
989 try:
992 return changectx(self._repo, self._changeid)
990 return changectx(self._repo, self._changeid)
993 except error.FilteredRepoLookupError:
991 except error.FilteredRepoLookupError:
994 # Linkrev may point to any revision in the repository. When the
992 # Linkrev may point to any revision in the repository. When the
995 # repository is filtered this may lead to `filectx` trying to build
993 # repository is filtered this may lead to `filectx` trying to build
996 # `changectx` for a filtered revision. In such a case we fall back to
994 # `changectx` for a filtered revision. In such a case we fall back to
997 # creating `changectx` on the unfiltered version of the repository.
995 # creating `changectx` on the unfiltered version of the repository.
998 # This fallback should not be an issue because `changectx` objects from
996 # This fallback should not be an issue because `changectx` objects from
999 # `filectx` are not used in complex operations that care about
997 # `filectx` are not used in complex operations that care about
1000 # filtering.
998 # filtering.
1001 #
999 #
1002 # This fallback is a cheap and dirty fix that prevents several
1000 # This fallback is a cheap and dirty fix that prevents several
1003 # crashes. It does not ensure the behavior is correct. However the
1001 # crashes. It does not ensure the behavior is correct. However the
1004 # behavior was not correct before filtering either and "incorrect
1002 # behavior was not correct before filtering either and "incorrect
1005 # behavior" is seen as better than "crash".
1003 # behavior" is seen as better than "crash".
1006 #
1004 #
1007 # Linkrevs have several serious problems with filtering that are
1005 # Linkrevs have several serious problems with filtering that are
1008 # complicated to solve. Proper handling of the issue here should be
1006 # complicated to solve. Proper handling of the issue here should be
1009 # considered once solving the linkrev issues is on the table.
1007 # considered once solving the linkrev issues is on the table.
1010 return changectx(self._repo.unfiltered(), self._changeid)
1008 return changectx(self._repo.unfiltered(), self._changeid)
1011
1009
1012 def filectx(self, fileid, changeid=None):
1010 def filectx(self, fileid, changeid=None):
1013 '''opens an arbitrary revision of the file without
1011 '''opens an arbitrary revision of the file without
1014 opening a new filelog'''
1012 opening a new filelog'''
1015 return filectx(self._repo, self._path, fileid=fileid,
1013 return filectx(self._repo, self._path, fileid=fileid,
1016 filelog=self._filelog, changeid=changeid)
1014 filelog=self._filelog, changeid=changeid)
1017
1015
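# --- Illustrative sketch (editor addition, not part of this module) ---
# filectx() above lets a caller jump to another revision of the same file
# while reusing the already-open filelog. A hedged example: walking the file
# back one filelog revision at a time ('fctx' is assumed to be an existing
# filectx for a tracked file).
def iter_file_revisions(fctx):
    frev = fctx.filerev()
    while frev >= 0:
        # each yielded context shares the filelog of the original fctx
        yield fctx.filectx(frev)
        frev -= 1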
1018 def rawdata(self):
1016 def rawdata(self):
1019 return self._filelog.revision(self._filenode, raw=True)
1017 return self._filelog.revision(self._filenode, raw=True)
1020
1018
1021 def rawflags(self):
1019 def rawflags(self):
1022 """low-level revlog flags"""
1020 """low-level revlog flags"""
1023 return self._filelog.flags(self._filerev)
1021 return self._filelog.flags(self._filerev)
1024
1022
1025 def data(self):
1023 def data(self):
1026 try:
1024 try:
1027 return self._filelog.read(self._filenode)
1025 return self._filelog.read(self._filenode)
1028 except error.CensoredNodeError:
1026 except error.CensoredNodeError:
1029 if self._repo.ui.config("censor", "policy") == "ignore":
1027 if self._repo.ui.config("censor", "policy") == "ignore":
1030 return ""
1028 return ""
1031 raise error.Abort(_("censored node: %s") % short(self._filenode),
1029 raise error.Abort(_("censored node: %s") % short(self._filenode),
1032 hint=_("set censor.policy to ignore errors"))
1030 hint=_("set censor.policy to ignore errors"))
1033
1031
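# --- Illustrative sketch (editor addition, not part of this module) ---
# data() above aborts on censored file nodes unless censor.policy is set to
# "ignore", in which case it returns an empty string. A hedged example of
# flipping that knob programmatically; 'repo', 'rev' and 'path' are assumed
# placeholders for an open localrepository, a revision and a file name.
def read_possibly_censored(repo, rev, path):
    repo.ui.setconfig('censor', 'policy', 'ignore', 'example')
    return repo[rev][path].data()  # '' if the node is censored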
1034 def size(self):
1032 def size(self):
1035 return self._filelog.size(self._filerev)
1033 return self._filelog.size(self._filerev)
1036
1034
1037 @propertycache
1035 @propertycache
1038 def _copied(self):
1036 def _copied(self):
1039 """check if file was actually renamed in this changeset revision
1037 """check if file was actually renamed in this changeset revision
1040
1038
1041 If a rename is logged in the file revision, we report the copy for the
1039 If a rename is logged in the file revision, we report the copy for the
1042 changeset only if the file revision's linkrev points back to the changeset
1040 changeset only if the file revision's linkrev points back to the changeset
1043 in question or both changeset parents contain different file revisions.
1041 in question or both changeset parents contain different file revisions.
1044 """
1042 """
1045
1043
1046 renamed = self._filelog.renamed(self._filenode)
1044 renamed = self._filelog.renamed(self._filenode)
1047 if not renamed:
1045 if not renamed:
1048 return renamed
1046 return renamed
1049
1047
1050 if self.rev() == self.linkrev():
1048 if self.rev() == self.linkrev():
1051 return renamed
1049 return renamed
1052
1050
1053 name = self.path()
1051 name = self.path()
1054 fnode = self._filenode
1052 fnode = self._filenode
1055 for p in self._changectx.parents():
1053 for p in self._changectx.parents():
1056 try:
1054 try:
1057 if fnode == p.filenode(name):
1055 if fnode == p.filenode(name):
1058 return None
1056 return None
1059 except error.LookupError:
1057 except error.LookupError:
1060 pass
1058 pass
1061 return renamed
1059 return renamed
1062
1060
1063 def children(self):
1061 def children(self):
1064 # hard for renames
1062 # hard for renames
1065 c = self._filelog.children(self._filenode)
1063 c = self._filelog.children(self._filenode)
1066 return [filectx(self._repo, self._path, fileid=x,
1064 return [filectx(self._repo, self._path, fileid=x,
1067 filelog=self._filelog) for x in c]
1065 filelog=self._filelog) for x in c]
1068
1066
1069 class committablectx(basectx):
1067 class committablectx(basectx):
1070 """A committablectx object provides common functionality for a context that
1068 """A committablectx object provides common functionality for a context that
1071 wants the ability to commit, e.g. workingctx or memctx."""
1069 wants the ability to commit, e.g. workingctx or memctx."""
1072 def __init__(self, repo, text="", user=None, date=None, extra=None,
1070 def __init__(self, repo, text="", user=None, date=None, extra=None,
1073 changes=None):
1071 changes=None):
1074 super(committablectx, self).__init__(repo)
1072 super(committablectx, self).__init__(repo)
1075 self._rev = None
1073 self._rev = None
1076 self._node = None
1074 self._node = None
1077 self._text = text
1075 self._text = text
1078 if date:
1076 if date:
1079 self._date = dateutil.parsedate(date)
1077 self._date = dateutil.parsedate(date)
1080 if user:
1078 if user:
1081 self._user = user
1079 self._user = user
1082 if changes:
1080 if changes:
1083 self._status = changes
1081 self._status = changes
1084
1082
1085 self._extra = {}
1083 self._extra = {}
1086 if extra:
1084 if extra:
1087 self._extra = extra.copy()
1085 self._extra = extra.copy()
1088 if 'branch' not in self._extra:
1086 if 'branch' not in self._extra:
1089 try:
1087 try:
1090 branch = encoding.fromlocal(self._repo.dirstate.branch())
1088 branch = encoding.fromlocal(self._repo.dirstate.branch())
1091 except UnicodeDecodeError:
1089 except UnicodeDecodeError:
1092 raise error.Abort(_('branch name not in UTF-8!'))
1090 raise error.Abort(_('branch name not in UTF-8!'))
1093 self._extra['branch'] = branch
1091 self._extra['branch'] = branch
1094 if self._extra['branch'] == '':
1092 if self._extra['branch'] == '':
1095 self._extra['branch'] = 'default'
1093 self._extra['branch'] = 'default'
1096
1094
1097 def __bytes__(self):
1095 def __bytes__(self):
1098 return bytes(self._parents[0]) + "+"
1096 return bytes(self._parents[0]) + "+"
1099
1097
1100 __str__ = encoding.strmethod(__bytes__)
1098 __str__ = encoding.strmethod(__bytes__)
1101
1099
1102 def __nonzero__(self):
1100 def __nonzero__(self):
1103 return True
1101 return True
1104
1102
1105 __bool__ = __nonzero__
1103 __bool__ = __nonzero__
1106
1104
1107 def _buildflagfunc(self):
1105 def _buildflagfunc(self):
1108 # Create a fallback function for getting file flags when the
1106 # Create a fallback function for getting file flags when the
1109 # filesystem doesn't support them
1107 # filesystem doesn't support them
1110
1108
1111 copiesget = self._repo.dirstate.copies().get
1109 copiesget = self._repo.dirstate.copies().get
1112 parents = self.parents()
1110 parents = self.parents()
1113 if len(parents) < 2:
1111 if len(parents) < 2:
1114 # when we have one parent, it's easy: copy from parent
1112 # when we have one parent, it's easy: copy from parent
1115 man = parents[0].manifest()
1113 man = parents[0].manifest()
1116 def func(f):
1114 def func(f):
1117 f = copiesget(f, f)
1115 f = copiesget(f, f)
1118 return man.flags(f)
1116 return man.flags(f)
1119 else:
1117 else:
1120 # merges are tricky: we try to reconstruct the unstored
1118 # merges are tricky: we try to reconstruct the unstored
1121 # result from the merge (issue1802)
1119 # result from the merge (issue1802)
1122 p1, p2 = parents
1120 p1, p2 = parents
1123 pa = p1.ancestor(p2)
1121 pa = p1.ancestor(p2)
1124 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1122 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1125
1123
1126 def func(f):
1124 def func(f):
1127 f = copiesget(f, f) # may be wrong for merges with copies
1125 f = copiesget(f, f) # may be wrong for merges with copies
1128 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1126 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1129 if fl1 == fl2:
1127 if fl1 == fl2:
1130 return fl1
1128 return fl1
1131 if fl1 == fla:
1129 if fl1 == fla:
1132 return fl2
1130 return fl2
1133 if fl2 == fla:
1131 if fl2 == fla:
1134 return fl1
1132 return fl1
1135 return '' # punt for conflicts
1133 return '' # punt for conflicts
1136
1134
1137 return func
1135 return func
1138
1136
1139 @propertycache
1137 @propertycache
1140 def _flagfunc(self):
1138 def _flagfunc(self):
1141 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1139 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1142
1140
1143 @propertycache
1141 @propertycache
1144 def _status(self):
1142 def _status(self):
1145 return self._repo.status()
1143 return self._repo.status()
1146
1144
1147 @propertycache
1145 @propertycache
1148 def _user(self):
1146 def _user(self):
1149 return self._repo.ui.username()
1147 return self._repo.ui.username()
1150
1148
1151 @propertycache
1149 @propertycache
1152 def _date(self):
1150 def _date(self):
1153 ui = self._repo.ui
1151 ui = self._repo.ui
1154 date = ui.configdate('devel', 'default-date')
1152 date = ui.configdate('devel', 'default-date')
1155 if date is None:
1153 if date is None:
1156 date = dateutil.makedate()
1154 date = dateutil.makedate()
1157 return date
1155 return date
1158
1156
1159 def subrev(self, subpath):
1157 def subrev(self, subpath):
1160 return None
1158 return None
1161
1159
1162 def manifestnode(self):
1160 def manifestnode(self):
1163 return None
1161 return None
1164 def user(self):
1162 def user(self):
1165 return self._user or self._repo.ui.username()
1163 return self._user or self._repo.ui.username()
1166 def date(self):
1164 def date(self):
1167 return self._date
1165 return self._date
1168 def description(self):
1166 def description(self):
1169 return self._text
1167 return self._text
1170 def files(self):
1168 def files(self):
1171 return sorted(self._status.modified + self._status.added +
1169 return sorted(self._status.modified + self._status.added +
1172 self._status.removed)
1170 self._status.removed)
1173
1171
1174 def modified(self):
1172 def modified(self):
1175 return self._status.modified
1173 return self._status.modified
1176 def added(self):
1174 def added(self):
1177 return self._status.added
1175 return self._status.added
1178 def removed(self):
1176 def removed(self):
1179 return self._status.removed
1177 return self._status.removed
1180 def deleted(self):
1178 def deleted(self):
1181 return self._status.deleted
1179 return self._status.deleted
1182 def branch(self):
1180 def branch(self):
1183 return encoding.tolocal(self._extra['branch'])
1181 return encoding.tolocal(self._extra['branch'])
1184 def closesbranch(self):
1182 def closesbranch(self):
1185 return 'close' in self._extra
1183 return 'close' in self._extra
1186 def extra(self):
1184 def extra(self):
1187 return self._extra
1185 return self._extra
1188
1186
1189 def isinmemory(self):
1187 def isinmemory(self):
1190 return False
1188 return False
1191
1189
1192 def tags(self):
1190 def tags(self):
1193 return []
1191 return []
1194
1192
1195 def bookmarks(self):
1193 def bookmarks(self):
1196 b = []
1194 b = []
1197 for p in self.parents():
1195 for p in self.parents():
1198 b.extend(p.bookmarks())
1196 b.extend(p.bookmarks())
1199 return b
1197 return b
1200
1198
1201 def phase(self):
1199 def phase(self):
1202 phase = phases.draft # default phase to draft
1200 phase = phases.draft # default phase to draft
1203 for p in self.parents():
1201 for p in self.parents():
1204 phase = max(phase, p.phase())
1202 phase = max(phase, p.phase())
1205 return phase
1203 return phase
1206
1204
1207 def hidden(self):
1205 def hidden(self):
1208 return False
1206 return False
1209
1207
1210 def children(self):
1208 def children(self):
1211 return []
1209 return []
1212
1210
1213 def flags(self, path):
1211 def flags(self, path):
1214 if r'_manifest' in self.__dict__:
1212 if r'_manifest' in self.__dict__:
1215 try:
1213 try:
1216 return self._manifest.flags(path)
1214 return self._manifest.flags(path)
1217 except KeyError:
1215 except KeyError:
1218 return ''
1216 return ''
1219
1217
1220 try:
1218 try:
1221 return self._flagfunc(path)
1219 return self._flagfunc(path)
1222 except OSError:
1220 except OSError:
1223 return ''
1221 return ''
1224
1222
1225 def ancestor(self, c2):
1223 def ancestor(self, c2):
1226 """return the "best" ancestor context of self and c2"""
1224 """return the "best" ancestor context of self and c2"""
1227 return self._parents[0].ancestor(c2) # punt on two parents for now
1225 return self._parents[0].ancestor(c2) # punt on two parents for now
1228
1226
1229 def walk(self, match):
1227 def walk(self, match):
1230 '''Generates matching file names.'''
1228 '''Generates matching file names.'''
1231 return sorted(self._repo.dirstate.walk(match,
1229 return sorted(self._repo.dirstate.walk(match,
1232 subrepos=sorted(self.substate),
1230 subrepos=sorted(self.substate),
1233 unknown=True, ignored=False))
1231 unknown=True, ignored=False))
1234
1232
1235 def matches(self, match):
1233 def matches(self, match):
1236 ds = self._repo.dirstate
1234 ds = self._repo.dirstate
1237 return sorted(f for f in ds.matches(match) if ds[f] != 'r')
1235 return sorted(f for f in ds.matches(match) if ds[f] != 'r')
1238
1236
1239 def ancestors(self):
1237 def ancestors(self):
1240 for p in self._parents:
1238 for p in self._parents:
1241 yield p
1239 yield p
1242 for a in self._repo.changelog.ancestors(
1240 for a in self._repo.changelog.ancestors(
1243 [p.rev() for p in self._parents]):
1241 [p.rev() for p in self._parents]):
1244 yield changectx(self._repo, a)
1242 yield changectx(self._repo, a)
1245
1243
1246 def markcommitted(self, node):
1244 def markcommitted(self, node):
1247 """Perform post-commit cleanup necessary after committing this ctx
1245 """Perform post-commit cleanup necessary after committing this ctx
1248
1246
1249 Specifically, this updates backing stores this working context
1247 Specifically, this updates backing stores this working context
1250 wraps to reflect the fact that the changes reflected by this
1248 wraps to reflect the fact that the changes reflected by this
1251 workingctx have been committed. For example, it marks
1249 workingctx have been committed. For example, it marks
1252 modified and added files as normal in the dirstate.
1250 modified and added files as normal in the dirstate.
1253
1251
1254 """
1252 """
1255
1253
1256 with self._repo.dirstate.parentchange():
1254 with self._repo.dirstate.parentchange():
1257 for f in self.modified() + self.added():
1255 for f in self.modified() + self.added():
1258 self._repo.dirstate.normal(f)
1256 self._repo.dirstate.normal(f)
1259 for f in self.removed():
1257 for f in self.removed():
1260 self._repo.dirstate.drop(f)
1258 self._repo.dirstate.drop(f)
1261 self._repo.dirstate.setparents(node)
1259 self._repo.dirstate.setparents(node)
1262
1260
1263 # write changes out explicitly, because nesting wlock at
1261 # write changes out explicitly, because nesting wlock at
1264 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1262 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1265 # from immediately doing so for subsequent changing files
1263 # from immediately doing so for subsequent changing files
1266 self._repo.dirstate.write(self._repo.currenttransaction())
1264 self._repo.dirstate.write(self._repo.currenttransaction())
1267
1265
1268 def dirty(self, missing=False, merge=True, branch=True):
1266 def dirty(self, missing=False, merge=True, branch=True):
1269 return False
1267 return False
1270
1268
1271 class workingctx(committablectx):
1269 class workingctx(committablectx):
1272 """A workingctx object makes access to data related to
1270 """A workingctx object makes access to data related to
1273 the current working directory convenient.
1271 the current working directory convenient.
1274 date - any valid date string or (unixtime, offset), or None.
1272 date - any valid date string or (unixtime, offset), or None.
1275 user - username string, or None.
1273 user - username string, or None.
1276 extra - a dictionary of extra values, or None.
1274 extra - a dictionary of extra values, or None.
1277 changes - a list of file lists as returned by localrepo.status()
1275 changes - a list of file lists as returned by localrepo.status()
1278 or None to use the repository status.
1276 or None to use the repository status.
1279 """
1277 """
1280 def __init__(self, repo, text="", user=None, date=None, extra=None,
1278 def __init__(self, repo, text="", user=None, date=None, extra=None,
1281 changes=None):
1279 changes=None):
1282 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1280 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1283
1281
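# --- Illustrative sketch (editor addition, not part of this module) ---
# In practice a workingctx is usually obtained as repo[None] rather than
# constructed directly. A hedged example; 'repo' is assumed to be an open
# localrepository.
def summarize_working_copy(repo):
    wctx = repo[None]                     # workingctx for the working dir
    return {
        'branch': wctx.branch(),
        'dirty': wctx.dirty(missing=True),
        'parents': [p.hex() for p in wctx.parents()],
    }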
1284 def __iter__(self):
1282 def __iter__(self):
1285 d = self._repo.dirstate
1283 d = self._repo.dirstate
1286 for f in d:
1284 for f in d:
1287 if d[f] != 'r':
1285 if d[f] != 'r':
1288 yield f
1286 yield f
1289
1287
1290 def __contains__(self, key):
1288 def __contains__(self, key):
1291 return self._repo.dirstate[key] not in "?r"
1289 return self._repo.dirstate[key] not in "?r"
1292
1290
1293 def hex(self):
1291 def hex(self):
1294 return hex(wdirid)
1292 return hex(wdirid)
1295
1293
1296 @propertycache
1294 @propertycache
1297 def _parents(self):
1295 def _parents(self):
1298 p = self._repo.dirstate.parents()
1296 p = self._repo.dirstate.parents()
1299 if p[1] == nullid:
1297 if p[1] == nullid:
1300 p = p[:-1]
1298 p = p[:-1]
1301 return [changectx(self._repo, x) for x in p]
1299 return [changectx(self._repo, x) for x in p]
1302
1300
1303 def _fileinfo(self, path):
1301 def _fileinfo(self, path):
1304 # populate __dict__['_manifest'] as workingctx has no _manifestdelta
1302 # populate __dict__['_manifest'] as workingctx has no _manifestdelta
1305 self._manifest
1303 self._manifest
1306 return super(workingctx, self)._fileinfo(path)
1304 return super(workingctx, self)._fileinfo(path)
1307
1305
1308 def filectx(self, path, filelog=None):
1306 def filectx(self, path, filelog=None):
1309 """get a file context from the working directory"""
1307 """get a file context from the working directory"""
1310 return workingfilectx(self._repo, path, workingctx=self,
1308 return workingfilectx(self._repo, path, workingctx=self,
1311 filelog=filelog)
1309 filelog=filelog)
1312
1310
1313 def dirty(self, missing=False, merge=True, branch=True):
1311 def dirty(self, missing=False, merge=True, branch=True):
1314 "check whether a working directory is modified"
1312 "check whether a working directory is modified"
1315 # check subrepos first
1313 # check subrepos first
1316 for s in sorted(self.substate):
1314 for s in sorted(self.substate):
1317 if self.sub(s).dirty(missing=missing):
1315 if self.sub(s).dirty(missing=missing):
1318 return True
1316 return True
1319 # check current working dir
1317 # check current working dir
1320 return ((merge and self.p2()) or
1318 return ((merge and self.p2()) or
1321 (branch and self.branch() != self.p1().branch()) or
1319 (branch and self.branch() != self.p1().branch()) or
1322 self.modified() or self.added() or self.removed() or
1320 self.modified() or self.added() or self.removed() or
1323 (missing and self.deleted()))
1321 (missing and self.deleted()))
1324
1322
1325 def add(self, list, prefix=""):
1323 def add(self, list, prefix=""):
1326 with self._repo.wlock():
1324 with self._repo.wlock():
1327 ui, ds = self._repo.ui, self._repo.dirstate
1325 ui, ds = self._repo.ui, self._repo.dirstate
1328 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1326 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1329 rejected = []
1327 rejected = []
1330 lstat = self._repo.wvfs.lstat
1328 lstat = self._repo.wvfs.lstat
1331 for f in list:
1329 for f in list:
1332 # ds.pathto() returns an absolute path when this is invoked from
1330 # ds.pathto() returns an absolute path when this is invoked from
1333 # the keyword extension. That gets flagged as non-portable on
1331 # the keyword extension. That gets flagged as non-portable on
1334 # Windows, since it contains the drive letter and colon.
1332 # Windows, since it contains the drive letter and colon.
1335 scmutil.checkportable(ui, os.path.join(prefix, f))
1333 scmutil.checkportable(ui, os.path.join(prefix, f))
1336 try:
1334 try:
1337 st = lstat(f)
1335 st = lstat(f)
1338 except OSError:
1336 except OSError:
1339 ui.warn(_("%s does not exist!\n") % uipath(f))
1337 ui.warn(_("%s does not exist!\n") % uipath(f))
1340 rejected.append(f)
1338 rejected.append(f)
1341 continue
1339 continue
1342 if st.st_size > 10000000:
1340 if st.st_size > 10000000:
1343 ui.warn(_("%s: up to %d MB of RAM may be required "
1341 ui.warn(_("%s: up to %d MB of RAM may be required "
1344 "to manage this file\n"
1342 "to manage this file\n"
1345 "(use 'hg revert %s' to cancel the "
1343 "(use 'hg revert %s' to cancel the "
1346 "pending addition)\n")
1344 "pending addition)\n")
1347 % (f, 3 * st.st_size // 1000000, uipath(f)))
1345 % (f, 3 * st.st_size // 1000000, uipath(f)))
1348 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1346 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1349 ui.warn(_("%s not added: only files and symlinks "
1347 ui.warn(_("%s not added: only files and symlinks "
1350 "supported currently\n") % uipath(f))
1348 "supported currently\n") % uipath(f))
1351 rejected.append(f)
1349 rejected.append(f)
1352 elif ds[f] in 'amn':
1350 elif ds[f] in 'amn':
1353 ui.warn(_("%s already tracked!\n") % uipath(f))
1351 ui.warn(_("%s already tracked!\n") % uipath(f))
1354 elif ds[f] == 'r':
1352 elif ds[f] == 'r':
1355 ds.normallookup(f)
1353 ds.normallookup(f)
1356 else:
1354 else:
1357 ds.add(f)
1355 ds.add(f)
1358 return rejected
1356 return rejected
1359
1357
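# --- Illustrative sketch (editor addition, not part of this module) ---
# add() above returns the subset of names it refused to schedule, so a
# typical caller inspects that list rather than relying on exceptions. A
# hedged example; 'repo' is assumed to be an open localrepository and the
# names are repo-relative placeholder paths.
def add_files(repo, names):
    rejected = repo[None].add(names)
    return [n for n in names if n not in rejected]  # names actually added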
1360 def forget(self, files, prefix=""):
1358 def forget(self, files, prefix=""):
1361 with self._repo.wlock():
1359 with self._repo.wlock():
1362 ds = self._repo.dirstate
1360 ds = self._repo.dirstate
1363 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1361 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1364 rejected = []
1362 rejected = []
1365 for f in files:
1363 for f in files:
1366 if f not in self._repo.dirstate:
1364 if f not in self._repo.dirstate:
1367 self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
1365 self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
1368 rejected.append(f)
1366 rejected.append(f)
1369 elif self._repo.dirstate[f] != 'a':
1367 elif self._repo.dirstate[f] != 'a':
1370 self._repo.dirstate.remove(f)
1368 self._repo.dirstate.remove(f)
1371 else:
1369 else:
1372 self._repo.dirstate.drop(f)
1370 self._repo.dirstate.drop(f)
1373 return rejected
1371 return rejected
1374
1372
1375 def undelete(self, list):
1373 def undelete(self, list):
1376 pctxs = self.parents()
1374 pctxs = self.parents()
1377 with self._repo.wlock():
1375 with self._repo.wlock():
1378 ds = self._repo.dirstate
1376 ds = self._repo.dirstate
1379 for f in list:
1377 for f in list:
1380 if self._repo.dirstate[f] != 'r':
1378 if self._repo.dirstate[f] != 'r':
1381 self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
1379 self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
1382 else:
1380 else:
1383 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1381 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1384 t = fctx.data()
1382 t = fctx.data()
1385 self._repo.wwrite(f, t, fctx.flags())
1383 self._repo.wwrite(f, t, fctx.flags())
1386 self._repo.dirstate.normal(f)
1384 self._repo.dirstate.normal(f)
1387
1385
1388 def copy(self, source, dest):
1386 def copy(self, source, dest):
1389 try:
1387 try:
1390 st = self._repo.wvfs.lstat(dest)
1388 st = self._repo.wvfs.lstat(dest)
1391 except OSError as err:
1389 except OSError as err:
1392 if err.errno != errno.ENOENT:
1390 if err.errno != errno.ENOENT:
1393 raise
1391 raise
1394 self._repo.ui.warn(_("%s does not exist!\n")
1392 self._repo.ui.warn(_("%s does not exist!\n")
1395 % self._repo.dirstate.pathto(dest))
1393 % self._repo.dirstate.pathto(dest))
1396 return
1394 return
1397 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1395 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1398 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1396 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1399 "symbolic link\n")
1397 "symbolic link\n")
1400 % self._repo.dirstate.pathto(dest))
1398 % self._repo.dirstate.pathto(dest))
1401 else:
1399 else:
1402 with self._repo.wlock():
1400 with self._repo.wlock():
1403 if self._repo.dirstate[dest] in '?':
1401 if self._repo.dirstate[dest] in '?':
1404 self._repo.dirstate.add(dest)
1402 self._repo.dirstate.add(dest)
1405 elif self._repo.dirstate[dest] in 'r':
1403 elif self._repo.dirstate[dest] in 'r':
1406 self._repo.dirstate.normallookup(dest)
1404 self._repo.dirstate.normallookup(dest)
1407 self._repo.dirstate.copy(source, dest)
1405 self._repo.dirstate.copy(source, dest)
1408
1406
1409 def match(self, pats=None, include=None, exclude=None, default='glob',
1407 def match(self, pats=None, include=None, exclude=None, default='glob',
1410 listsubrepos=False, badfn=None):
1408 listsubrepos=False, badfn=None):
1411 r = self._repo
1409 r = self._repo
1412
1410
1413 # Only a case insensitive filesystem needs magic to translate user input
1411 # Only a case insensitive filesystem needs magic to translate user input
1414 # to actual case in the filesystem.
1412 # to actual case in the filesystem.
1415 icasefs = not util.fscasesensitive(r.root)
1413 icasefs = not util.fscasesensitive(r.root)
1416 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1414 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1417 default, auditor=r.auditor, ctx=self,
1415 default, auditor=r.auditor, ctx=self,
1418 listsubrepos=listsubrepos, badfn=badfn,
1416 listsubrepos=listsubrepos, badfn=badfn,
1419 icasefs=icasefs)
1417 icasefs=icasefs)
1420
1418
1421 def _filtersuspectsymlink(self, files):
1419 def _filtersuspectsymlink(self, files):
1422 if not files or self._repo.dirstate._checklink:
1420 if not files or self._repo.dirstate._checklink:
1423 return files
1421 return files
1424
1422
1425 # Symlink placeholders may get non-symlink-like contents
1423 # Symlink placeholders may get non-symlink-like contents
1426 # via user error or dereferencing by NFS or Samba servers,
1424 # via user error or dereferencing by NFS or Samba servers,
1427 # so we filter out any placeholders that don't look like a
1425 # so we filter out any placeholders that don't look like a
1428 # symlink
1426 # symlink
1429 sane = []
1427 sane = []
1430 for f in files:
1428 for f in files:
1431 if self.flags(f) == 'l':
1429 if self.flags(f) == 'l':
1432 d = self[f].data()
1430 d = self[f].data()
1433 if (d == '' or len(d) >= 1024 or '\n' in d
1431 if (d == '' or len(d) >= 1024 or '\n' in d
1434 or stringutil.binary(d)):
1432 or stringutil.binary(d)):
1435 self._repo.ui.debug('ignoring suspect symlink placeholder'
1433 self._repo.ui.debug('ignoring suspect symlink placeholder'
1436 ' "%s"\n' % f)
1434 ' "%s"\n' % f)
1437 continue
1435 continue
1438 sane.append(f)
1436 sane.append(f)
1439 return sane
1437 return sane
1440
1438
1441 def _checklookup(self, files):
1439 def _checklookup(self, files):
1442 # check for any possibly clean files
1440 # check for any possibly clean files
1443 if not files:
1441 if not files:
1444 return [], [], []
1442 return [], [], []
1445
1443
1446 modified = []
1444 modified = []
1447 deleted = []
1445 deleted = []
1448 fixup = []
1446 fixup = []
1449 pctx = self._parents[0]
1447 pctx = self._parents[0]
1450 # do a full compare of any files that might have changed
1448 # do a full compare of any files that might have changed
1451 for f in sorted(files):
1449 for f in sorted(files):
1452 try:
1450 try:
1453 # This will return True for a file that got replaced by a
1451 # This will return True for a file that got replaced by a
1454 # directory in the interim, but fixing that is pretty hard.
1452 # directory in the interim, but fixing that is pretty hard.
1455 if (f not in pctx or self.flags(f) != pctx.flags(f)
1453 if (f not in pctx or self.flags(f) != pctx.flags(f)
1456 or pctx[f].cmp(self[f])):
1454 or pctx[f].cmp(self[f])):
1457 modified.append(f)
1455 modified.append(f)
1458 else:
1456 else:
1459 fixup.append(f)
1457 fixup.append(f)
1460 except (IOError, OSError):
1458 except (IOError, OSError):
1461 # A file became inaccessible in between? Mark it as deleted,
1459 # A file became inaccessible in between? Mark it as deleted,
1462 # matching dirstate behavior (issue5584).
1460 # matching dirstate behavior (issue5584).
1463 # The dirstate has more complex behavior around whether a
1461 # The dirstate has more complex behavior around whether a
1464 # missing file matches a directory, etc, but we don't need to
1462 # missing file matches a directory, etc, but we don't need to
1465 # bother with that: if f has made it to this point, we're sure
1463 # bother with that: if f has made it to this point, we're sure
1466 # it's in the dirstate.
1464 # it's in the dirstate.
1467 deleted.append(f)
1465 deleted.append(f)
1468
1466
1469 return modified, deleted, fixup
1467 return modified, deleted, fixup
1470
1468
1471 def _poststatusfixup(self, status, fixup):
1469 def _poststatusfixup(self, status, fixup):
1472 """update dirstate for files that are actually clean"""
1470 """update dirstate for files that are actually clean"""
1473 poststatus = self._repo.postdsstatus()
1471 poststatus = self._repo.postdsstatus()
1474 if fixup or poststatus:
1472 if fixup or poststatus:
1475 try:
1473 try:
1476 oldid = self._repo.dirstate.identity()
1474 oldid = self._repo.dirstate.identity()
1477
1475
1478 # updating the dirstate is optional
1476 # updating the dirstate is optional
1479 # so we don't wait on the lock
1477 # so we don't wait on the lock
1480 # wlock can invalidate the dirstate, so cache normal _after_
1478 # wlock can invalidate the dirstate, so cache normal _after_
1481 # taking the lock
1479 # taking the lock
1482 with self._repo.wlock(False):
1480 with self._repo.wlock(False):
1483 if self._repo.dirstate.identity() == oldid:
1481 if self._repo.dirstate.identity() == oldid:
1484 if fixup:
1482 if fixup:
1485 normal = self._repo.dirstate.normal
1483 normal = self._repo.dirstate.normal
1486 for f in fixup:
1484 for f in fixup:
1487 normal(f)
1485 normal(f)
1488 # write changes out explicitly, because nesting
1486 # write changes out explicitly, because nesting
1489 # wlock at runtime may prevent 'wlock.release()'
1487 # wlock at runtime may prevent 'wlock.release()'
1490 # after this block from doing so for subsequent
1488 # after this block from doing so for subsequent
1491 # changing files
1489 # changing files
1492 tr = self._repo.currenttransaction()
1490 tr = self._repo.currenttransaction()
1493 self._repo.dirstate.write(tr)
1491 self._repo.dirstate.write(tr)
1494
1492
1495 if poststatus:
1493 if poststatus:
1496 for ps in poststatus:
1494 for ps in poststatus:
1497 ps(self, status)
1495 ps(self, status)
1498 else:
1496 else:
1499 # in this case, writing changes out breaks
1497 # in this case, writing changes out breaks
1500 # consistency, because .hg/dirstate was
1498 # consistency, because .hg/dirstate was
1501 # already changed out from under us since it was last
1499 # already changed out from under us since it was last
1502 # cached (see also issue5584 for details)
1500 # cached (see also issue5584 for details)
1503 self._repo.ui.debug('skip updating dirstate: '
1501 self._repo.ui.debug('skip updating dirstate: '
1504 'identity mismatch\n')
1502 'identity mismatch\n')
1505 except error.LockError:
1503 except error.LockError:
1506 pass
1504 pass
1507 finally:
1505 finally:
1508 # Even if the wlock couldn't be grabbed, clear out the list.
1506 # Even if the wlock couldn't be grabbed, clear out the list.
1509 self._repo.clearpostdsstatus()
1507 self._repo.clearpostdsstatus()
1510
1508
1511 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1509 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1512 '''Gets the status from the dirstate -- internal use only.'''
1510 '''Gets the status from the dirstate -- internal use only.'''
1513 subrepos = []
1511 subrepos = []
1514 if '.hgsub' in self:
1512 if '.hgsub' in self:
1515 subrepos = sorted(self.substate)
1513 subrepos = sorted(self.substate)
1516 cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
1514 cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
1517 clean=clean, unknown=unknown)
1515 clean=clean, unknown=unknown)
1518
1516
1519 # check for any possibly clean files
1517 # check for any possibly clean files
1520 fixup = []
1518 fixup = []
1521 if cmp:
1519 if cmp:
1522 modified2, deleted2, fixup = self._checklookup(cmp)
1520 modified2, deleted2, fixup = self._checklookup(cmp)
1523 s.modified.extend(modified2)
1521 s.modified.extend(modified2)
1524 s.deleted.extend(deleted2)
1522 s.deleted.extend(deleted2)
1525
1523
1526 if fixup and clean:
1524 if fixup and clean:
1527 s.clean.extend(fixup)
1525 s.clean.extend(fixup)
1528
1526
1529 self._poststatusfixup(s, fixup)
1527 self._poststatusfixup(s, fixup)
1530
1528
1531 if match.always():
1529 if match.always():
1532 # cache for performance
1530 # cache for performance
1533 if s.unknown or s.ignored or s.clean:
1531 if s.unknown or s.ignored or s.clean:
1534 # "_status" is cached with list*=False in the normal route
1532 # "_status" is cached with list*=False in the normal route
1535 self._status = scmutil.status(s.modified, s.added, s.removed,
1533 self._status = scmutil.status(s.modified, s.added, s.removed,
1536 s.deleted, [], [], [])
1534 s.deleted, [], [], [])
1537 else:
1535 else:
1538 self._status = s
1536 self._status = s
1539
1537
1540 return s
1538 return s
1541
1539
1542 @propertycache
1540 @propertycache
1543 def _manifest(self):
1541 def _manifest(self):
1544 """generate a manifest corresponding to the values in self._status
1542 """generate a manifest corresponding to the values in self._status
1545
1543
1546 This reuses the file nodeid from the parent, but we use special node
1544 This reuses the file nodeid from the parent, but we use special node
1547 identifiers for added and modified files. This is used by manifest
1545 identifiers for added and modified files. This is used by manifest
1548 merge to see that files are different and by update logic to avoid
1546 merge to see that files are different and by update logic to avoid
1549 deleting newly added files.
1547 deleting newly added files.
1550 """
1548 """
1551 return self._buildstatusmanifest(self._status)
1549 return self._buildstatusmanifest(self._status)
1552
1550
1553 def _buildstatusmanifest(self, status):
1551 def _buildstatusmanifest(self, status):
1554 """Builds a manifest that includes the given status results."""
1552 """Builds a manifest that includes the given status results."""
1555 parents = self.parents()
1553 parents = self.parents()
1556
1554
1557 man = parents[0].manifest().copy()
1555 man = parents[0].manifest().copy()
1558
1556
1559 ff = self._flagfunc
1557 ff = self._flagfunc
1560 for i, l in ((addednodeid, status.added),
1558 for i, l in ((addednodeid, status.added),
1561 (modifiednodeid, status.modified)):
1559 (modifiednodeid, status.modified)):
1562 for f in l:
1560 for f in l:
1563 man[f] = i
1561 man[f] = i
1564 try:
1562 try:
1565 man.setflag(f, ff(f))
1563 man.setflag(f, ff(f))
1566 except OSError:
1564 except OSError:
1567 pass
1565 pass
1568
1566
1569 for f in status.deleted + status.removed:
1567 for f in status.deleted + status.removed:
1570 if f in man:
1568 if f in man:
1571 del man[f]
1569 del man[f]
1572
1570
1573 return man
1571 return man
1574
1572
1575 def _buildstatus(self, other, s, match, listignored, listclean,
1573 def _buildstatus(self, other, s, match, listignored, listclean,
1576 listunknown):
1574 listunknown):
1577 """build a status with respect to another context
1575 """build a status with respect to another context
1578
1576
1579 This includes logic for maintaining the fast path of status when
1577 This includes logic for maintaining the fast path of status when
1580 comparing the working directory against its parent: building a new
1578 comparing the working directory against its parent: building a new
1581 manifest is skipped when self (the working directory) is compared
1579 manifest is skipped when self (the working directory) is compared
1582 directly against its parent (repo['.']).
1580 directly against its parent (repo['.']).
1583 """
1581 """
1584 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1582 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1585 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1583 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1586 # might have accidentally ended up with the entire contents of the file
1584 # might have accidentally ended up with the entire contents of the file
1587 # they are supposed to be linking to.
1585 # they are supposed to be linking to.
1588 s.modified[:] = self._filtersuspectsymlink(s.modified)
1586 s.modified[:] = self._filtersuspectsymlink(s.modified)
1589 if other != self._repo['.']:
1587 if other != self._repo['.']:
1590 s = super(workingctx, self)._buildstatus(other, s, match,
1588 s = super(workingctx, self)._buildstatus(other, s, match,
1591 listignored, listclean,
1589 listignored, listclean,
1592 listunknown)
1590 listunknown)
1593 return s
1591 return s
1594
1592
1595 def _matchstatus(self, other, match):
1593 def _matchstatus(self, other, match):
1596 """override the match method with a filter for directory patterns
1594 """override the match method with a filter for directory patterns
1597
1595
1598 We use inheritance to customize the match.bad method only in cases of
1596 We use inheritance to customize the match.bad method only in cases of
1599 workingctx since it belongs only to the working directory when
1597 workingctx since it belongs only to the working directory when
1600 comparing against the parent changeset.
1598 comparing against the parent changeset.
1601
1599
1602 If we aren't comparing against the working directory's parent, then we
1600 If we aren't comparing against the working directory's parent, then we
1603 just use the default match object sent to us.
1601 just use the default match object sent to us.
1604 """
1602 """
1605 if other != self._repo['.']:
1603 if other != self._repo['.']:
1606 def bad(f, msg):
1604 def bad(f, msg):
1607 # 'f' may be a directory pattern from 'match.files()',
1605 # 'f' may be a directory pattern from 'match.files()',
1608 # so 'f not in ctx1' is not enough
1606 # so 'f not in ctx1' is not enough
1609 if f not in other and not other.hasdir(f):
1607 if f not in other and not other.hasdir(f):
1610 self._repo.ui.warn('%s: %s\n' %
1608 self._repo.ui.warn('%s: %s\n' %
1611 (self._repo.dirstate.pathto(f), msg))
1609 (self._repo.dirstate.pathto(f), msg))
1612 match.bad = bad
1610 match.bad = bad
1613 return match
1611 return match
1614
1612
1615 def markcommitted(self, node):
1613 def markcommitted(self, node):
1616 super(workingctx, self).markcommitted(node)
1614 super(workingctx, self).markcommitted(node)
1617
1615
1618 sparse.aftercommit(self._repo, node)
1616 sparse.aftercommit(self._repo, node)
1619
1617
1620 class committablefilectx(basefilectx):
1618 class committablefilectx(basefilectx):
1621 """A committablefilectx provides common functionality for a file context
1619 """A committablefilectx provides common functionality for a file context
1622 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1620 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1623 def __init__(self, repo, path, filelog=None, ctx=None):
1621 def __init__(self, repo, path, filelog=None, ctx=None):
1624 self._repo = repo
1622 self._repo = repo
1625 self._path = path
1623 self._path = path
1626 self._changeid = None
1624 self._changeid = None
1627 self._filerev = self._filenode = None
1625 self._filerev = self._filenode = None
1628
1626
1629 if filelog is not None:
1627 if filelog is not None:
1630 self._filelog = filelog
1628 self._filelog = filelog
1631 if ctx:
1629 if ctx:
1632 self._changectx = ctx
1630 self._changectx = ctx
1633
1631
1634 def __nonzero__(self):
1632 def __nonzero__(self):
1635 return True
1633 return True
1636
1634
1637 __bool__ = __nonzero__
1635 __bool__ = __nonzero__
1638
1636
1639 def linkrev(self):
1637 def linkrev(self):
1640 # linked to self._changectx no matter if file is modified or not
1638 # linked to self._changectx no matter if file is modified or not
1641 return self.rev()
1639 return self.rev()
1642
1640
1643 def parents(self):
1641 def parents(self):
1644 '''return parent filectxs, following copies if necessary'''
1642 '''return parent filectxs, following copies if necessary'''
1645 def filenode(ctx, path):
1643 def filenode(ctx, path):
1646 return ctx._manifest.get(path, nullid)
1644 return ctx._manifest.get(path, nullid)
1647
1645
1648 path = self._path
1646 path = self._path
1649 fl = self._filelog
1647 fl = self._filelog
1650 pcl = self._changectx._parents
1648 pcl = self._changectx._parents
1651 renamed = self.renamed()
1649 renamed = self.renamed()
1652
1650
1653 if renamed:
1651 if renamed:
1654 pl = [renamed + (None,)]
1652 pl = [renamed + (None,)]
1655 else:
1653 else:
1656 pl = [(path, filenode(pcl[0], path), fl)]
1654 pl = [(path, filenode(pcl[0], path), fl)]
1657
1655
1658 for pc in pcl[1:]:
1656 for pc in pcl[1:]:
1659 pl.append((path, filenode(pc, path), fl))
1657 pl.append((path, filenode(pc, path), fl))
1660
1658
1661 return [self._parentfilectx(p, fileid=n, filelog=l)
1659 return [self._parentfilectx(p, fileid=n, filelog=l)
1662 for p, n, l in pl if n != nullid]
1660 for p, n, l in pl if n != nullid]
1663
1661
1664 def children(self):
1662 def children(self):
1665 return []
1663 return []
1666
1664
1667 class workingfilectx(committablefilectx):
1665 class workingfilectx(committablefilectx):
1668 """A workingfilectx object makes access to data related to a particular
1666 """A workingfilectx object makes access to data related to a particular
1669 file in the working directory convenient."""
1667 file in the working directory convenient."""
1670 def __init__(self, repo, path, filelog=None, workingctx=None):
1668 def __init__(self, repo, path, filelog=None, workingctx=None):
1671 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1669 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1672
1670
1673 @propertycache
1671 @propertycache
1674 def _changectx(self):
1672 def _changectx(self):
1675 return workingctx(self._repo)
1673 return workingctx(self._repo)
1676
1674
1677 def data(self):
1675 def data(self):
1678 return self._repo.wread(self._path)
1676 return self._repo.wread(self._path)
1679 def renamed(self):
1677 def renamed(self):
1680 rp = self._repo.dirstate.copied(self._path)
1678 rp = self._repo.dirstate.copied(self._path)
1681 if not rp:
1679 if not rp:
1682 return None
1680 return None
1683 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1681 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1684
1682
1685 def size(self):
1683 def size(self):
1686 return self._repo.wvfs.lstat(self._path).st_size
1684 return self._repo.wvfs.lstat(self._path).st_size
1687 def date(self):
1685 def date(self):
1688 t, tz = self._changectx.date()
1686 t, tz = self._changectx.date()
1689 try:
1687 try:
1690 return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
1688 return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
1691 except OSError as err:
1689 except OSError as err:
1692 if err.errno != errno.ENOENT:
1690 if err.errno != errno.ENOENT:
1693 raise
1691 raise
1694 return (t, tz)
1692 return (t, tz)
1695
1693
1696 def exists(self):
1694 def exists(self):
1697 return self._repo.wvfs.exists(self._path)
1695 return self._repo.wvfs.exists(self._path)
1698
1696
1699 def lexists(self):
1697 def lexists(self):
1700 return self._repo.wvfs.lexists(self._path)
1698 return self._repo.wvfs.lexists(self._path)
1701
1699
1702 def audit(self):
1700 def audit(self):
1703 return self._repo.wvfs.audit(self._path)
1701 return self._repo.wvfs.audit(self._path)
1704
1702
1705 def cmp(self, fctx):
1703 def cmp(self, fctx):
1706 """compare with other file context
1704 """compare with other file context
1707
1705
1708 returns True if different than fctx.
1706 returns True if different than fctx.
1709 """
1707 """
1710 # fctx should be a filectx (not a workingfilectx)
1708 # fctx should be a filectx (not a workingfilectx)
1711 # invert comparison to reuse the same code path
1709 # invert comparison to reuse the same code path
1712 return fctx.cmp(self)
1710 return fctx.cmp(self)
1713
1711
1714 def remove(self, ignoremissing=False):
1712 def remove(self, ignoremissing=False):
1715 """wraps unlink for a repo's working directory"""
1713 """wraps unlink for a repo's working directory"""
1716 rmdir = self._repo.ui.configbool('experimental', 'removeemptydirs')
1714 rmdir = self._repo.ui.configbool('experimental', 'removeemptydirs')
1717 self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing,
1715 self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing,
1718 rmdir=rmdir)
1716 rmdir=rmdir)
1719
1717
1720 def write(self, data, flags, backgroundclose=False, **kwargs):
1718 def write(self, data, flags, backgroundclose=False, **kwargs):
1721 """wraps repo.wwrite"""
1719 """wraps repo.wwrite"""
1722 self._repo.wwrite(self._path, data, flags,
1720 self._repo.wwrite(self._path, data, flags,
1723 backgroundclose=backgroundclose,
1721 backgroundclose=backgroundclose,
1724 **kwargs)
1722 **kwargs)
1725
1723
1726 def markcopied(self, src):
1724 def markcopied(self, src):
1727 """marks this file a copy of `src`"""
1725 """marks this file a copy of `src`"""
1728 if self._repo.dirstate[self._path] in "nma":
1726 if self._repo.dirstate[self._path] in "nma":
1729 self._repo.dirstate.copy(src, self._path)
1727 self._repo.dirstate.copy(src, self._path)
1730
1728
1731 def clearunknown(self):
1729 def clearunknown(self):
1732 """Removes conflicting items in the working directory so that
1730 """Removes conflicting items in the working directory so that
1733 ``write()`` can be called successfully.
1731 ``write()`` can be called successfully.
1734 """
1732 """
1735 wvfs = self._repo.wvfs
1733 wvfs = self._repo.wvfs
1736 f = self._path
1734 f = self._path
1737 wvfs.audit(f)
1735 wvfs.audit(f)
1738 if wvfs.isdir(f) and not wvfs.islink(f):
1736 if wvfs.isdir(f) and not wvfs.islink(f):
1739 wvfs.rmtree(f, forcibly=True)
1737 wvfs.rmtree(f, forcibly=True)
1740 if self._repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
1738 if self._repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
1741 for p in reversed(list(util.finddirs(f))):
1739 for p in reversed(list(util.finddirs(f))):
1742 if wvfs.isfileorlink(p):
1740 if wvfs.isfileorlink(p):
1743 wvfs.unlink(p)
1741 wvfs.unlink(p)
1744 break
1742 break
1745
1743
1746 def setflags(self, l, x):
1744 def setflags(self, l, x):
1747 self._repo.wvfs.setflags(self._path, l, x)
1745 self._repo.wvfs.setflags(self._path, l, x)
1748
1746
1749 class overlayworkingctx(committablectx):
1747 class overlayworkingctx(committablectx):
1750 """Wraps another mutable context with a write-back cache that can be
1748 """Wraps another mutable context with a write-back cache that can be
1751 converted into a commit context.
1749 converted into a commit context.
1752
1750
1753 self._cache[path] maps to a dict with keys: {
1751 self._cache[path] maps to a dict with keys: {
1754 'exists': bool?
1752 'exists': bool?
1755 'date': date?
1753 'date': date?
1756 'data': str?
1754 'data': str?
1757 'flags': str?
1755 'flags': str?
1758 'copied': str? (path or None)
1756 'copied': str? (path or None)
1759 }
1757 }
1760 If `exists` is True, `flags` must be non-None and `date` is non-None. If it
1758 If `exists` is True, `flags` must be non-None and `date` is non-None. If it
1761 is `False`, the file was deleted.
1759 is `False`, the file was deleted.
1762 """
1760 """
1763
1761
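# --- Illustrative sketch (editor addition, not part of this module) ---
# The shape of a single self._cache entry as described in the docstring
# above, written out as a literal for reference; all values are placeholders.
_EXAMPLE_CACHE_ENTRY = {
    'exists': True,            # False means the file was deleted
    'date': (0, 0),            # (unixtime, offset); required when exists
    'data': b'file contents',  # falsy means only the flags were changed
    'flags': '',               # must be non-None when exists is True
    'copied': None,            # source path if the file was copied, else None
}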
1764 def __init__(self, repo):
1762 def __init__(self, repo):
1765 super(overlayworkingctx, self).__init__(repo)
1763 super(overlayworkingctx, self).__init__(repo)
1766 self.clean()
1764 self.clean()
1767
1765
1768 def setbase(self, wrappedctx):
1766 def setbase(self, wrappedctx):
1769 self._wrappedctx = wrappedctx
1767 self._wrappedctx = wrappedctx
1770 self._parents = [wrappedctx]
1768 self._parents = [wrappedctx]
1771 # Drop old manifest cache as it is now out of date.
1769 # Drop old manifest cache as it is now out of date.
1772 # This is necessary when, e.g., rebasing several nodes with one
1770 # This is necessary when, e.g., rebasing several nodes with one
1773 # ``overlayworkingctx`` (e.g. with --collapse).
1771 # ``overlayworkingctx`` (e.g. with --collapse).
1774 util.clearcachedproperty(self, '_manifest')
1772 util.clearcachedproperty(self, '_manifest')
1775
1773
1776 def data(self, path):
1774 def data(self, path):
1777 if self.isdirty(path):
1775 if self.isdirty(path):
1778 if self._cache[path]['exists']:
1776 if self._cache[path]['exists']:
1779 if self._cache[path]['data']:
1777 if self._cache[path]['data']:
1780 return self._cache[path]['data']
1778 return self._cache[path]['data']
1781 else:
1779 else:
1782 # Must fallback here, too, because we only set flags.
1780 # Must fallback here, too, because we only set flags.
1783 return self._wrappedctx[path].data()
1781 return self._wrappedctx[path].data()
1784 else:
1782 else:
1785 raise error.ProgrammingError("No such file or directory: %s" %
1783 raise error.ProgrammingError("No such file or directory: %s" %
1786 path)
1784 path)
1787 else:
1785 else:
1788 return self._wrappedctx[path].data()
1786 return self._wrappedctx[path].data()
1789
1787
1790 @propertycache
1788 @propertycache
1791 def _manifest(self):
1789 def _manifest(self):
1792 parents = self.parents()
1790 parents = self.parents()
1793 man = parents[0].manifest().copy()
1791 man = parents[0].manifest().copy()
1794
1792
1795 flag = self._flagfunc
1793 flag = self._flagfunc
1796 for path in self.added():
1794 for path in self.added():
1797 man[path] = addednodeid
1795 man[path] = addednodeid
1798 man.setflag(path, flag(path))
1796 man.setflag(path, flag(path))
1799 for path in self.modified():
1797 for path in self.modified():
1800 man[path] = modifiednodeid
1798 man[path] = modifiednodeid
1801 man.setflag(path, flag(path))
1799 man.setflag(path, flag(path))
1802 for path in self.removed():
1800 for path in self.removed():
1803 del man[path]
1801 del man[path]
1804 return man
1802 return man

    @propertycache
    def _flagfunc(self):
        def f(path):
            return self._cache[path]['flags']
        return f

    def files(self):
        return sorted(self.added() + self.modified() + self.removed())

    def modified(self):
        return [f for f in self._cache.keys() if self._cache[f]['exists'] and
                self._existsinparent(f)]

    def added(self):
        return [f for f in self._cache.keys() if self._cache[f]['exists'] and
                not self._existsinparent(f)]

    def removed(self):
        return [f for f in self._cache.keys() if
                not self._cache[f]['exists'] and self._existsinparent(f)]

    def isinmemory(self):
        return True

    def filedate(self, path):
        if self.isdirty(path):
            return self._cache[path]['date']
        else:
            return self._wrappedctx[path].date()

    def markcopied(self, path, origin):
        if self.isdirty(path):
            self._cache[path]['copied'] = origin
        else:
            raise error.ProgrammingError('markcopied() called on clean context')

    def copydata(self, path):
        if self.isdirty(path):
            return self._cache[path]['copied']
        else:
            raise error.ProgrammingError('copydata() called on clean context')

    def flags(self, path):
        if self.isdirty(path):
            if self._cache[path]['exists']:
                return self._cache[path]['flags']
            else:
                raise error.ProgrammingError("No such file or directory: %s" %
                                             self._path)
        else:
            return self._wrappedctx[path].flags()

    def _existsinparent(self, path):
        try:
            # ``commitctx`` raises a ``ManifestLookupError`` if a path does not
            # exist, unlike ``workingctx``, which returns a ``workingfilectx``
            # with an ``exists()`` function.
            self._wrappedctx[path]
            return True
        except error.ManifestLookupError:
            return False

    def _auditconflicts(self, path):
        """Replicates conflict checks done by wvfs.write().

        Since we never write to the filesystem and never call `applyupdates` in
        IMM, we'll never check that a path is actually writable -- e.g., because
        it adds `a/foo`, but `a` is actually a file in the other commit.
        """
        def fail(path, component):
            # p1() is the base and we're receiving "writes" for p2()'s
            # files.
            if 'l' in self.p1()[component].flags():
                raise error.Abort("error: %s conflicts with symlink %s "
                                  "in %s." % (path, component,
                                              self.p1().rev()))
            else:
                raise error.Abort("error: '%s' conflicts with file '%s' in "
                                  "%s." % (path, component,
                                           self.p1().rev()))

        # Test that each new directory to be created to write this path from p2
        # is not a file in p1.
        components = path.split('/')
        for i in xrange(len(components)):
            component = "/".join(components[0:i])
            if component in self.p1():
                fail(path, component)

        # Test the other direction -- that this path from p2 isn't a directory
        # in p1 (test that p1 doesn't have any paths matching `path/*`).
        match = matchmod.match('/', '', [path + '/'], default=b'relpath')
        matches = self.p1().manifest().matches(match)
        if len(matches) > 0:
            if len(matches) == 1 and matches.keys()[0] == path:
                return
            raise error.Abort("error: file '%s' cannot be written because "
                              " '%s/' is a folder in %s (containing %d "
                              "entries: %s)"
                              % (path, path, self.p1(), len(matches),
                                 ', '.join(matches.keys())))
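
    # Illustrative note: the audit above rejects directory/file conflicts in
    # both directions. Assuming p1() already tracks a regular file 'a', an
    # in-memory write of 'a/foo' aborts; conversely, writing 'a' aborts when
    # p1() tracks 'a/foo'. The paths here are hypothetical.
    #
    #     wctx.write('a/foo', b'data\n')  # error: 'a/foo' conflicts with file 'a'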

    def write(self, path, data, flags='', **kwargs):
        if data is None:
            raise error.ProgrammingError("data must be non-None")
        self._auditconflicts(path)
        self._markdirty(path, exists=True, data=data, date=dateutil.makedate(),
                        flags=flags)

    def setflags(self, path, l, x):
        self._markdirty(path, exists=True, date=dateutil.makedate(),
                        flags=(l and 'l' or '') + (x and 'x' or ''))

    def remove(self, path):
        self._markdirty(path, exists=False)

    def exists(self, path):
        """exists behaves like `lexists`, but needs to follow symlinks and
        return False if they are broken.
        """
        if self.isdirty(path):
            # If this path exists and is a symlink, "follow" it by calling
            # exists on the destination path.
            if (self._cache[path]['exists'] and
                    'l' in self._cache[path]['flags']):
                return self.exists(self._cache[path]['data'].strip())
            else:
                return self._cache[path]['exists']

        return self._existsinparent(path)

    def lexists(self, path):
        """lexists returns True if the path exists"""
        if self.isdirty(path):
            return self._cache[path]['exists']

        return self._existsinparent(path)

    def size(self, path):
        if self.isdirty(path):
            if self._cache[path]['exists']:
                return len(self._cache[path]['data'])
            else:
                raise error.ProgrammingError("No such file or directory: %s" %
                                             self._path)
        return self._wrappedctx[path].size()

    def tomemctx(self, text, branch=None, extra=None, date=None, parents=None,
                 user=None, editor=None):
        """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
        committed.

        ``text`` is the commit message.
        ``parents`` (optional) are rev numbers.
        """
        # Default parents to the wrapped context's if not passed.
        if parents is None:
            parents = self._wrappedctx.parents()
            if len(parents) == 1:
                parents = (parents[0], None)

        # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
        if parents[1] is None:
            parents = (self._repo[parents[0]], None)
        else:
            parents = (self._repo[parents[0]], self._repo[parents[1]])

        files = self._cache.keys()
        def getfile(repo, memctx, path):
            if self._cache[path]['exists']:
                return memfilectx(repo, memctx, path,
                                  self._cache[path]['data'],
                                  'l' in self._cache[path]['flags'],
                                  'x' in self._cache[path]['flags'],
                                  self._cache[path]['copied'])
            else:
                # Returning None, but including the path in `files`, is
                # necessary for memctx to register a deletion.
                return None
        return memctx(self._repo, parents, text, files, getfile, date=date,
                      extra=extra, user=user, branch=branch, editor=editor)
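
    # Illustrative, hypothetical sketch: an in-memory merge or rebase typically
    # finishes by materializing the cache as a real commit. ``destrev`` and the
    # message are placeholders.
    #
    #     mctx = wctx.tomemctx('rebased changeset', parents=(destrev, None))
    #     newnode = repo.commitctx(mctx)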

    def isdirty(self, path):
        return path in self._cache

    def isempty(self):
        # We need to discard any keys that are actually clean before the empty
        # commit check.
        self._compact()
        return len(self._cache) == 0

    def clean(self):
        self._cache = {}

    def _compact(self):
        """Removes keys from the cache that are actually clean, by comparing
        them with the underlying context.

        This can occur during the merge process, e.g. by passing --tool :local
        to resolve a conflict.
        """
        keys = []
        for path in self._cache.keys():
            cache = self._cache[path]
            try:
                underlying = self._wrappedctx[path]
                if (underlying.data() == cache['data'] and
                        underlying.flags() == cache['flags']):
                    keys.append(path)
            except error.ManifestLookupError:
                # Path not in the underlying manifest (created).
                continue

        for path in keys:
            del self._cache[path]
        return keys

    def _markdirty(self, path, exists, data=None, date=None, flags=''):
        self._cache[path] = {
            'exists': exists,
            'data': data,
            'date': date,
            'flags': flags,
            'copied': None,
        }

    def filectx(self, path, filelog=None):
        return overlayworkingfilectx(self._repo, path, parent=self,
                                     filelog=filelog)

class overlayworkingfilectx(committablefilectx):
    """Wraps a ``workingfilectx``, intercepting all writes into an in-memory
    cache that can be flushed through later by calling ``flush()``."""

    def __init__(self, repo, path, filelog=None, parent=None):
        super(overlayworkingfilectx, self).__init__(repo, path, filelog,
                                                    parent)
        self._repo = repo
        self._parent = parent
        self._path = path

    def cmp(self, fctx):
        return self.data() != fctx.data()

    def changectx(self):
        return self._parent

    def data(self):
        return self._parent.data(self._path)

    def date(self):
        return self._parent.filedate(self._path)

    def exists(self):
        return self.lexists()

    def lexists(self):
        return self._parent.exists(self._path)

    def renamed(self):
        path = self._parent.copydata(self._path)
        if not path:
            return None
        return path, self._changectx._parents[0]._manifest.get(path, nullid)

    def size(self):
        return self._parent.size(self._path)

    def markcopied(self, origin):
        self._parent.markcopied(self._path, origin)

    def audit(self):
        pass

    def flags(self):
        return self._parent.flags(self._path)

    def setflags(self, islink, isexec):
        return self._parent.setflags(self._path, islink, isexec)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        return self._parent.write(self._path, data, flags, **kwargs)

    def remove(self, ignoremissing=False):
        return self._parent.remove(self._path)

    def clearunknown(self):
        pass

class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """
    def __init__(self, repo, changes,
                 text="", user=None, date=None, extra=None):
        super(workingctx, self).__init__(repo, text, user, date, extra,
                                         changes)

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        if clean:
            clean = [f for f in self._manifest if f not in self._changedset]
        else:
            clean = []
        return scmutil.status([f for f in self._status.modified if match(f)],
                              [f for f in self._status.added if match(f)],
                              [f for f in self._status.removed if match(f)],
                              [], [], [], clean)

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        changed = set(self._status.modified)
        changed.update(self._status.added)
        changed.update(self._status.removed)
        return changed

def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    cache = {}

    def getfilectx(repo, memctx, path):
        if path not in cache:
            cache[path] = func(repo, memctx, path)
        return cache[path]

    return getfilectx

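# Illustrative note: the wrapper above memoizes purely on ``path``, so an
# expensive filectxfn (for instance one that re-reads patch data) runs at most
# once per file even if memctx asks for the same path repeatedly. The wrapped
# function below is hypothetical.
#
#     cachedfn = makecachingfilectxfn(expensivefilectxfn)
#     cachedfn(repo, mctx, 'a.txt')   # computes and stores the result
#     cachedfn(repo, mctx, 'a.txt')   # served from the path-keyed cache
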
def memfilefromctx(ctx):
    """Given a context return a memfilectx for ctx[path]

    This is a convenience method for building a memctx based on another
    context.
    """
    def getfilectx(repo, memctx, path):
        fctx = ctx[path]
        # this is weird but apparently we only keep track of one parent
        # (why not only store that instead of a tuple?)
        copied = fctx.renamed()
        if copied:
            copied = copied[0]
        return memfilectx(repo, memctx, path, fctx.data(),
                          islink=fctx.islink(), isexec=fctx.isexec(),
                          copied=copied)

    return getfilectx

def memfilefrompatch(patchstore):
    """Given a patch (e.g. patchstore object) return a memfilectx

    This is a convenience method for building a memctx based on a patchstore.
    """
    def getfilectx(repo, memctx, path):
        data, mode, copied = patchstore.getfile(path)
        if data is None:
            return None
        islink, isexec = mode
        return memfilectx(repo, memctx, path, data, islink=islink,
                          isexec=isexec, copied=copied)

    return getfilectx

class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time, while
    related file data is made available through a callback
    mechanism. 'repo' is the current localrepo, 'parents' is a
    sequence of two parent revision identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    the requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but the call order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn returns None for recent
    Mercurial. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to the current
    repository username, date is the commit date in any format
    supported by dateutil.parsedate() and defaults to the current date,
    extra is a dictionary of metadata or is left empty.
    """
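
    # Illustrative, hypothetical sketch: committing a file without touching the
    # working directory. The repo handle, file name and contents are
    # placeholders.
    #
    #     def filectxfn(repo, mctx, path):
    #         if path == 'hello.txt':
    #             return memfilectx(repo, mctx, path, b'hello world\n')
    #         return None                  # anything else is treated as removed
    #
    #     mctx = memctx(repo, (repo['.'].node(), None), 'add hello.txt',
    #                   ['hello.txt'], filectxfn, user='me <me@example.com>')
    #     newnode = mctx.commit()          # equivalent to repo.commitctx(mctx)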

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, branch=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [self._repo[p] for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        if branch is not None:
            self._extra['branch'] = encoding.fromlocal(branch)
        self.substate = {}

        if isinstance(filectxfn, patch.filestore):
            filectxfn = memfilefrompatch(filectxfn)
        elif not callable(filectxfn):
            # if store is not callable, wrap it in a function
            filectxfn = memfilefromctx(filectxfn)

        # memoizing increases performance for e.g. vcs convert scenarios.
        self._filectxfn = makecachingfilectxfn(filectxfn)

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            p1node = nullid
            p2node = nullid
            p = pctx[f].parents() # if file isn't in pctx, check p2?
            if len(p) > 0:
                p1node = p[0].filenode()
                if len(p) > 1:
                    p2node = p[1].filenode()
            man[f] = revlog.hash(self[f].data(), p1node, p2node)

        for f in self._status.added:
            man[f] = revlog.hash(self[f].data(), nullid, nullid)

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized as a list whose length is always 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])

class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, changectx, path, data, islink=False,
                 isexec=False, copied=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, changectx)
        self._data = data
        self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
        self._copied = None
        if copied:
            self._copied = (copied, nullid)

    def data(self):
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags, **kwargs):
        """wraps repo.wwrite"""
        self._data = data

class overlayfilectx(committablefilectx):
    """Like memfilectx but takes an original filectx and optional parameters to
    override parts of it. This is useful when fctx.data() is expensive (i.e.
    the flag processor is expensive) and raw data, flags, and filenode could be
    reused (ex. rebase or mode-only amend of a REVIDX_EXTSTORED file).
    """
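
    # Illustrative, hypothetical sketch: reusing an existing file revision while
    # overriding only its flags, e.g. for a mode-only in-memory amend. ``fctx``
    # is a placeholder filectx taken from some changeset.
    #
    #     newfctx = overlayfilectx(fctx, flags='x')  # mark as executable
    #     newfctx.data() == fctx.data()              # content is reused lazily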

    def __init__(self, originalfctx, datafunc=None, path=None, flags=None,
                 copied=None, ctx=None):
        """originalfctx: filecontext to duplicate

        datafunc: None or a function to override data (file content). It is a
        function so it can be lazy. path, flags, copied, ctx: None or an
        overridden value.

        copied could be (path, rev), or False. copied could also be just path,
        and will be converted to (path, nullid). This simplifies some callers.
        """

        if path is None:
            path = originalfctx.path()
        if ctx is None:
            ctx = originalfctx.changectx()
            ctxmatch = lambda: True
        else:
            ctxmatch = lambda: ctx == originalfctx.changectx()

        repo = originalfctx.repo()
        flog = originalfctx.filelog()
        super(overlayfilectx, self).__init__(repo, path, flog, ctx)

        if copied is None:
            copied = originalfctx.renamed()
            copiedmatch = lambda: True
        else:
            if copied and not isinstance(copied, tuple):
                # repo._filecommit will recalculate copyrev so nullid is okay
                copied = (copied, nullid)
            copiedmatch = lambda: copied == originalfctx.renamed()

        # When data, copied (could affect data), ctx (could affect filelog
        # parents) are not overridden, rawdata, rawflags, and filenode may be
        # reused (repo._filecommit should double check filelog parents).
        #
        # path and flags are not hashed in the filelog (only in the manifest),
        # so they do not affect reusability here.
        #
        # If ctx or copied is overridden to the same value as in originalfctx,
        # it is still considered reusable. originalfctx.renamed() may be a bit
        # expensive so it's not called unless necessary. Assuming datafunc is
        # always expensive, do not call it for this "reusable" test.
        reusable = datafunc is None and ctxmatch() and copiedmatch()

        if datafunc is None:
            datafunc = originalfctx.data
        if flags is None:
            flags = originalfctx.flags()

        self._datafunc = datafunc
        self._flags = flags
        self._copied = copied

        if reusable:
            # copy extra fields from originalfctx
            attrs = ['rawdata', 'rawflags', '_filenode', '_filerev']
            for attr_ in attrs:
                if util.safehasattr(originalfctx, attr_):
                    setattr(self, attr_, getattr(originalfctx, attr_))

    def data(self):
        return self._datafunc()

class metadataonlyctx(committablectx):
    """Like memctx but it reuses the manifest of a different commit.
    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time. 'repo' is the
    current localrepo, 'ctx' is the original revision whose manifest we're
    reusing, 'parents' is a sequence of two parent revision identifiers (pass
    None for every missing parent), 'text' is the commit message.

    user receives the committer name and defaults to the current repository
    username, date is the commit date in any format supported by
    dateutil.parsedate() and defaults to the current date, extra is a dictionary
    of metadata or is left empty.
    """
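
    # Illustrative, hypothetical sketch: rewriting only the metadata of an
    # existing changeset (here its user), as a history-editing command might.
    #
    #     old = repo['.']
    #     newctx = metadataonlyctx(repo, old, text=old.description(),
    #                              user='someone else <other@example.com>')
    #     newnode = newctx.commit()
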
    def __init__(self, repo, originalctx, parents=None, text=None, user=None,
                 date=None, extra=None, editor=False):
        if text is None:
            text = originalctx.description()
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        self._manifestnode = originalctx.manifestnode()
        if parents is None:
            parents = originalctx.parents()
        else:
            parents = [repo[p] for p in parents if p is not None]
        parents = parents[:]
        while len(parents) < 2:
            parents.append(repo[nullid])
        p1, p2 = self._parents = parents

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        mp1, mp2 = self.manifestctx().parents
        if p1 != nullid and p1.manifestnode() != mp1:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p1 doesn\'t match the new ctx p1')
        if p2 != nullid and p2.manifestnode() != mp2:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p2 doesn\'t match the new ctx p2')

        self._files = originalctx.files()
        self.substate = {}

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        return self._manifestnode

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the ``origctx``
        and parents manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "metadataonlyctx._parents" is
        # explicitly initialized as a list whose length is always 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif f in self:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])

class arbitraryfilectx(object):
    """Allows you to use filectx-like functions on a file in an arbitrary
    location on disk, possibly not in the working directory.
    """
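
    # Illustrative, hypothetical sketch: wrapping a plain on-disk file, e.g. as
    # a merge helper might. The path is a placeholder.
    #
    #     afctx = arbitraryfilectx('/tmp/merge-backup.txt')
    #     afctx.data()           # raw bytes via util.readfile()
    #     afctx.cmp(otherfctx)   # True if the contents differ
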
    def __init__(self, path, repo=None):
        # Repo is optional because contrib/simplemerge uses this class.
        self._repo = repo
        self._path = path

    def cmp(self, fctx):
        # filecmp follows symlinks whereas `cmp` should not, so skip the fast
        # path if either side is a symlink.
        symlinks = ('l' in self.flags() or 'l' in fctx.flags())
        if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
            # Add a fast-path for merge if both sides are disk-backed.
            # Note that filecmp uses the opposite return values (True if same)
            # from our cmp functions (True if different).
            return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
        return self.data() != fctx.data()

    def path(self):
        return self._path

    def flags(self):
        return ''

    def data(self):
        return util.readfile(self._path)

    def decodeddata(self):
        with open(self._path, "rb") as f:
            return f.read()

    def remove(self):
        util.unlink(self._path)

    def write(self, data, flags, **kwargs):
        assert not flags
        with open(self._path, "w") as f:
            f.write(data)