# Review-page header (reconstructed from scraped diff metadata):
# Mercurial mercurial/context.py at r39749:a5dafefc (default branch)
# Commit: "memctx: simplify _manifest with new revlog nodeids" — Sean Farley
# Diff hunk: @@ -1,2507 +1,2499 @@ (the revision removes the `revlog` import)
# context.py - changeset and file context objects for mercurial
#
# Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import filecmp
import os
import stat

from .i18n import _
from .node import (
    addednodeid,
    bin,
    hex,
    modifiednodeid,
    nullid,
    nullrev,
    short,
    wdirfilenodeids,
    wdirid,
)
# NOTE: this revision dropped the `revlog` import — memctx no longer needs it.
from . import (
    dagop,
    encoding,
    error,
    fileset,
    match as matchmod,
    obsolete as obsmod,
    patch,
    pathutil,
    phases,
    pycompat,
    repoview,
    scmutil,
    sparse,
    subrepo,
    subrepoutil,
    util,
)
from .utils import (
    dateutil,
    stringutil,
)

# convenience alias used as a decorator throughout this module
propertycache = util.propertycache
class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
    be committed,
    memctx: a context that represents changes in-memory and can also
    be committed."""

    def __init__(self, repo):
        self._repo = repo

    def __bytes__(self):
        # short hash of the context's node
        return short(self.node())

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return r"<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        # contexts are equal when they are the same subclass at the same rev;
        # AttributeError covers objects without a _rev (e.g. non-contexts)
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        return key in self._manifest

    def __getitem__(self, key):
        return self.filectx(key)

    def __iter__(self):
        return iter(self._manifest)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results, if this is
        a working copy context. For non-working copy contexts, it just returns
        the normal manifest."""
        return self.manifest()

    def _matchstatus(self, other, match):
        """This internal method provides a way for child objects to override the
        match operator.
        """
        return match

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in d.iteritems():
            if fn in deletedset:
                continue
            if value is None:
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in wdirfilenodeids:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [fn for fn in unknown if fn not in mf1 and
                       (not match or match(fn))]
            ignored = [fn for fn in ignored if fn not in mf1 and
                       (not match or match(fn))]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(modified, added, removed, deleted, unknown,
                              ignored, clean)

    @propertycache
    def substate(self):
        # mapping of subrepo path -> (source, revision, kind)
        return subrepoutil.state(self, self._repo.ui)

    def subrev(self, subpath):
        return self.substate[subpath][1]

    # trivial accessors over attributes provided by subclasses
    def rev(self):
        return self._rev
    def node(self):
        return self._node
    def hex(self):
        return hex(self.node())
    def manifest(self):
        return self._manifest
    def manifestctx(self):
        return self._manifestctx
    def repo(self):
        return self._repo
    def phasestr(self):
        return phases.phasenames[self.phase()]
    def mutable(self):
        return self.phase() > phases.public

    def matchfileset(self, expr, badfn=None):
        return fileset.match(self, expr, badfn=badfn)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, 'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, 'extinct')

    def orphan(self):
        """True if the changeset is not obsolete, but its ancestor is"""
        return self.rev() in obsmod.getrevs(self._repo, 'orphan')

    def phasedivergent(self):
        """True if the changeset tries to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be phase-divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')

    def contentdivergent(self):
        """Is a successor of a changeset with multiple possible successor sets

        Only non-public and non-obsolete changesets may be content-divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')

    def isunstable(self):
        """True if the changeset is either orphan, phase-divergent or
        content-divergent"""
        return self.orphan() or self.phasedivergent() or self.contentdivergent()

    def instabilities(self):
        """return the list of instabilities affecting this changeset.

        Instabilities are returned as strings. possible values are:
        - orphan,
        - phase-divergent,
        - content-divergent.
        """
        instabilities = []
        if self.orphan():
            instabilities.append('orphan')
        if self.phasedivergent():
            instabilities.append('phase-divergent')
        if self.contentdivergent():
            instabilities.append('content-divergent')
        return instabilities

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        # only one real parent: synthesize the null changeset as p2
        return changectx(self._repo, nullrev)

    def _fileinfo(self, path):
        """Return (filenode, flags) for path, raising ManifestLookupError
        when the file is not in this context's manifest."""
        if r'_manifest' in self.__dict__:
            # full manifest already materialized: use it directly
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(self._node, path,
                                                _('not found in manifest'))
        if r'_manifestdelta' in self.__dict__ or path in self.files():
            # cheaper: consult the manifest delta when the file changed here
            if path in self._manifestdelta:
                return (self._manifestdelta[path],
                        self._manifestdelta.flags(path))
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(self._node, path,
                                            _('not found in manifest'))

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        # missing files have no flags rather than raising
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return ''

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        '''return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        '''
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        r = self._repo
        return matchmod.match(r.root, r.getcwd(), pats,
                              include, exclude, default,
                              auditor=r.nofsauditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn)

    def diff(self, ctx2=None, match=None, changes=None, opts=None,
             losedatafn=None, prefix='', relroot='', copy=None,
             hunksfilterfn=None):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        return patch.diff(self._repo, ctx2, self, match=match, changes=changes,
                          opts=opts, losedatafn=losedatafn, prefix=prefix,
                          relroot=relroot, copy=copy,
                          hunksfilterfn=hunksfilterfn)

    def dirs(self):
        return self._manifest.dirs()

    def hasdir(self, dir):
        return self._manifest.hasdir(dir)

    def status(self, other=None, match=None, listignored=False,
               listclean=False, listunknown=False, listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        reversed = False
        if (not isinstance(ctx1, changectx)
            and isinstance(ctx2, changectx)):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
                              listunknown)

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
                               r.clean)

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(rev2, match=submatch, ignored=listignored,
                               clean=listclean, unknown=listunknown,
                               listsubrepos=True)
                for rfiles, sfiles in zip(r, s):
                    rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)

        narrowmatch = self._repo.narrowmatch()
        if not narrowmatch.always():
            # narrow clones: drop files outside the narrowspec
            for l in r:
                l[:] = list(filter(narrowmatch, l))
        for l in r:
            l.sort()

        return r
385 class changectx(basectx):
384 class changectx(basectx):
386 """A changecontext object makes access to data related to a particular
385 """A changecontext object makes access to data related to a particular
387 changeset convenient. It represents a read-only context already present in
386 changeset convenient. It represents a read-only context already present in
388 the repo."""
387 the repo."""
389 def __init__(self, repo, changeid='.'):
388 def __init__(self, repo, changeid='.'):
390 """changeid is a revision number, node, or tag"""
389 """changeid is a revision number, node, or tag"""
391 super(changectx, self).__init__(repo)
390 super(changectx, self).__init__(repo)
392
391
393 try:
392 try:
394 if isinstance(changeid, int):
393 if isinstance(changeid, int):
395 self._node = repo.changelog.node(changeid)
394 self._node = repo.changelog.node(changeid)
396 self._rev = changeid
395 self._rev = changeid
397 return
396 return
398 elif changeid == 'null':
397 elif changeid == 'null':
399 self._node = nullid
398 self._node = nullid
400 self._rev = nullrev
399 self._rev = nullrev
401 return
400 return
402 elif changeid == 'tip':
401 elif changeid == 'tip':
403 self._node = repo.changelog.tip()
402 self._node = repo.changelog.tip()
404 self._rev = repo.changelog.rev(self._node)
403 self._rev = repo.changelog.rev(self._node)
405 return
404 return
406 elif (changeid == '.'
405 elif (changeid == '.'
407 or repo.local() and changeid == repo.dirstate.p1()):
406 or repo.local() and changeid == repo.dirstate.p1()):
408 # this is a hack to delay/avoid loading obsmarkers
407 # this is a hack to delay/avoid loading obsmarkers
409 # when we know that '.' won't be hidden
408 # when we know that '.' won't be hidden
410 self._node = repo.dirstate.p1()
409 self._node = repo.dirstate.p1()
411 self._rev = repo.unfiltered().changelog.rev(self._node)
410 self._rev = repo.unfiltered().changelog.rev(self._node)
412 return
411 return
413 elif len(changeid) == 20:
412 elif len(changeid) == 20:
414 try:
413 try:
415 self._node = changeid
414 self._node = changeid
416 self._rev = repo.changelog.rev(changeid)
415 self._rev = repo.changelog.rev(changeid)
417 return
416 return
418 except error.FilteredLookupError:
417 except error.FilteredLookupError:
419 changeid = hex(changeid) # for the error message
418 changeid = hex(changeid) # for the error message
420 raise
419 raise
421 except LookupError:
420 except LookupError:
422 # check if it might have come from damaged dirstate
421 # check if it might have come from damaged dirstate
423 #
422 #
424 # XXX we could avoid the unfiltered if we had a recognizable
423 # XXX we could avoid the unfiltered if we had a recognizable
425 # exception for filtered changeset access
424 # exception for filtered changeset access
426 if (repo.local()
425 if (repo.local()
427 and changeid in repo.unfiltered().dirstate.parents()):
426 and changeid in repo.unfiltered().dirstate.parents()):
428 msg = _("working directory has unknown parent '%s'!")
427 msg = _("working directory has unknown parent '%s'!")
429 raise error.Abort(msg % short(changeid))
428 raise error.Abort(msg % short(changeid))
430 changeid = hex(changeid) # for the error message
429 changeid = hex(changeid) # for the error message
431
430
432 elif len(changeid) == 40:
431 elif len(changeid) == 40:
433 try:
432 try:
434 self._node = bin(changeid)
433 self._node = bin(changeid)
435 self._rev = repo.changelog.rev(self._node)
434 self._rev = repo.changelog.rev(self._node)
436 return
435 return
437 except error.FilteredLookupError:
436 except error.FilteredLookupError:
438 raise
437 raise
439 except (TypeError, LookupError):
438 except (TypeError, LookupError):
440 pass
439 pass
441 else:
440 else:
442 raise error.ProgrammingError(
441 raise error.ProgrammingError(
443 "unsupported changeid '%s' of type %s" %
442 "unsupported changeid '%s' of type %s" %
444 (changeid, type(changeid)))
443 (changeid, type(changeid)))
445
444
446 except (error.FilteredIndexError, error.FilteredLookupError):
445 except (error.FilteredIndexError, error.FilteredLookupError):
447 raise error.FilteredRepoLookupError(_("filtered revision '%s'")
446 raise error.FilteredRepoLookupError(_("filtered revision '%s'")
448 % pycompat.bytestr(changeid))
447 % pycompat.bytestr(changeid))
449 except error.FilteredRepoLookupError:
448 except error.FilteredRepoLookupError:
450 raise
449 raise
451 except IndexError:
450 except IndexError:
452 pass
451 pass
453 raise error.RepoLookupError(
452 raise error.RepoLookupError(
454 _("unknown revision '%s'") % changeid)
453 _("unknown revision '%s'") % changeid)
455
454
456 def __hash__(self):
455 def __hash__(self):
457 try:
456 try:
458 return hash(self._rev)
457 return hash(self._rev)
459 except AttributeError:
458 except AttributeError:
460 return id(self)
459 return id(self)
461
460
462 def __nonzero__(self):
461 def __nonzero__(self):
463 return self._rev != nullrev
462 return self._rev != nullrev
464
463
465 __bool__ = __nonzero__
464 __bool__ = __nonzero__
466
465
467 @propertycache
466 @propertycache
468 def _changeset(self):
467 def _changeset(self):
469 return self._repo.changelog.changelogrevision(self.rev())
468 return self._repo.changelog.changelogrevision(self.rev())
470
469
471 @propertycache
470 @propertycache
472 def _manifest(self):
471 def _manifest(self):
473 return self._manifestctx.read()
472 return self._manifestctx.read()
474
473
475 @property
474 @property
476 def _manifestctx(self):
475 def _manifestctx(self):
477 return self._repo.manifestlog[self._changeset.manifest]
476 return self._repo.manifestlog[self._changeset.manifest]
478
477
479 @propertycache
478 @propertycache
480 def _manifestdelta(self):
479 def _manifestdelta(self):
481 return self._manifestctx.readdelta()
480 return self._manifestctx.readdelta()
482
481
483 @propertycache
482 @propertycache
484 def _parents(self):
483 def _parents(self):
485 repo = self._repo
484 repo = self._repo
486 p1, p2 = repo.changelog.parentrevs(self._rev)
485 p1, p2 = repo.changelog.parentrevs(self._rev)
487 if p2 == nullrev:
486 if p2 == nullrev:
488 return [changectx(repo, p1)]
487 return [changectx(repo, p1)]
489 return [changectx(repo, p1), changectx(repo, p2)]
488 return [changectx(repo, p1), changectx(repo, p2)]
490
489
491 def changeset(self):
490 def changeset(self):
492 c = self._changeset
491 c = self._changeset
493 return (
492 return (
494 c.manifest,
493 c.manifest,
495 c.user,
494 c.user,
496 c.date,
495 c.date,
497 c.files,
496 c.files,
498 c.description,
497 c.description,
499 c.extra,
498 c.extra,
500 )
499 )
501 def manifestnode(self):
500 def manifestnode(self):
502 return self._changeset.manifest
501 return self._changeset.manifest
503
502
504 def user(self):
503 def user(self):
505 return self._changeset.user
504 return self._changeset.user
506 def date(self):
505 def date(self):
507 return self._changeset.date
506 return self._changeset.date
508 def files(self):
507 def files(self):
509 return self._changeset.files
508 return self._changeset.files
510 def description(self):
509 def description(self):
511 return self._changeset.description
510 return self._changeset.description
512 def branch(self):
511 def branch(self):
513 return encoding.tolocal(self._changeset.extra.get("branch"))
512 return encoding.tolocal(self._changeset.extra.get("branch"))
514 def closesbranch(self):
513 def closesbranch(self):
515 return 'close' in self._changeset.extra
514 return 'close' in self._changeset.extra
516 def extra(self):
515 def extra(self):
517 """Return a dict of extra information."""
516 """Return a dict of extra information."""
518 return self._changeset.extra
517 return self._changeset.extra
519 def tags(self):
518 def tags(self):
520 """Return a list of byte tag names"""
519 """Return a list of byte tag names"""
521 return self._repo.nodetags(self._node)
520 return self._repo.nodetags(self._node)
522 def bookmarks(self):
521 def bookmarks(self):
523 """Return a list of byte bookmark names."""
522 """Return a list of byte bookmark names."""
524 return self._repo.nodebookmarks(self._node)
523 return self._repo.nodebookmarks(self._node)
525 def phase(self):
524 def phase(self):
526 return self._repo._phasecache.phase(self._repo, self._rev)
525 return self._repo._phasecache.phase(self._repo, self._rev)
527 def hidden(self):
526 def hidden(self):
528 return self._rev in repoview.filterrevs(self._repo, 'visible')
527 return self._rev in repoview.filterrevs(self._repo, 'visible')
529
528
def isinmemory(self):
    # Committed changesets are always backed by storage, never in-memory.
    return False
532
531
def children(self):
    """return list of changectx contexts for each child changeset.

    This returns only the immediate child changesets. Use descendants() to
    recursively walk children.
    """
    childnodes = self._repo.changelog.children(self._node)
    return [changectx(self._repo, node) for node in childnodes]
541
540
def ancestors(self):
    # Lazily yield a changectx for every ancestor revision of this one.
    for rev in self._repo.changelog.ancestors([self._rev]):
        yield changectx(self._repo, rev)
545
544
def descendants(self):
    """Recursively yield all children of the changeset.

    For just the immediate children, use children()
    """
    for rev in self._repo.changelog.descendants([self._rev]):
        yield changectx(self._repo, rev)
553
552
def filectx(self, path, fileid=None, filelog=None):
    """get a file context from this changeset"""
    # Default to the file node recorded in this changeset's manifest.
    if fileid is None:
        fileid = self.filenode(path)
    return filectx(self._repo, path, fileid=fileid,
                   changectx=self, filelog=filelog)
560
559
def ancestor(self, c2, warn=False):
    """return the "best" ancestor context of self and c2

    If there are multiple candidates, it will show a message and check
    merge.preferancestor configuration before falling back to the
    revlog ancestor."""
    # deal with workingctxs
    n2 = c2._node
    if n2 is None:
        n2 = c2._parents[0]._node
    cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
    if not cahs:
        # no common ancestor at all
        anc = nullid
    elif len(cahs) == 1:
        # unambiguous: single common-ancestor head
        anc = cahs[0]
    else:
        # Several candidates: let the user's preference pick one.
        # experimental config: merge.preferancestor
        for preferred in self._repo.ui.configlist('merge', 'preferancestor'):
            try:
                prefctx = scmutil.revsymbol(self._repo, preferred)
            except error.RepoLookupError:
                continue
            anc = prefctx.node()
            if anc in cahs:
                break
        else:
            # no configured preference matched; use the revlog's pick
            anc = self._repo.changelog.ancestor(self._node, n2)
        if warn:
            self._repo.ui.status(
                (_("note: using %s as ancestor of %s and %s\n") %
                 (short(anc), short(self._node), short(n2))) +
                ''.join(_(" alternatively, use --config "
                          "merge.preferancestor=%s\n") %
                        short(n) for n in sorted(cahs) if n != anc))
    return changectx(self._repo, anc)
596
595
def isancestorof(self, other):
    """True if this changeset is an ancestor of other"""
    return self._repo.changelog.isancestorrev(self._rev, other._rev)
600
599
def walk(self, match):
    '''Generates matching file names.'''

    # Wrap match.bad method to have message with nodeid
    def onbad(fn, msg):
        # The manifest doesn't know about subrepos, so don't complain about
        # paths into valid subrepos.
        if any(fn == s or fn.startswith(s + '/')
               for s in self.substate):
            return
        match.bad(fn, _('no such file in rev %s') % self)

    wrapped = matchmod.badmatch(match, onbad)
    return self._manifest.walk(wrapped)
615
614
def matches(self, match):
    # Matching here is simply walking the manifest with the matcher.
    return self.walk(match)
618
617
class basefilectx(object):
    """A filecontext object represents the common logic for its children:
    filectx: read-only access to a filerevision that is already present
             in the repo,
    workingfilectx: a filecontext that represents files from the working
                    directory,
    memfilectx: a filecontext that represents files in-memory,
    """
    @propertycache
    def _filelog(self):
        # Filelog (per-file revlog) for this file's path.
        return self._repo.file(self._path)

    @propertycache
    def _changeid(self):
        # Prefer explicitly supplied identity, then an associated
        # changectx, then a lazily adjusted linkrev, then the raw linkrev.
        if r'_changeid' in self.__dict__:
            return self._changeid
        elif r'_changectx' in self.__dict__:
            return self._changectx.rev()
        elif r'_descendantrev' in self.__dict__:
            # this file context was created from a revision with a known
            # descendant, we can (lazily) correct for linkrev aliases
            return self._adjustlinkrev(self._descendantrev)
        else:
            return self._filelog.linkrev(self._filerev)

    @propertycache
    def _filenode(self):
        if r'_fileid' in self.__dict__:
            return self._filelog.lookup(self._fileid)
        else:
            return self._changectx.filenode(self._path)

    @propertycache
    def _filerev(self):
        return self._filelog.rev(self._filenode)

    @propertycache
    def _repopath(self):
        return self._path

    def __nonzero__(self):
        try:
            self._filenode
            return True
        except error.LookupError:
            # file is missing
            return False

    __bool__ = __nonzero__

    def __bytes__(self):
        try:
            return "%s@%s" % (self.path(), self._changectx)
        except error.LookupError:
            return "%s@???" % self.path()

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return r"<%s %s>" % (type(self).__name__, str(self))

    def __hash__(self):
        try:
            return hash((self._path, self._filenode))
        except AttributeError:
            # not yet resolvable; fall back to identity
            return id(self)

    def __eq__(self, other):
        try:
            return (type(self) == type(other) and self._path == other._path
                    and self._filenode == other._filenode)
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    # -- simple accessors, mostly delegating to the associated changectx --
    def filerev(self):
        return self._filerev
    def filenode(self):
        return self._filenode
    @propertycache
    def _flags(self):
        return self._changectx.flags(self._path)
    def flags(self):
        return self._flags
    def filelog(self):
        return self._filelog
    def rev(self):
        return self._changeid
    def linkrev(self):
        return self._filelog.linkrev(self._filerev)
    def node(self):
        return self._changectx.node()
    def hex(self):
        return self._changectx.hex()
    def user(self):
        return self._changectx.user()
    def date(self):
        return self._changectx.date()
    def files(self):
        return self._changectx.files()
    def description(self):
        return self._changectx.description()
    def branch(self):
        return self._changectx.branch()
    def extra(self):
        return self._changectx.extra()
    def phase(self):
        return self._changectx.phase()
    def phasestr(self):
        return self._changectx.phasestr()
    def obsolete(self):
        return self._changectx.obsolete()
    def instabilities(self):
        return self._changectx.instabilities()
    def manifest(self):
        return self._changectx.manifest()
    def changectx(self):
        return self._changectx
    def renamed(self):
        return self._copied
    def repo(self):
        return self._repo
    def size(self):
        return len(self.data())

    def path(self):
        return self._path

    def isbinary(self):
        try:
            return stringutil.binary(self.data())
        except IOError:
            return False
    def isexec(self):
        return 'x' in self.flags()
    def islink(self):
        return 'l' in self.flags()

    def isabsent(self):
        """whether this filectx represents a file not in self._changectx

        This is mainly for merge code to detect change/delete conflicts. This is
        expected to be True for all subclasses of basectx."""
        return False

    _customcmp = False
    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        if fctx._customcmp:
            return fctx.cmp(self)

        if (fctx._filenode is None
            and (self._repo._encodefilterpats
                 # if file data starts with '\1\n', empty metadata block is
                 # prepended, which adds 4 bytes to filelog.size().
                 or self.size() - 4 == fctx.size())
            or self.size() == fctx.size()):
            return self._filelog.cmp(self._filenode, fctx.data())

        return True

    def _adjustlinkrev(self, srcrev, inclusive=False):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked
        """
        repo = self._repo
        cl = repo.unfiltered().changelog
        mfl = repo.manifestlog
        # fetch the linkrev
        lkr = self.linkrev()
        # hack to reuse ancestor computation when searching for renames
        memberanc = getattr(self, '_ancestrycontext', None)
        iteranc = None
        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True # we skipped the real (revless) source
        else:
            revs = [srcrev]
        if memberanc is None:
            memberanc = iteranc = cl.ancestors(revs, lkr,
                                               inclusive=inclusive)
        # check if this linkrev is an ancestor of srcrev
        if lkr not in memberanc:
            if iteranc is None:
                iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
            fnode = self._filenode
            path = self._path
            for a in iteranc:
                ac = cl.read(a) # get changeset data (we avoid object creation)
                if path in ac[3]: # checking the 'files' field.
                    # The file has been touched, check if the content is
                    # similar to the one we search for.
                    if fnode == mfl[ac[0]].readfast().get(path):
                        return a
            # In theory, we should never get out of that loop without a result.
            # But if manifest uses a buggy file revision (not children of the
            # one it replaces) we could. Such a buggy situation will likely
            # result is crash somewhere else at to some point.
        return lkr

    def introrev(self):
        """return the rev of the changeset which introduced this file revision

        This method is different from linkrev because it take into account the
        changeset the filectx was created from. It ensures the returned
        revision is one of its ancestors. This prevents bugs from
        'linkrev-shadowing' when a file revision is used by multiple
        changesets.
        """
        lkr = self.linkrev()
        attrs = vars(self)
        noctx = not (r'_changeid' in attrs or r'_changectx' in attrs)
        if noctx or self.rev() == lkr:
            return self.linkrev()
        return self._adjustlinkrev(self.rev(), inclusive=True)

    def introfilectx(self):
        """Return filectx having identical contents, but pointing to the
        changeset revision where this filectx was introduced"""
        introrev = self.introrev()
        if self.rev() == introrev:
            return self
        return self.filectx(self.filenode(), changeid=introrev)

    def _parentfilectx(self, path, fileid, filelog):
        """create parent filectx keeping ancestry info for _adjustlinkrev()"""
        fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
        if r'_changeid' in vars(self) or r'_changectx' in vars(self):
            # If self is associated with a changeset (probably explicitly
            # fed), ensure the created filectx is associated with a
            # changeset that is an ancestor of self.changectx.
            # This lets us later use _adjustlinkrev to get a correct link.
            fctx._descendantrev = self.rev()
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        elif r'_descendantrev' in vars(self):
            # Otherwise propagate _descendantrev if we have one associated.
            fctx._descendantrev = self._descendantrev
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        return fctx

    def parents(self):
        _path = self._path
        fl = self._filelog
        parents = self._filelog.parents(self._filenode)
        pl = [(_path, node, fl) for node in parents if node != nullid]

        r = fl.renamed(self._filenode)
        if r:
            # - In the simple rename case, both parent are nullid, pl is empty.
            # - In case of merge, only one of the parent is null id and should
            # be replaced with the rename information. This parent is -always-
            # the first one.
            #
            # As null id have always been filtered out in the previous list
            # comprehension, inserting to 0 will always result in "replacing
            # first nullid parent with rename information.
            pl.insert(0, (r[0], r[1], self._repo.file(r[0])))

        return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]

    def p1(self):
        return self.parents()[0]

    def p2(self):
        p = self.parents()
        if len(p) == 2:
            return p[1]
        # synthesize a null filectx when there is no second parent
        return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)

    def annotate(self, follow=False, skiprevs=None, diffopts=None):
        """Returns a list of annotateline objects for each line in the file

        - line.fctx is the filectx of the node where that line was last changed
        - line.lineno is the line number at the first appearance in the managed
          file
        - line.text is the data on that line (including newline character)
        """
        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            # Cut _descendantrev here to mitigate the penalty of lazy linkrev
            # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
            # from the topmost introrev (= srcrev) down to p.linkrev() if it
            # isn't an ancestor of the srcrev.
            f._changeid
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
            # from the cache to save time
            for p in pl:
                if not r'_filelog' in p.__dict__:
                    p._filelog = getlog(p.path())

            return pl

        # use linkrev to find the first changeset where self appeared
        base = self.introfilectx()
        if getattr(base, '_ancestrycontext', None) is None:
            cl = self._repo.changelog
            if base.rev() is None:
                # wctx is not inclusive, but works because _ancestrycontext
                # is used to test filelog revisions
                ac = cl.ancestors([p.rev() for p in base.parents()],
                                  inclusive=True)
            else:
                ac = cl.ancestors([base.rev()], inclusive=True)
            base._ancestrycontext = ac

        return dagop.annotate(base, parents, skiprevs=skiprevs,
                              diffopts=diffopts)

    def ancestors(self, followfirst=False):
        # Walk ancestors in (linkrev, filenode) order, highest first.
        visit = {}
        c = self
        if followfirst:
            cut = 1
        else:
            cut = None

        while True:
            for parent in c.parents()[:cut]:
                visit[(parent.linkrev(), parent.filenode())] = parent
            if not visit:
                break
            c = visit.pop(max(visit))
            yield c

    def decodeddata(self):
        """Returns `data()` after running repository decoding filters.

        This is often equivalent to how the data would be expressed on disk.
        """
        return self._repo.wwritedata(self.path(), self.data())
967
969 class filectx(basefilectx):
968 class filectx(basefilectx):
970 """A filecontext object makes access to data related to a particular
969 """A filecontext object makes access to data related to a particular
971 filerevision convenient."""
970 filerevision convenient."""
def __init__(self, repo, path, changeid=None, fileid=None,
             filelog=None, changectx=None):
    """changeid can be a changeset revision, node, or tag.
    fileid can be a file revision or node."""
    self._repo = repo
    self._path = path

    # At least one way of pinning down the revision must be given.
    assert (changeid is not None
            or fileid is not None
            or changectx is not None), \
            ("bad args: changeid=%r, fileid=%r, changectx=%r"
             % (changeid, fileid, changectx))

    # Only set the lazily-computed attributes that were supplied; the
    # rest are derived on demand by the propertycaches.
    if filelog is not None:
        self._filelog = filelog

    if changeid is not None:
        self._changeid = changeid
    if changectx is not None:
        self._changectx = changectx
    if fileid is not None:
        self._fileid = fileid
994
993
@propertycache
def _changectx(self):
    try:
        return changectx(self._repo, self._changeid)
    except error.FilteredRepoLookupError:
        # Linkrev may point to any revision in the repository. When the
        # repository is filtered this may lead to `filectx` trying to build
        # `changectx` for filtered revision. In such case we fallback to
        # creating `changectx` on the unfiltered version of the reposition.
        # This fallback should not be an issue because `changectx` from
        # `filectx` are not used in complex operations that care about
        # filtering.
        #
        # This fallback is a cheap and dirty fix that prevent several
        # crashes. It does not ensure the behavior is correct. However the
        # behavior was not correct before filtering either and "incorrect
        # behavior" is seen as better as "crash"
        #
        # Linkrevs have several serious troubles with filtering that are
        # complicated to solve. Proper handling of the issue here should be
        # considered when solving linkrev issue are on the table.
        return changectx(self._repo.unfiltered(), self._changeid)
1017
1016
1018 def filectx(self, fileid, changeid=None):
1017 def filectx(self, fileid, changeid=None):
1019 '''opens an arbitrary revision of the file without
1018 '''opens an arbitrary revision of the file without
1020 opening a new filelog'''
1019 opening a new filelog'''
1021 return filectx(self._repo, self._path, fileid=fileid,
1020 return filectx(self._repo, self._path, fileid=fileid,
1022 filelog=self._filelog, changeid=changeid)
1021 filelog=self._filelog, changeid=changeid)
1023
1022
1024 def rawdata(self):
1023 def rawdata(self):
1025 return self._filelog.revision(self._filenode, raw=True)
1024 return self._filelog.revision(self._filenode, raw=True)
1026
1025
1027 def rawflags(self):
1026 def rawflags(self):
1028 """low-level revlog flags"""
1027 """low-level revlog flags"""
1029 return self._filelog.flags(self._filerev)
1028 return self._filelog.flags(self._filerev)
1030
1029
1031 def data(self):
1030 def data(self):
1032 try:
1031 try:
1033 return self._filelog.read(self._filenode)
1032 return self._filelog.read(self._filenode)
1034 except error.CensoredNodeError:
1033 except error.CensoredNodeError:
1035 if self._repo.ui.config("censor", "policy") == "ignore":
1034 if self._repo.ui.config("censor", "policy") == "ignore":
1036 return ""
1035 return ""
1037 raise error.Abort(_("censored node: %s") % short(self._filenode),
1036 raise error.Abort(_("censored node: %s") % short(self._filenode),
1038 hint=_("set censor.policy to ignore errors"))
1037 hint=_("set censor.policy to ignore errors"))
1039
1038
1040 def size(self):
1039 def size(self):
1041 return self._filelog.size(self._filerev)
1040 return self._filelog.size(self._filerev)
1042
1041
    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return None

        # The filelog's linkrev points straight back at this changeset:
        # the rename was introduced here, so report it.
        if self.rev() == self.linkrev():
            return renamed

        # Otherwise, only report the copy when neither parent already has
        # this exact file revision (i.e. the rename is new in this cset).
        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                # parent doesn't have the file at all; keep checking
                pass
        return renamed
1068
1067
1069 def children(self):
1068 def children(self):
1070 # hard for renames
1069 # hard for renames
1071 c = self._filelog.children(self._filenode)
1070 c = self._filelog.children(self._filenode)
1072 return [filectx(self._repo, self._path, fileid=x,
1071 return [filectx(self._repo, self._path, fileid=x,
1073 filelog=self._filelog) for x in c]
1072 filelog=self._filelog) for x in c]
1074
1073
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        super(committablectx, self).__init__(repo)
        self._rev = None
        self._node = None
        self._text = text
        # Only pre-populate the propertycache-backed attributes when the
        # caller supplied explicit values; otherwise the propertycaches
        # below compute defaults lazily.
        if date:
            self._date = dateutil.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise error.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
        if self._extra['branch'] == '':
            self._extra['branch'] = 'default'

    def __bytes__(self):
        # e.g. "<p1>+" -- the trailing '+' marks an uncommitted context
        return bytes(self._parents[0]) + "+"

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func

    @propertycache
    def _flagfunc(self):
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    @propertycache
    def _status(self):
        # default status when none was passed to __init__
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        ui = self._repo.ui
        # devel.default-date lets tests pin the commit date
        date = ui.configdate('devel', 'default-date')
        if date is None:
            date = dateutil.makedate()
        return date

    def subrev(self, subpath):
        # an uncommitted context has no recorded subrepo revision
        return None

    def manifestnode(self):
        return None
    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)

    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def isinmemory(self):
        return False

    def tags(self):
        # uncommitted changes carry no tags
        return []

    def bookmarks(self):
        # inherit the union of the parents' bookmarks
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        phase = phases.draft # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        """Return the flags ('l', 'x' or '') for path.

        Prefers the cached manifest when one has been built; otherwise
        falls back to the dirstate flag function.
        """
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def walk(self, match):
        '''Generates matching file names.'''
        return sorted(self._repo.dirstate.walk(match,
                                               subrepos=sorted(self.substate),
                                               unknown=True, ignored=False))

    def matches(self, match):
        # skip files marked as removed ('r') in the dirstate
        ds = self._repo.dirstate
        return sorted(f for f in ds.matches(match) if ds[f] != 'r')

    def ancestors(self):
        # parents first, then their changelog ancestors
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]):
            yield changectx(self._repo, a)

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.normal(f)
            for f in self.removed():
                self._repo.dirstate.drop(f)
            self._repo.dirstate.setparents(node)

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())

    def dirty(self, missing=False, merge=True, branch=True):
        return False
1276
1275
1277 class workingctx(committablectx):
1276 class workingctx(committablectx):
1278 """A workingctx object makes access to data related to
1277 """A workingctx object makes access to data related to
1279 the current working directory convenient.
1278 the current working directory convenient.
1280 date - any valid date string or (unixtime, offset), or None.
1279 date - any valid date string or (unixtime, offset), or None.
1281 user - username string, or None.
1280 user - username string, or None.
1282 extra - a dictionary of extra values, or None.
1281 extra - a dictionary of extra values, or None.
1283 changes - a list of file lists as returned by localrepo.status()
1282 changes - a list of file lists as returned by localrepo.status()
1284 or None to use the repository status.
1283 or None to use the repository status.
1285 """
1284 """
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        # All state handling lives in committablectx; workingctx only
        # specializes behavior on top of it.
        super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1289
1288
1290 def __iter__(self):
1289 def __iter__(self):
1291 d = self._repo.dirstate
1290 d = self._repo.dirstate
1292 for f in d:
1291 for f in d:
1293 if d[f] != 'r':
1292 if d[f] != 'r':
1294 yield f
1293 yield f
1295
1294
1296 def __contains__(self, key):
1295 def __contains__(self, key):
1297 return self._repo.dirstate[key] not in "?r"
1296 return self._repo.dirstate[key] not in "?r"
1298
1297
    def hex(self):
        # The working directory has no real node; report the magic wdirid
        # (imported from mercurial.node) in hex form.
        return hex(wdirid)
1301
1300
    @propertycache
    def _parents(self):
        """Parent changectxs from the dirstate.

        The dirstate always reports two parents; a nullid second parent
        means there is no merge in progress, so it is dropped.
        """
        p = self._repo.dirstate.parents()
        if p[1] == nullid:
            p = p[:-1]
        return [changectx(self._repo, x) for x in p]
1308
1307
    def _fileinfo(self, path):
        # populate __dict__['_manifest'] as workingctx has no _manifestdelta
        # (the bare attribute access is deliberate: it primes the
        # propertycache before delegating to the superclass)
        self._manifest
        return super(workingctx, self)._fileinfo(path)
1313
1312
    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        path: repo-relative file path; filelog: optional pre-opened filelog
        to reuse.
        """
        return workingfilectx(self._repo, path, workingctx=self,
                              filelog=filelog)
1318
1317
1319 def dirty(self, missing=False, merge=True, branch=True):
1318 def dirty(self, missing=False, merge=True, branch=True):
1320 "check whether a working directory is modified"
1319 "check whether a working directory is modified"
1321 # check subrepos first
1320 # check subrepos first
1322 for s in sorted(self.substate):
1321 for s in sorted(self.substate):
1323 if self.sub(s).dirty(missing=missing):
1322 if self.sub(s).dirty(missing=missing):
1324 return True
1323 return True
1325 # check current working dir
1324 # check current working dir
1326 return ((merge and self.p2()) or
1325 return ((merge and self.p2()) or
1327 (branch and self.branch() != self.p1().branch()) or
1326 (branch and self.branch() != self.p1().branch()) or
1328 self.modified() or self.added() or self.removed() or
1327 self.modified() or self.added() or self.removed() or
1329 (missing and self.deleted()))
1328 (missing and self.deleted()))
1330
1329
    def add(self, list, prefix=""):
        """Schedule the given files for addition to the dirstate.

        Returns the list of files that were rejected (nonexistent or of an
        unsupported type). Warns, but does not reject, for very large files
        and for files already tracked.
        """
        with self._repo.wlock():
            ui, ds = self._repo.ui, self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                # ds.pathto() returns an absolute file when this is invoked from
                # the keyword extension.  That gets flagged as non-portable on
                # Windows, since it contains the drive letter and colon.
                scmutil.checkportable(ui, os.path.join(prefix, f))
                try:
                    st = lstat(f)
                except OSError:
                    ui.warn(_("%s does not exist!\n") % uipath(f))
                    rejected.append(f)
                    continue
                limit = ui.configbytes('ui', 'large-file-limit')
                if limit != 0 and st.st_size > limit:
                    # warn only; the add still proceeds
                    ui.warn(_("%s: up to %d MB of RAM may be required "
                              "to manage this file\n"
                              "(use 'hg revert %s' to cancel the "
                              "pending addition)\n")
                            % (f, 3 * st.st_size // 1000000, uipath(f)))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(_("%s not added: only files and symlinks "
                              "supported currently\n") % uipath(f))
                    rejected.append(f)
                elif ds[f] in 'amn':
                    # already added, merged or normal: nothing to do
                    ui.warn(_("%s already tracked!\n") % uipath(f))
                elif ds[f] == 'r':
                    # previously removed: resurrect rather than re-add
                    ds.normallookup(f)
                else:
                    ds.add(f)
            return rejected
1366
1365
    def forget(self, files, prefix=""):
        """Stop tracking the given files without deleting them.

        Returns the list of files that were not tracked to begin with.
        Files in state 'a' (added) are simply dropped; anything else is
        marked removed.
        """
        with self._repo.wlock():
            ds = self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            for f in files:
                if f not in self._repo.dirstate:
                    self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
                    rejected.append(f)
                elif self._repo.dirstate[f] != 'a':
                    self._repo.dirstate.remove(f)
                else:
                    self._repo.dirstate.drop(f)
            return rejected
1381
1380
    def undelete(self, list):
        """Restore files marked removed ('r') from a parent revision.

        The file content and flags are rewritten into the working directory
        and the dirstate entry reset to normal. Files not marked removed
        only trigger a warning.
        """
        pctxs = self.parents()
        with self._repo.wlock():
            ds = self._repo.dirstate
            for f in list:
                if self._repo.dirstate[f] != 'r':
                    self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
                else:
                    # py2-era and/or idiom: take f from p1 if present,
                    # otherwise from p2
                    fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
                    t = fctx.data()
                    self._repo.wwrite(f, t, fctx.flags())
                    self._repo.dirstate.normal(f)
1394
1393
    def copy(self, source, dest):
        """Record in the dirstate that dest was copied from source.

        dest must already exist in the working directory as a regular file
        or symlink; otherwise only a warning is emitted.
        """
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError as err:
            # only swallow "no such file"; re-raise anything else
            if err.errno != errno.ENOENT:
                raise
            self._repo.ui.warn(_("%s does not exist!\n")
                               % self._repo.dirstate.pathto(dest))
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(_("copy failed: %s is not a file or a "
                                 "symbolic link\n")
                               % self._repo.dirstate.pathto(dest))
        else:
            with self._repo.wlock():
                # make sure dest is tracked before recording the copy
                if self._repo.dirstate[dest] in '?':
                    self._repo.dirstate.add(dest)
                elif self._repo.dirstate[dest] in 'r':
                    self._repo.dirstate.normallookup(dest)
                self._repo.dirstate.copy(source, dest)
1415
1414
1416 def match(self, pats=None, include=None, exclude=None, default='glob',
1415 def match(self, pats=None, include=None, exclude=None, default='glob',
1417 listsubrepos=False, badfn=None):
1416 listsubrepos=False, badfn=None):
1418 r = self._repo
1417 r = self._repo
1419
1418
1420 # Only a case insensitive filesystem needs magic to translate user input
1419 # Only a case insensitive filesystem needs magic to translate user input
1421 # to actual case in the filesystem.
1420 # to actual case in the filesystem.
1422 icasefs = not util.fscasesensitive(r.root)
1421 icasefs = not util.fscasesensitive(r.root)
1423 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1422 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1424 default, auditor=r.auditor, ctx=self,
1423 default, auditor=r.auditor, ctx=self,
1425 listsubrepos=listsubrepos, badfn=badfn,
1424 listsubrepos=listsubrepos, badfn=badfn,
1426 icasefs=icasefs)
1425 icasefs=icasefs)
1427
1426
    def _filtersuspectsymlink(self, files):
        """Drop files flagged 'l' whose content doesn't look like a symlink.

        Returns files unchanged when the filesystem supports symlinks (the
        dirstate checklink probe succeeded) or the list is empty.
        """
        if not files or self._repo.dirstate._checklink:
            return files

        # Symlink placeholders may get non-symlink-like contents
        # via user error or dereferencing by NFS or Samba servers,
        # so we filter out any placeholders that don't look like a
        # symlink
        sane = []
        for f in files:
            if self.flags(f) == 'l':
                d = self[f].data()
                # empty, oversized, multi-line or binary content cannot be
                # a symlink target
                if (d == '' or len(d) >= 1024 or '\n' in d
                    or stringutil.binary(d)):
                    self._repo.ui.debug('ignoring suspect symlink placeholder'
                                        ' "%s"\n' % f)
                    continue
            sane.append(f)
        return sane
1447
1446
    def _checklookup(self, files):
        """Classify possibly-clean files by comparing against p1.

        Returns (modified, deleted, fixup): files that actually changed,
        files that disappeared while checking, and files that are clean
        and only need their dirstate entry refreshed.
        """
        # check for any possibly clean files
        if not files:
            return [], [], []

        modified = []
        deleted = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            try:
                # This will return True for a file that got replaced by a
                # directory in the interim, but fixing that is pretty hard.
                if (f not in pctx or self.flags(f) != pctx.flags(f)
                    or pctx[f].cmp(self[f])):
                    modified.append(f)
                else:
                    fixup.append(f)
            except (IOError, OSError):
                # A file become inaccessible in between? Mark it as deleted,
                # matching dirstate behavior (issue5584).
                # The dirstate has more complex behavior around whether a
                # missing file matches a directory, etc, but we don't need to
                # bother with that: if f has made it to this point, we're sure
                # it's in the dirstate.
                deleted.append(f)

        return modified, deleted, fixup
1477
1476
    def _poststatusfixup(self, status, fixup):
        """update dirstate for files that are actually clean

        Also runs any registered post-dirstate-status hooks. The dirstate
        write is best-effort: it is skipped if the wlock cannot be taken or
        if the dirstate changed underneath us (identity mismatch).
        """
        poststatus = self._repo.postdsstatus()
        if fixup or poststatus:
            try:
                oldid = self._repo.dirstate.identity()

                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                with self._repo.wlock(False):
                    if self._repo.dirstate.identity() == oldid:
                        if fixup:
                            normal = self._repo.dirstate.normal
                            for f in fixup:
                                normal(f)
                            # write changes out explicitly, because nesting
                            # wlock at runtime may prevent 'wlock.release()'
                            # after this block from doing so for subsequent
                            # changing files
                            tr = self._repo.currenttransaction()
                            self._repo.dirstate.write(tr)

                        if poststatus:
                            for ps in poststatus:
                                ps(self, status)
                    else:
                        # in this case, writing changes out breaks
                        # consistency, because .hg/dirstate was
                        # already changed simultaneously after last
                        # caching (see also issue5584 for detail)
                        self._repo.ui.debug('skip updating dirstate: '
                                            'identity mismatch\n')
            except error.LockError:
                pass
            finally:
                # Even if the wlock couldn't be grabbed, clear out the list.
                self._repo.clearpostdsstatus()
1517
1516
    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        '''Gets the status from the dirstate -- internal use only.'''
        subrepos = []
        if '.hgsub' in self:
            subrepos = sorted(self.substate)
        cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
                                            clean=clean, unknown=unknown)

        # check for any possibly clean files
        fixup = []
        if cmp:
            modified2, deleted2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)
            s.deleted.extend(deleted2)

            if fixup and clean:
                s.clean.extend(fixup)

        self._poststatusfixup(s, fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(s.modified, s.added, s.removed,
                                              s.deleted, [], [], [])
            else:
                self._status = s

        return s
1548
1547
1549 @propertycache
1548 @propertycache
1550 def _manifest(self):
1549 def _manifest(self):
1551 """generate a manifest corresponding to the values in self._status
1550 """generate a manifest corresponding to the values in self._status
1552
1551
1553 This reuse the file nodeid from parent, but we use special node
1552 This reuse the file nodeid from parent, but we use special node
1554 identifiers for added and modified files. This is used by manifests
1553 identifiers for added and modified files. This is used by manifests
1555 merge to see that files are different and by update logic to avoid
1554 merge to see that files are different and by update logic to avoid
1556 deleting newly added files.
1555 deleting newly added files.
1557 """
1556 """
1558 return self._buildstatusmanifest(self._status)
1557 return self._buildstatusmanifest(self._status)
1559
1558
1560 def _buildstatusmanifest(self, status):
1559 def _buildstatusmanifest(self, status):
1561 """Builds a manifest that includes the given status results."""
1560 """Builds a manifest that includes the given status results."""
1562 parents = self.parents()
1561 parents = self.parents()
1563
1562
1564 man = parents[0].manifest().copy()
1563 man = parents[0].manifest().copy()
1565
1564
1566 ff = self._flagfunc
1565 ff = self._flagfunc
1567 for i, l in ((addednodeid, status.added),
1566 for i, l in ((addednodeid, status.added),
1568 (modifiednodeid, status.modified)):
1567 (modifiednodeid, status.modified)):
1569 for f in l:
1568 for f in l:
1570 man[f] = i
1569 man[f] = i
1571 try:
1570 try:
1572 man.setflag(f, ff(f))
1571 man.setflag(f, ff(f))
1573 except OSError:
1572 except OSError:
1574 pass
1573 pass
1575
1574
1576 for f in status.deleted + status.removed:
1575 for f in status.deleted + status.removed:
1577 if f in man:
1576 if f in man:
1578 del man[f]
1577 del man[f]
1579
1578
1580 return man
1579 return man
1581
1580
1582 def _buildstatus(self, other, s, match, listignored, listclean,
1581 def _buildstatus(self, other, s, match, listignored, listclean,
1583 listunknown):
1582 listunknown):
1584 """build a status with respect to another context
1583 """build a status with respect to another context
1585
1584
1586 This includes logic for maintaining the fast path of status when
1585 This includes logic for maintaining the fast path of status when
1587 comparing the working directory against its parent, which is to skip
1586 comparing the working directory against its parent, which is to skip
1588 building a new manifest if self (working directory) is not comparing
1587 building a new manifest if self (working directory) is not comparing
1589 against its parent (repo['.']).
1588 against its parent (repo['.']).
1590 """
1589 """
1591 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1590 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1592 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1591 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1593 # might have accidentally ended up with the entire contents of the file
1592 # might have accidentally ended up with the entire contents of the file
1594 # they are supposed to be linking to.
1593 # they are supposed to be linking to.
1595 s.modified[:] = self._filtersuspectsymlink(s.modified)
1594 s.modified[:] = self._filtersuspectsymlink(s.modified)
1596 if other != self._repo['.']:
1595 if other != self._repo['.']:
1597 s = super(workingctx, self)._buildstatus(other, s, match,
1596 s = super(workingctx, self)._buildstatus(other, s, match,
1598 listignored, listclean,
1597 listignored, listclean,
1599 listunknown)
1598 listunknown)
1600 return s
1599 return s
1601
1600
1602 def _matchstatus(self, other, match):
1601 def _matchstatus(self, other, match):
1603 """override the match method with a filter for directory patterns
1602 """override the match method with a filter for directory patterns
1604
1603
1605 We use inheritance to customize the match.bad method only in cases of
1604 We use inheritance to customize the match.bad method only in cases of
1606 workingctx since it belongs only to the working directory when
1605 workingctx since it belongs only to the working directory when
1607 comparing against the parent changeset.
1606 comparing against the parent changeset.
1608
1607
1609 If we aren't comparing against the working directory's parent, then we
1608 If we aren't comparing against the working directory's parent, then we
1610 just use the default match object sent to us.
1609 just use the default match object sent to us.
1611 """
1610 """
1612 if other != self._repo['.']:
1611 if other != self._repo['.']:
1613 def bad(f, msg):
1612 def bad(f, msg):
1614 # 'f' may be a directory pattern from 'match.files()',
1613 # 'f' may be a directory pattern from 'match.files()',
1615 # so 'f not in ctx1' is not enough
1614 # so 'f not in ctx1' is not enough
1616 if f not in other and not other.hasdir(f):
1615 if f not in other and not other.hasdir(f):
1617 self._repo.ui.warn('%s: %s\n' %
1616 self._repo.ui.warn('%s: %s\n' %
1618 (self._repo.dirstate.pathto(f), msg))
1617 (self._repo.dirstate.pathto(f), msg))
1619 match.bad = bad
1618 match.bad = bad
1620 return match
1619 return match
1621
1620
1622 def markcommitted(self, node):
1621 def markcommitted(self, node):
1623 super(workingctx, self).markcommitted(node)
1622 super(workingctx, self).markcommitted(node)
1624
1623
1625 sparse.aftercommit(self._repo, node)
1624 sparse.aftercommit(self._repo, node)
1626
1625
1627 class committablefilectx(basefilectx):
1626 class committablefilectx(basefilectx):
1628 """A committablefilectx provides common functionality for a file context
1627 """A committablefilectx provides common functionality for a file context
1629 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1628 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1630 def __init__(self, repo, path, filelog=None, ctx=None):
1629 def __init__(self, repo, path, filelog=None, ctx=None):
1631 self._repo = repo
1630 self._repo = repo
1632 self._path = path
1631 self._path = path
1633 self._changeid = None
1632 self._changeid = None
1634 self._filerev = self._filenode = None
1633 self._filerev = self._filenode = None
1635
1634
1636 if filelog is not None:
1635 if filelog is not None:
1637 self._filelog = filelog
1636 self._filelog = filelog
1638 if ctx:
1637 if ctx:
1639 self._changectx = ctx
1638 self._changectx = ctx
1640
1639
1641 def __nonzero__(self):
1640 def __nonzero__(self):
1642 return True
1641 return True
1643
1642
1644 __bool__ = __nonzero__
1643 __bool__ = __nonzero__
1645
1644
1646 def linkrev(self):
1645 def linkrev(self):
1647 # linked to self._changectx no matter if file is modified or not
1646 # linked to self._changectx no matter if file is modified or not
1648 return self.rev()
1647 return self.rev()
1649
1648
1650 def parents(self):
1649 def parents(self):
1651 '''return parent filectxs, following copies if necessary'''
1650 '''return parent filectxs, following copies if necessary'''
1652 def filenode(ctx, path):
1651 def filenode(ctx, path):
1653 return ctx._manifest.get(path, nullid)
1652 return ctx._manifest.get(path, nullid)
1654
1653
1655 path = self._path
1654 path = self._path
1656 fl = self._filelog
1655 fl = self._filelog
1657 pcl = self._changectx._parents
1656 pcl = self._changectx._parents
1658 renamed = self.renamed()
1657 renamed = self.renamed()
1659
1658
1660 if renamed:
1659 if renamed:
1661 pl = [renamed + (None,)]
1660 pl = [renamed + (None,)]
1662 else:
1661 else:
1663 pl = [(path, filenode(pcl[0], path), fl)]
1662 pl = [(path, filenode(pcl[0], path), fl)]
1664
1663
1665 for pc in pcl[1:]:
1664 for pc in pcl[1:]:
1666 pl.append((path, filenode(pc, path), fl))
1665 pl.append((path, filenode(pc, path), fl))
1667
1666
1668 return [self._parentfilectx(p, fileid=n, filelog=l)
1667 return [self._parentfilectx(p, fileid=n, filelog=l)
1669 for p, n, l in pl if n != nullid]
1668 for p, n, l in pl if n != nullid]
1670
1669
1671 def children(self):
1670 def children(self):
1672 return []
1671 return []
1673
1672
1674 class workingfilectx(committablefilectx):
1673 class workingfilectx(committablefilectx):
1675 """A workingfilectx object makes access to data related to a particular
1674 """A workingfilectx object makes access to data related to a particular
1676 file in the working directory convenient."""
1675 file in the working directory convenient."""
1677 def __init__(self, repo, path, filelog=None, workingctx=None):
1676 def __init__(self, repo, path, filelog=None, workingctx=None):
1678 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1677 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1679
1678
1680 @propertycache
1679 @propertycache
1681 def _changectx(self):
1680 def _changectx(self):
1682 return workingctx(self._repo)
1681 return workingctx(self._repo)
1683
1682
1684 def data(self):
1683 def data(self):
1685 return self._repo.wread(self._path)
1684 return self._repo.wread(self._path)
1686 def renamed(self):
1685 def renamed(self):
1687 rp = self._repo.dirstate.copied(self._path)
1686 rp = self._repo.dirstate.copied(self._path)
1688 if not rp:
1687 if not rp:
1689 return None
1688 return None
1690 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1689 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1691
1690
1692 def size(self):
1691 def size(self):
1693 return self._repo.wvfs.lstat(self._path).st_size
1692 return self._repo.wvfs.lstat(self._path).st_size
1694 def date(self):
1693 def date(self):
1695 t, tz = self._changectx.date()
1694 t, tz = self._changectx.date()
1696 try:
1695 try:
1697 return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
1696 return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
1698 except OSError as err:
1697 except OSError as err:
1699 if err.errno != errno.ENOENT:
1698 if err.errno != errno.ENOENT:
1700 raise
1699 raise
1701 return (t, tz)
1700 return (t, tz)
1702
1701
1703 def exists(self):
1702 def exists(self):
1704 return self._repo.wvfs.exists(self._path)
1703 return self._repo.wvfs.exists(self._path)
1705
1704
1706 def lexists(self):
1705 def lexists(self):
1707 return self._repo.wvfs.lexists(self._path)
1706 return self._repo.wvfs.lexists(self._path)
1708
1707
1709 def audit(self):
1708 def audit(self):
1710 return self._repo.wvfs.audit(self._path)
1709 return self._repo.wvfs.audit(self._path)
1711
1710
1712 def cmp(self, fctx):
1711 def cmp(self, fctx):
1713 """compare with other file context
1712 """compare with other file context
1714
1713
1715 returns True if different than fctx.
1714 returns True if different than fctx.
1716 """
1715 """
1717 # fctx should be a filectx (not a workingfilectx)
1716 # fctx should be a filectx (not a workingfilectx)
1718 # invert comparison to reuse the same code path
1717 # invert comparison to reuse the same code path
1719 return fctx.cmp(self)
1718 return fctx.cmp(self)
1720
1719
1721 def remove(self, ignoremissing=False):
1720 def remove(self, ignoremissing=False):
1722 """wraps unlink for a repo's working directory"""
1721 """wraps unlink for a repo's working directory"""
1723 rmdir = self._repo.ui.configbool('experimental', 'removeemptydirs')
1722 rmdir = self._repo.ui.configbool('experimental', 'removeemptydirs')
1724 self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing,
1723 self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing,
1725 rmdir=rmdir)
1724 rmdir=rmdir)
1726
1725
1727 def write(self, data, flags, backgroundclose=False, **kwargs):
1726 def write(self, data, flags, backgroundclose=False, **kwargs):
1728 """wraps repo.wwrite"""
1727 """wraps repo.wwrite"""
1729 self._repo.wwrite(self._path, data, flags,
1728 self._repo.wwrite(self._path, data, flags,
1730 backgroundclose=backgroundclose,
1729 backgroundclose=backgroundclose,
1731 **kwargs)
1730 **kwargs)
1732
1731
1733 def markcopied(self, src):
1732 def markcopied(self, src):
1734 """marks this file a copy of `src`"""
1733 """marks this file a copy of `src`"""
1735 if self._repo.dirstate[self._path] in "nma":
1734 if self._repo.dirstate[self._path] in "nma":
1736 self._repo.dirstate.copy(src, self._path)
1735 self._repo.dirstate.copy(src, self._path)
1737
1736
1738 def clearunknown(self):
1737 def clearunknown(self):
1739 """Removes conflicting items in the working directory so that
1738 """Removes conflicting items in the working directory so that
1740 ``write()`` can be called successfully.
1739 ``write()`` can be called successfully.
1741 """
1740 """
1742 wvfs = self._repo.wvfs
1741 wvfs = self._repo.wvfs
1743 f = self._path
1742 f = self._path
1744 wvfs.audit(f)
1743 wvfs.audit(f)
1745 if self._repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
1744 if self._repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
1746 # remove files under the directory as they should already be
1745 # remove files under the directory as they should already be
1747 # warned and backed up
1746 # warned and backed up
1748 if wvfs.isdir(f) and not wvfs.islink(f):
1747 if wvfs.isdir(f) and not wvfs.islink(f):
1749 wvfs.rmtree(f, forcibly=True)
1748 wvfs.rmtree(f, forcibly=True)
1750 for p in reversed(list(util.finddirs(f))):
1749 for p in reversed(list(util.finddirs(f))):
1751 if wvfs.isfileorlink(p):
1750 if wvfs.isfileorlink(p):
1752 wvfs.unlink(p)
1751 wvfs.unlink(p)
1753 break
1752 break
1754 else:
1753 else:
1755 # don't remove files if path conflicts are not processed
1754 # don't remove files if path conflicts are not processed
1756 if wvfs.isdir(f) and not wvfs.islink(f):
1755 if wvfs.isdir(f) and not wvfs.islink(f):
1757 wvfs.removedirs(f)
1756 wvfs.removedirs(f)
1758
1757
1759 def setflags(self, l, x):
1758 def setflags(self, l, x):
1760 self._repo.wvfs.setflags(self._path, l, x)
1759 self._repo.wvfs.setflags(self._path, l, x)
1761
1760
1762 class overlayworkingctx(committablectx):
1761 class overlayworkingctx(committablectx):
1763 """Wraps another mutable context with a write-back cache that can be
1762 """Wraps another mutable context with a write-back cache that can be
1764 converted into a commit context.
1763 converted into a commit context.
1765
1764
1766 self._cache[path] maps to a dict with keys: {
1765 self._cache[path] maps to a dict with keys: {
1767 'exists': bool?
1766 'exists': bool?
1768 'date': date?
1767 'date': date?
1769 'data': str?
1768 'data': str?
1770 'flags': str?
1769 'flags': str?
1771 'copied': str? (path or None)
1770 'copied': str? (path or None)
1772 }
1771 }
1773 If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
1772 If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
1774 is `False`, the file was deleted.
1773 is `False`, the file was deleted.
1775 """
1774 """
1776
1775
1777 def __init__(self, repo):
1776 def __init__(self, repo):
1778 super(overlayworkingctx, self).__init__(repo)
1777 super(overlayworkingctx, self).__init__(repo)
1779 self.clean()
1778 self.clean()
1780
1779
1781 def setbase(self, wrappedctx):
1780 def setbase(self, wrappedctx):
1782 self._wrappedctx = wrappedctx
1781 self._wrappedctx = wrappedctx
1783 self._parents = [wrappedctx]
1782 self._parents = [wrappedctx]
1784 # Drop old manifest cache as it is now out of date.
1783 # Drop old manifest cache as it is now out of date.
1785 # This is necessary when, e.g., rebasing several nodes with one
1784 # This is necessary when, e.g., rebasing several nodes with one
1786 # ``overlayworkingctx`` (e.g. with --collapse).
1785 # ``overlayworkingctx`` (e.g. with --collapse).
1787 util.clearcachedproperty(self, '_manifest')
1786 util.clearcachedproperty(self, '_manifest')
1788
1787
1789 def data(self, path):
1788 def data(self, path):
1790 if self.isdirty(path):
1789 if self.isdirty(path):
1791 if self._cache[path]['exists']:
1790 if self._cache[path]['exists']:
1792 if self._cache[path]['data']:
1791 if self._cache[path]['data']:
1793 return self._cache[path]['data']
1792 return self._cache[path]['data']
1794 else:
1793 else:
1795 # Must fallback here, too, because we only set flags.
1794 # Must fallback here, too, because we only set flags.
1796 return self._wrappedctx[path].data()
1795 return self._wrappedctx[path].data()
1797 else:
1796 else:
1798 raise error.ProgrammingError("No such file or directory: %s" %
1797 raise error.ProgrammingError("No such file or directory: %s" %
1799 path)
1798 path)
1800 else:
1799 else:
1801 return self._wrappedctx[path].data()
1800 return self._wrappedctx[path].data()
1802
1801
1803 @propertycache
1802 @propertycache
1804 def _manifest(self):
1803 def _manifest(self):
1805 parents = self.parents()
1804 parents = self.parents()
1806 man = parents[0].manifest().copy()
1805 man = parents[0].manifest().copy()
1807
1806
1808 flag = self._flagfunc
1807 flag = self._flagfunc
1809 for path in self.added():
1808 for path in self.added():
1810 man[path] = addednodeid
1809 man[path] = addednodeid
1811 man.setflag(path, flag(path))
1810 man.setflag(path, flag(path))
1812 for path in self.modified():
1811 for path in self.modified():
1813 man[path] = modifiednodeid
1812 man[path] = modifiednodeid
1814 man.setflag(path, flag(path))
1813 man.setflag(path, flag(path))
1815 for path in self.removed():
1814 for path in self.removed():
1816 del man[path]
1815 del man[path]
1817 return man
1816 return man
1818
1817
1819 @propertycache
1818 @propertycache
1820 def _flagfunc(self):
1819 def _flagfunc(self):
1821 def f(path):
1820 def f(path):
1822 return self._cache[path]['flags']
1821 return self._cache[path]['flags']
1823 return f
1822 return f
1824
1823
1825 def files(self):
1824 def files(self):
1826 return sorted(self.added() + self.modified() + self.removed())
1825 return sorted(self.added() + self.modified() + self.removed())
1827
1826
1828 def modified(self):
1827 def modified(self):
1829 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1828 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1830 self._existsinparent(f)]
1829 self._existsinparent(f)]
1831
1830
1832 def added(self):
1831 def added(self):
1833 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1832 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1834 not self._existsinparent(f)]
1833 not self._existsinparent(f)]
1835
1834
1836 def removed(self):
1835 def removed(self):
1837 return [f for f in self._cache.keys() if
1836 return [f for f in self._cache.keys() if
1838 not self._cache[f]['exists'] and self._existsinparent(f)]
1837 not self._cache[f]['exists'] and self._existsinparent(f)]
1839
1838
1840 def isinmemory(self):
1839 def isinmemory(self):
1841 return True
1840 return True
1842
1841
1843 def filedate(self, path):
1842 def filedate(self, path):
1844 if self.isdirty(path):
1843 if self.isdirty(path):
1845 return self._cache[path]['date']
1844 return self._cache[path]['date']
1846 else:
1845 else:
1847 return self._wrappedctx[path].date()
1846 return self._wrappedctx[path].date()
1848
1847
1849 def markcopied(self, path, origin):
1848 def markcopied(self, path, origin):
1850 if self.isdirty(path):
1849 if self.isdirty(path):
1851 self._cache[path]['copied'] = origin
1850 self._cache[path]['copied'] = origin
1852 else:
1851 else:
1853 raise error.ProgrammingError('markcopied() called on clean context')
1852 raise error.ProgrammingError('markcopied() called on clean context')
1854
1853
1855 def copydata(self, path):
1854 def copydata(self, path):
1856 if self.isdirty(path):
1855 if self.isdirty(path):
1857 return self._cache[path]['copied']
1856 return self._cache[path]['copied']
1858 else:
1857 else:
1859 raise error.ProgrammingError('copydata() called on clean context')
1858 raise error.ProgrammingError('copydata() called on clean context')
1860
1859
1861 def flags(self, path):
1860 def flags(self, path):
1862 if self.isdirty(path):
1861 if self.isdirty(path):
1863 if self._cache[path]['exists']:
1862 if self._cache[path]['exists']:
1864 return self._cache[path]['flags']
1863 return self._cache[path]['flags']
1865 else:
1864 else:
1866 raise error.ProgrammingError("No such file or directory: %s" %
1865 raise error.ProgrammingError("No such file or directory: %s" %
1867 self._path)
1866 self._path)
1868 else:
1867 else:
1869 return self._wrappedctx[path].flags()
1868 return self._wrappedctx[path].flags()
1870
1869
1871 def _existsinparent(self, path):
1870 def _existsinparent(self, path):
1872 try:
1871 try:
1873 # ``commitctx` raises a ``ManifestLookupError`` if a path does not
1872 # ``commitctx` raises a ``ManifestLookupError`` if a path does not
1874 # exist, unlike ``workingctx``, which returns a ``workingfilectx``
1873 # exist, unlike ``workingctx``, which returns a ``workingfilectx``
1875 # with an ``exists()`` function.
1874 # with an ``exists()`` function.
1876 self._wrappedctx[path]
1875 self._wrappedctx[path]
1877 return True
1876 return True
1878 except error.ManifestLookupError:
1877 except error.ManifestLookupError:
1879 return False
1878 return False
1880
1879
1881 def _auditconflicts(self, path):
1880 def _auditconflicts(self, path):
1882 """Replicates conflict checks done by wvfs.write().
1881 """Replicates conflict checks done by wvfs.write().
1883
1882
1884 Since we never write to the filesystem and never call `applyupdates` in
1883 Since we never write to the filesystem and never call `applyupdates` in
1885 IMM, we'll never check that a path is actually writable -- e.g., because
1884 IMM, we'll never check that a path is actually writable -- e.g., because
1886 it adds `a/foo`, but `a` is actually a file in the other commit.
1885 it adds `a/foo`, but `a` is actually a file in the other commit.
1887 """
1886 """
1888 def fail(path, component):
1887 def fail(path, component):
1889 # p1() is the base and we're receiving "writes" for p2()'s
1888 # p1() is the base and we're receiving "writes" for p2()'s
1890 # files.
1889 # files.
1891 if 'l' in self.p1()[component].flags():
1890 if 'l' in self.p1()[component].flags():
1892 raise error.Abort("error: %s conflicts with symlink %s "
1891 raise error.Abort("error: %s conflicts with symlink %s "
1893 "in %s." % (path, component,
1892 "in %s." % (path, component,
1894 self.p1().rev()))
1893 self.p1().rev()))
1895 else:
1894 else:
1896 raise error.Abort("error: '%s' conflicts with file '%s' in "
1895 raise error.Abort("error: '%s' conflicts with file '%s' in "
1897 "%s." % (path, component,
1896 "%s." % (path, component,
1898 self.p1().rev()))
1897 self.p1().rev()))
1899
1898
1900 # Test that each new directory to be created to write this path from p2
1899 # Test that each new directory to be created to write this path from p2
1901 # is not a file in p1.
1900 # is not a file in p1.
1902 components = path.split('/')
1901 components = path.split('/')
1903 for i in pycompat.xrange(len(components)):
1902 for i in pycompat.xrange(len(components)):
1904 component = "/".join(components[0:i])
1903 component = "/".join(components[0:i])
1905 if component in self.p1() and self._cache[component]['exists']:
1904 if component in self.p1() and self._cache[component]['exists']:
1906 fail(path, component)
1905 fail(path, component)
1907
1906
1908 # Test the other direction -- that this path from p2 isn't a directory
1907 # Test the other direction -- that this path from p2 isn't a directory
1909 # in p1 (test that p1 doesn't any paths matching `path/*`).
1908 # in p1 (test that p1 doesn't any paths matching `path/*`).
1910 match = matchmod.match('/', '', [path + '/'], default=b'relpath')
1909 match = matchmod.match('/', '', [path + '/'], default=b'relpath')
1911 matches = self.p1().manifest().matches(match)
1910 matches = self.p1().manifest().matches(match)
1912 mfiles = matches.keys()
1911 mfiles = matches.keys()
1913 if len(mfiles) > 0:
1912 if len(mfiles) > 0:
1914 if len(mfiles) == 1 and mfiles[0] == path:
1913 if len(mfiles) == 1 and mfiles[0] == path:
1915 return
1914 return
1916 # omit the files which are deleted in current IMM wctx
1915 # omit the files which are deleted in current IMM wctx
1917 mfiles = [m for m in mfiles if self._cache[m]['exists']]
1916 mfiles = [m for m in mfiles if self._cache[m]['exists']]
1918 if not mfiles:
1917 if not mfiles:
1919 return
1918 return
1920 raise error.Abort("error: file '%s' cannot be written because "
1919 raise error.Abort("error: file '%s' cannot be written because "
1921 " '%s/' is a folder in %s (containing %d "
1920 " '%s/' is a folder in %s (containing %d "
1922 "entries: %s)"
1921 "entries: %s)"
1923 % (path, path, self.p1(), len(mfiles),
1922 % (path, path, self.p1(), len(mfiles),
1924 ', '.join(mfiles)))
1923 ', '.join(mfiles)))
1925
1924
1926 def write(self, path, data, flags='', **kwargs):
1925 def write(self, path, data, flags='', **kwargs):
1927 if data is None:
1926 if data is None:
1928 raise error.ProgrammingError("data must be non-None")
1927 raise error.ProgrammingError("data must be non-None")
1929 self._auditconflicts(path)
1928 self._auditconflicts(path)
1930 self._markdirty(path, exists=True, data=data, date=dateutil.makedate(),
1929 self._markdirty(path, exists=True, data=data, date=dateutil.makedate(),
1931 flags=flags)
1930 flags=flags)
1932
1931
1933 def setflags(self, path, l, x):
1932 def setflags(self, path, l, x):
1934 flag = ''
1933 flag = ''
1935 if l:
1934 if l:
1936 flag = 'l'
1935 flag = 'l'
1937 elif x:
1936 elif x:
1938 flag = 'x'
1937 flag = 'x'
1939 self._markdirty(path, exists=True, date=dateutil.makedate(),
1938 self._markdirty(path, exists=True, date=dateutil.makedate(),
1940 flags=flag)
1939 flags=flag)
1941
1940
1942 def remove(self, path):
1941 def remove(self, path):
1943 self._markdirty(path, exists=False)
1942 self._markdirty(path, exists=False)
1944
1943
1945 def exists(self, path):
1944 def exists(self, path):
1946 """exists behaves like `lexists`, but needs to follow symlinks and
1945 """exists behaves like `lexists`, but needs to follow symlinks and
1947 return False if they are broken.
1946 return False if they are broken.
1948 """
1947 """
1949 if self.isdirty(path):
1948 if self.isdirty(path):
1950 # If this path exists and is a symlink, "follow" it by calling
1949 # If this path exists and is a symlink, "follow" it by calling
1951 # exists on the destination path.
1950 # exists on the destination path.
1952 if (self._cache[path]['exists'] and
1951 if (self._cache[path]['exists'] and
1953 'l' in self._cache[path]['flags']):
1952 'l' in self._cache[path]['flags']):
1954 return self.exists(self._cache[path]['data'].strip())
1953 return self.exists(self._cache[path]['data'].strip())
1955 else:
1954 else:
1956 return self._cache[path]['exists']
1955 return self._cache[path]['exists']
1957
1956
1958 return self._existsinparent(path)
1957 return self._existsinparent(path)
1959
1958
1960 def lexists(self, path):
1959 def lexists(self, path):
1961 """lexists returns True if the path exists"""
1960 """lexists returns True if the path exists"""
1962 if self.isdirty(path):
1961 if self.isdirty(path):
1963 return self._cache[path]['exists']
1962 return self._cache[path]['exists']
1964
1963
1965 return self._existsinparent(path)
1964 return self._existsinparent(path)
1966
1965
1967 def size(self, path):
1966 def size(self, path):
1968 if self.isdirty(path):
1967 if self.isdirty(path):
1969 if self._cache[path]['exists']:
1968 if self._cache[path]['exists']:
1970 return len(self._cache[path]['data'])
1969 return len(self._cache[path]['data'])
1971 else:
1970 else:
1972 raise error.ProgrammingError("No such file or directory: %s" %
1971 raise error.ProgrammingError("No such file or directory: %s" %
1973 self._path)
1972 self._path)
1974 return self._wrappedctx[path].size()
1973 return self._wrappedctx[path].size()
1975
1974
1976 def tomemctx(self, text, branch=None, extra=None, date=None, parents=None,
1975 def tomemctx(self, text, branch=None, extra=None, date=None, parents=None,
1977 user=None, editor=None):
1976 user=None, editor=None):
1978 """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
1977 """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
1979 committed.
1978 committed.
1980
1979
1981 ``text`` is the commit message.
1980 ``text`` is the commit message.
1982 ``parents`` (optional) are rev numbers.
1981 ``parents`` (optional) are rev numbers.
1983 """
1982 """
1984 # Default parents to the wrapped contexts' if not passed.
1983 # Default parents to the wrapped contexts' if not passed.
1985 if parents is None:
1984 if parents is None:
1986 parents = self._wrappedctx.parents()
1985 parents = self._wrappedctx.parents()
1987 if len(parents) == 1:
1986 if len(parents) == 1:
1988 parents = (parents[0], None)
1987 parents = (parents[0], None)
1989
1988
1990 # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
1989 # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
1991 if parents[1] is None:
1990 if parents[1] is None:
1992 parents = (self._repo[parents[0]], None)
1991 parents = (self._repo[parents[0]], None)
1993 else:
1992 else:
1994 parents = (self._repo[parents[0]], self._repo[parents[1]])
1993 parents = (self._repo[parents[0]], self._repo[parents[1]])
1995
1994
1996 files = self._cache.keys()
1995 files = self._cache.keys()
1997 def getfile(repo, memctx, path):
1996 def getfile(repo, memctx, path):
1998 if self._cache[path]['exists']:
1997 if self._cache[path]['exists']:
1999 return memfilectx(repo, memctx, path,
1998 return memfilectx(repo, memctx, path,
2000 self._cache[path]['data'],
1999 self._cache[path]['data'],
2001 'l' in self._cache[path]['flags'],
2000 'l' in self._cache[path]['flags'],
2002 'x' in self._cache[path]['flags'],
2001 'x' in self._cache[path]['flags'],
2003 self._cache[path]['copied'])
2002 self._cache[path]['copied'])
2004 else:
2003 else:
2005 # Returning None, but including the path in `files`, is
2004 # Returning None, but including the path in `files`, is
2006 # necessary for memctx to register a deletion.
2005 # necessary for memctx to register a deletion.
2007 return None
2006 return None
2008 return memctx(self._repo, parents, text, files, getfile, date=date,
2007 return memctx(self._repo, parents, text, files, getfile, date=date,
2009 extra=extra, user=user, branch=branch, editor=editor)
2008 extra=extra, user=user, branch=branch, editor=editor)
2010
2009
2011 def isdirty(self, path):
2010 def isdirty(self, path):
2012 return path in self._cache
2011 return path in self._cache
2013
2012
2014 def isempty(self):
2013 def isempty(self):
2015 # We need to discard any keys that are actually clean before the empty
2014 # We need to discard any keys that are actually clean before the empty
2016 # commit check.
2015 # commit check.
2017 self._compact()
2016 self._compact()
2018 return len(self._cache) == 0
2017 return len(self._cache) == 0
2019
2018
2020 def clean(self):
2019 def clean(self):
2021 self._cache = {}
2020 self._cache = {}
2022
2021
2023 def _compact(self):
2022 def _compact(self):
2024 """Removes keys from the cache that are actually clean, by comparing
2023 """Removes keys from the cache that are actually clean, by comparing
2025 them with the underlying context.
2024 them with the underlying context.
2026
2025
2027 This can occur during the merge process, e.g. by passing --tool :local
2026 This can occur during the merge process, e.g. by passing --tool :local
2028 to resolve a conflict.
2027 to resolve a conflict.
2029 """
2028 """
2030 keys = []
2029 keys = []
2031 for path in self._cache.keys():
2030 for path in self._cache.keys():
2032 cache = self._cache[path]
2031 cache = self._cache[path]
2033 try:
2032 try:
2034 underlying = self._wrappedctx[path]
2033 underlying = self._wrappedctx[path]
2035 if (underlying.data() == cache['data'] and
2034 if (underlying.data() == cache['data'] and
2036 underlying.flags() == cache['flags']):
2035 underlying.flags() == cache['flags']):
2037 keys.append(path)
2036 keys.append(path)
2038 except error.ManifestLookupError:
2037 except error.ManifestLookupError:
2039 # Path not in the underlying manifest (created).
2038 # Path not in the underlying manifest (created).
2040 continue
2039 continue
2041
2040
2042 for path in keys:
2041 for path in keys:
2043 del self._cache[path]
2042 del self._cache[path]
2044 return keys
2043 return keys
2045
2044
2046 def _markdirty(self, path, exists, data=None, date=None, flags=''):
2045 def _markdirty(self, path, exists, data=None, date=None, flags=''):
2047 # data not provided, let's see if we already have some; if not, let's
2046 # data not provided, let's see if we already have some; if not, let's
2048 # grab it from our underlying context, so that we always have data if
2047 # grab it from our underlying context, so that we always have data if
2049 # the file is marked as existing.
2048 # the file is marked as existing.
2050 if exists and data is None:
2049 if exists and data is None:
2051 oldentry = self._cache.get(path) or {}
2050 oldentry = self._cache.get(path) or {}
2052 data = oldentry.get('data') or self._wrappedctx[path].data()
2051 data = oldentry.get('data') or self._wrappedctx[path].data()
2053
2052
2054 self._cache[path] = {
2053 self._cache[path] = {
2055 'exists': exists,
2054 'exists': exists,
2056 'data': data,
2055 'data': data,
2057 'date': date,
2056 'date': date,
2058 'flags': flags,
2057 'flags': flags,
2059 'copied': None,
2058 'copied': None,
2060 }
2059 }
2061
2060
2062 def filectx(self, path, filelog=None):
2061 def filectx(self, path, filelog=None):
2063 return overlayworkingfilectx(self._repo, path, parent=self,
2062 return overlayworkingfilectx(self._repo, path, parent=self,
2064 filelog=filelog)
2063 filelog=filelog)
2065
2064
2066 class overlayworkingfilectx(committablefilectx):
2065 class overlayworkingfilectx(committablefilectx):
2067 """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
2066 """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
2068 cache, which can be flushed through later by calling ``flush()``."""
2067 cache, which can be flushed through later by calling ``flush()``."""
2069
2068
2070 def __init__(self, repo, path, filelog=None, parent=None):
2069 def __init__(self, repo, path, filelog=None, parent=None):
2071 super(overlayworkingfilectx, self).__init__(repo, path, filelog,
2070 super(overlayworkingfilectx, self).__init__(repo, path, filelog,
2072 parent)
2071 parent)
2073 self._repo = repo
2072 self._repo = repo
2074 self._parent = parent
2073 self._parent = parent
2075 self._path = path
2074 self._path = path
2076
2075
2077 def cmp(self, fctx):
2076 def cmp(self, fctx):
2078 return self.data() != fctx.data()
2077 return self.data() != fctx.data()
2079
2078
2080 def changectx(self):
2079 def changectx(self):
2081 return self._parent
2080 return self._parent
2082
2081
2083 def data(self):
2082 def data(self):
2084 return self._parent.data(self._path)
2083 return self._parent.data(self._path)
2085
2084
2086 def date(self):
2085 def date(self):
2087 return self._parent.filedate(self._path)
2086 return self._parent.filedate(self._path)
2088
2087
2089 def exists(self):
2088 def exists(self):
2090 return self.lexists()
2089 return self.lexists()
2091
2090
2092 def lexists(self):
2091 def lexists(self):
2093 return self._parent.exists(self._path)
2092 return self._parent.exists(self._path)
2094
2093
2095 def renamed(self):
2094 def renamed(self):
2096 path = self._parent.copydata(self._path)
2095 path = self._parent.copydata(self._path)
2097 if not path:
2096 if not path:
2098 return None
2097 return None
2099 return path, self._changectx._parents[0]._manifest.get(path, nullid)
2098 return path, self._changectx._parents[0]._manifest.get(path, nullid)
2100
2099
2101 def size(self):
2100 def size(self):
2102 return self._parent.size(self._path)
2101 return self._parent.size(self._path)
2103
2102
2104 def markcopied(self, origin):
2103 def markcopied(self, origin):
2105 self._parent.markcopied(self._path, origin)
2104 self._parent.markcopied(self._path, origin)
2106
2105
2107 def audit(self):
2106 def audit(self):
2108 pass
2107 pass
2109
2108
2110 def flags(self):
2109 def flags(self):
2111 return self._parent.flags(self._path)
2110 return self._parent.flags(self._path)
2112
2111
2113 def setflags(self, islink, isexec):
2112 def setflags(self, islink, isexec):
2114 return self._parent.setflags(self._path, islink, isexec)
2113 return self._parent.setflags(self._path, islink, isexec)
2115
2114
2116 def write(self, data, flags, backgroundclose=False, **kwargs):
2115 def write(self, data, flags, backgroundclose=False, **kwargs):
2117 return self._parent.write(self._path, data, flags, **kwargs)
2116 return self._parent.write(self._path, data, flags, **kwargs)
2118
2117
2119 def remove(self, ignoremissing=False):
2118 def remove(self, ignoremissing=False):
2120 return self._parent.remove(self._path)
2119 return self._parent.remove(self._path)
2121
2120
2122 def clearunknown(self):
2121 def clearunknown(self):
2123 pass
2122 pass
2124
2123
2125 class workingcommitctx(workingctx):
2124 class workingcommitctx(workingctx):
2126 """A workingcommitctx object makes access to data related to
2125 """A workingcommitctx object makes access to data related to
2127 the revision being committed convenient.
2126 the revision being committed convenient.
2128
2127
2129 This hides changes in the working directory, if they aren't
2128 This hides changes in the working directory, if they aren't
2130 committed in this context.
2129 committed in this context.
2131 """
2130 """
2132 def __init__(self, repo, changes,
2131 def __init__(self, repo, changes,
2133 text="", user=None, date=None, extra=None):
2132 text="", user=None, date=None, extra=None):
2134 super(workingcommitctx, self).__init__(repo, text, user, date, extra,
2133 super(workingcommitctx, self).__init__(repo, text, user, date, extra,
2135 changes)
2134 changes)
2136
2135
2137 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
2136 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
2138 """Return matched files only in ``self._status``
2137 """Return matched files only in ``self._status``
2139
2138
2140 Uncommitted files appear "clean" via this context, even if
2139 Uncommitted files appear "clean" via this context, even if
2141 they aren't actually so in the working directory.
2140 they aren't actually so in the working directory.
2142 """
2141 """
2143 if clean:
2142 if clean:
2144 clean = [f for f in self._manifest if f not in self._changedset]
2143 clean = [f for f in self._manifest if f not in self._changedset]
2145 else:
2144 else:
2146 clean = []
2145 clean = []
2147 return scmutil.status([f for f in self._status.modified if match(f)],
2146 return scmutil.status([f for f in self._status.modified if match(f)],
2148 [f for f in self._status.added if match(f)],
2147 [f for f in self._status.added if match(f)],
2149 [f for f in self._status.removed if match(f)],
2148 [f for f in self._status.removed if match(f)],
2150 [], [], [], clean)
2149 [], [], [], clean)
2151
2150
2152 @propertycache
2151 @propertycache
2153 def _changedset(self):
2152 def _changedset(self):
2154 """Return the set of files changed in this context
2153 """Return the set of files changed in this context
2155 """
2154 """
2156 changed = set(self._status.modified)
2155 changed = set(self._status.modified)
2157 changed.update(self._status.added)
2156 changed.update(self._status.added)
2158 changed.update(self._status.removed)
2157 changed.update(self._status.removed)
2159 return changed
2158 return changed
2160
2159
2161 def makecachingfilectxfn(func):
2160 def makecachingfilectxfn(func):
2162 """Create a filectxfn that caches based on the path.
2161 """Create a filectxfn that caches based on the path.
2163
2162
2164 We can't use util.cachefunc because it uses all arguments as the cache
2163 We can't use util.cachefunc because it uses all arguments as the cache
2165 key and this creates a cycle since the arguments include the repo and
2164 key and this creates a cycle since the arguments include the repo and
2166 memctx.
2165 memctx.
2167 """
2166 """
2168 cache = {}
2167 cache = {}
2169
2168
2170 def getfilectx(repo, memctx, path):
2169 def getfilectx(repo, memctx, path):
2171 if path not in cache:
2170 if path not in cache:
2172 cache[path] = func(repo, memctx, path)
2171 cache[path] = func(repo, memctx, path)
2173 return cache[path]
2172 return cache[path]
2174
2173
2175 return getfilectx
2174 return getfilectx
2176
2175
2177 def memfilefromctx(ctx):
2176 def memfilefromctx(ctx):
2178 """Given a context return a memfilectx for ctx[path]
2177 """Given a context return a memfilectx for ctx[path]
2179
2178
2180 This is a convenience method for building a memctx based on another
2179 This is a convenience method for building a memctx based on another
2181 context.
2180 context.
2182 """
2181 """
2183 def getfilectx(repo, memctx, path):
2182 def getfilectx(repo, memctx, path):
2184 fctx = ctx[path]
2183 fctx = ctx[path]
2185 # this is weird but apparently we only keep track of one parent
2184 # this is weird but apparently we only keep track of one parent
2186 # (why not only store that instead of a tuple?)
2185 # (why not only store that instead of a tuple?)
2187 copied = fctx.renamed()
2186 copied = fctx.renamed()
2188 if copied:
2187 if copied:
2189 copied = copied[0]
2188 copied = copied[0]
2190 return memfilectx(repo, memctx, path, fctx.data(),
2189 return memfilectx(repo, memctx, path, fctx.data(),
2191 islink=fctx.islink(), isexec=fctx.isexec(),
2190 islink=fctx.islink(), isexec=fctx.isexec(),
2192 copied=copied)
2191 copied=copied)
2193
2192
2194 return getfilectx
2193 return getfilectx
2195
2194
2196 def memfilefrompatch(patchstore):
2195 def memfilefrompatch(patchstore):
2197 """Given a patch (e.g. patchstore object) return a memfilectx
2196 """Given a patch (e.g. patchstore object) return a memfilectx
2198
2197
2199 This is a convenience method for building a memctx based on a patchstore.
2198 This is a convenience method for building a memctx based on a patchstore.
2200 """
2199 """
2201 def getfilectx(repo, memctx, path):
2200 def getfilectx(repo, memctx, path):
2202 data, mode, copied = patchstore.getfile(path)
2201 data, mode, copied = patchstore.getfile(path)
2203 if data is None:
2202 if data is None:
2204 return None
2203 return None
2205 islink, isexec = mode
2204 islink, isexec = mode
2206 return memfilectx(repo, memctx, path, data, islink=islink,
2205 return memfilectx(repo, memctx, path, data, islink=islink,
2207 isexec=isexec, copied=copied)
2206 isexec=isexec, copied=copied)
2208
2207
2209 return getfilectx
2208 return getfilectx
2210
2209
2211 class memctx(committablectx):
2210 class memctx(committablectx):
2212 """Use memctx to perform in-memory commits via localrepo.commitctx().
2211 """Use memctx to perform in-memory commits via localrepo.commitctx().
2213
2212
2214 Revision information is supplied at initialization time while
2213 Revision information is supplied at initialization time while
2215 related files data and is made available through a callback
2214 related files data and is made available through a callback
2216 mechanism. 'repo' is the current localrepo, 'parents' is a
2215 mechanism. 'repo' is the current localrepo, 'parents' is a
2217 sequence of two parent revisions identifiers (pass None for every
2216 sequence of two parent revisions identifiers (pass None for every
2218 missing parent), 'text' is the commit message and 'files' lists
2217 missing parent), 'text' is the commit message and 'files' lists
2219 names of files touched by the revision (normalized and relative to
2218 names of files touched by the revision (normalized and relative to
2220 repository root).
2219 repository root).
2221
2220
2222 filectxfn(repo, memctx, path) is a callable receiving the
2221 filectxfn(repo, memctx, path) is a callable receiving the
2223 repository, the current memctx object and the normalized path of
2222 repository, the current memctx object and the normalized path of
2224 requested file, relative to repository root. It is fired by the
2223 requested file, relative to repository root. It is fired by the
2225 commit function for every file in 'files', but calls order is
2224 commit function for every file in 'files', but calls order is
2226 undefined. If the file is available in the revision being
2225 undefined. If the file is available in the revision being
2227 committed (updated or added), filectxfn returns a memfilectx
2226 committed (updated or added), filectxfn returns a memfilectx
2228 object. If the file was removed, filectxfn return None for recent
2227 object. If the file was removed, filectxfn return None for recent
2229 Mercurial. Moved files are represented by marking the source file
2228 Mercurial. Moved files are represented by marking the source file
2230 removed and the new file added with copy information (see
2229 removed and the new file added with copy information (see
2231 memfilectx).
2230 memfilectx).
2232
2231
2233 user receives the committer name and defaults to current
2232 user receives the committer name and defaults to current
2234 repository username, date is the commit date in any format
2233 repository username, date is the commit date in any format
2235 supported by dateutil.parsedate() and defaults to current date, extra
2234 supported by dateutil.parsedate() and defaults to current date, extra
2236 is a dictionary of metadata or is left empty.
2235 is a dictionary of metadata or is left empty.
2237 """
2236 """
2238
2237
2239 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2238 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2240 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2239 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2241 # this field to determine what to do in filectxfn.
2240 # this field to determine what to do in filectxfn.
2242 _returnnoneformissingfiles = True
2241 _returnnoneformissingfiles = True
2243
2242
2244 def __init__(self, repo, parents, text, files, filectxfn, user=None,
2243 def __init__(self, repo, parents, text, files, filectxfn, user=None,
2245 date=None, extra=None, branch=None, editor=False):
2244 date=None, extra=None, branch=None, editor=False):
2246 super(memctx, self).__init__(repo, text, user, date, extra)
2245 super(memctx, self).__init__(repo, text, user, date, extra)
2247 self._rev = None
2246 self._rev = None
2248 self._node = None
2247 self._node = None
2249 parents = [(p or nullid) for p in parents]
2248 parents = [(p or nullid) for p in parents]
2250 p1, p2 = parents
2249 p1, p2 = parents
2251 self._parents = [self._repo[p] for p in (p1, p2)]
2250 self._parents = [self._repo[p] for p in (p1, p2)]
2252 files = sorted(set(files))
2251 files = sorted(set(files))
2253 self._files = files
2252 self._files = files
2254 if branch is not None:
2253 if branch is not None:
2255 self._extra['branch'] = encoding.fromlocal(branch)
2254 self._extra['branch'] = encoding.fromlocal(branch)
2256 self.substate = {}
2255 self.substate = {}
2257
2256
2258 if isinstance(filectxfn, patch.filestore):
2257 if isinstance(filectxfn, patch.filestore):
2259 filectxfn = memfilefrompatch(filectxfn)
2258 filectxfn = memfilefrompatch(filectxfn)
2260 elif not callable(filectxfn):
2259 elif not callable(filectxfn):
2261 # if store is not callable, wrap it in a function
2260 # if store is not callable, wrap it in a function
2262 filectxfn = memfilefromctx(filectxfn)
2261 filectxfn = memfilefromctx(filectxfn)
2263
2262
2264 # memoizing increases performance for e.g. vcs convert scenarios.
2263 # memoizing increases performance for e.g. vcs convert scenarios.
2265 self._filectxfn = makecachingfilectxfn(filectxfn)
2264 self._filectxfn = makecachingfilectxfn(filectxfn)
2266
2265
2267 if editor:
2266 if editor:
2268 self._text = editor(self._repo, self, [])
2267 self._text = editor(self._repo, self, [])
2269 self._repo.savecommitmessage(self._text)
2268 self._repo.savecommitmessage(self._text)
2270
2269
2271 def filectx(self, path, filelog=None):
2270 def filectx(self, path, filelog=None):
2272 """get a file context from the working directory
2271 """get a file context from the working directory
2273
2272
2274 Returns None if file doesn't exist and should be removed."""
2273 Returns None if file doesn't exist and should be removed."""
2275 return self._filectxfn(self._repo, self, path)
2274 return self._filectxfn(self._repo, self, path)
2276
2275
2277 def commit(self):
2276 def commit(self):
2278 """commit context to the repo"""
2277 """commit context to the repo"""
2279 return self._repo.commitctx(self)
2278 return self._repo.commitctx(self)
2280
2279
2281 @propertycache
2280 @propertycache
2282 def _manifest(self):
2281 def _manifest(self):
2283 """generate a manifest based on the return values of filectxfn"""
2282 """generate a manifest based on the return values of filectxfn"""
2284
2283
2285 # keep this simple for now; just worry about p1
2284 # keep this simple for now; just worry about p1
2286 pctx = self._parents[0]
2285 pctx = self._parents[0]
2287 man = pctx.manifest().copy()
2286 man = pctx.manifest().copy()
2288
2287
2289 for f in self._status.modified:
2288 for f in self._status.modified:
2290 p1node = nullid
2289 man[f] = modifiednodeid
2291 p2node = nullid
2292 p = pctx[f].parents() # if file isn't in pctx, check p2?
2293 if len(p) > 0:
2294 p1node = p[0].filenode()
2295 if len(p) > 1:
2296 p2node = p[1].filenode()
2297 man[f] = revlog.hash(self[f].data(), p1node, p2node)
2298
2290
2299 for f in self._status.added:
2291 for f in self._status.added:
2300 man[f] = revlog.hash(self[f].data(), nullid, nullid)
2292 man[f] = addednodeid
2301
2293
2302 for f in self._status.removed:
2294 for f in self._status.removed:
2303 if f in man:
2295 if f in man:
2304 del man[f]
2296 del man[f]
2305
2297
2306 return man
2298 return man
2307
2299
2308 @propertycache
2300 @propertycache
2309 def _status(self):
2301 def _status(self):
2310 """Calculate exact status from ``files`` specified at construction
2302 """Calculate exact status from ``files`` specified at construction
2311 """
2303 """
2312 man1 = self.p1().manifest()
2304 man1 = self.p1().manifest()
2313 p2 = self._parents[1]
2305 p2 = self._parents[1]
2314 # "1 < len(self._parents)" can't be used for checking
2306 # "1 < len(self._parents)" can't be used for checking
2315 # existence of the 2nd parent, because "memctx._parents" is
2307 # existence of the 2nd parent, because "memctx._parents" is
2316 # explicitly initialized by the list, of which length is 2.
2308 # explicitly initialized by the list, of which length is 2.
2317 if p2.node() != nullid:
2309 if p2.node() != nullid:
2318 man2 = p2.manifest()
2310 man2 = p2.manifest()
2319 managing = lambda f: f in man1 or f in man2
2311 managing = lambda f: f in man1 or f in man2
2320 else:
2312 else:
2321 managing = lambda f: f in man1
2313 managing = lambda f: f in man1
2322
2314
2323 modified, added, removed = [], [], []
2315 modified, added, removed = [], [], []
2324 for f in self._files:
2316 for f in self._files:
2325 if not managing(f):
2317 if not managing(f):
2326 added.append(f)
2318 added.append(f)
2327 elif self[f]:
2319 elif self[f]:
2328 modified.append(f)
2320 modified.append(f)
2329 else:
2321 else:
2330 removed.append(f)
2322 removed.append(f)
2331
2323
2332 return scmutil.status(modified, added, removed, [], [], [], [])
2324 return scmutil.status(modified, added, removed, [], [], [], [])
2333
2325
2334 class memfilectx(committablefilectx):
2326 class memfilectx(committablefilectx):
2335 """memfilectx represents an in-memory file to commit.
2327 """memfilectx represents an in-memory file to commit.
2336
2328
2337 See memctx and committablefilectx for more details.
2329 See memctx and committablefilectx for more details.
2338 """
2330 """
2339 def __init__(self, repo, changectx, path, data, islink=False,
2331 def __init__(self, repo, changectx, path, data, islink=False,
2340 isexec=False, copied=None):
2332 isexec=False, copied=None):
2341 """
2333 """
2342 path is the normalized file path relative to repository root.
2334 path is the normalized file path relative to repository root.
2343 data is the file content as a string.
2335 data is the file content as a string.
2344 islink is True if the file is a symbolic link.
2336 islink is True if the file is a symbolic link.
2345 isexec is True if the file is executable.
2337 isexec is True if the file is executable.
2346 copied is the source file path if current file was copied in the
2338 copied is the source file path if current file was copied in the
2347 revision being committed, or None."""
2339 revision being committed, or None."""
2348 super(memfilectx, self).__init__(repo, path, None, changectx)
2340 super(memfilectx, self).__init__(repo, path, None, changectx)
2349 self._data = data
2341 self._data = data
2350 if islink:
2342 if islink:
2351 self._flags = 'l'
2343 self._flags = 'l'
2352 elif isexec:
2344 elif isexec:
2353 self._flags = 'x'
2345 self._flags = 'x'
2354 else:
2346 else:
2355 self._flags = ''
2347 self._flags = ''
2356 self._copied = None
2348 self._copied = None
2357 if copied:
2349 if copied:
2358 self._copied = (copied, nullid)
2350 self._copied = (copied, nullid)
2359
2351
2360 def data(self):
2352 def data(self):
2361 return self._data
2353 return self._data
2362
2354
2363 def remove(self, ignoremissing=False):
2355 def remove(self, ignoremissing=False):
2364 """wraps unlink for a repo's working directory"""
2356 """wraps unlink for a repo's working directory"""
2365 # need to figure out what to do here
2357 # need to figure out what to do here
2366 del self._changectx[self._path]
2358 del self._changectx[self._path]
2367
2359
2368 def write(self, data, flags, **kwargs):
2360 def write(self, data, flags, **kwargs):
2369 """wraps repo.wwrite"""
2361 """wraps repo.wwrite"""
2370 self._data = data
2362 self._data = data
2371
2363
2372
2364
2373 class metadataonlyctx(committablectx):
2365 class metadataonlyctx(committablectx):
2374 """Like memctx but it's reusing the manifest of different commit.
2366 """Like memctx but it's reusing the manifest of different commit.
2375 Intended to be used by lightweight operations that are creating
2367 Intended to be used by lightweight operations that are creating
2376 metadata-only changes.
2368 metadata-only changes.
2377
2369
2378 Revision information is supplied at initialization time. 'repo' is the
2370 Revision information is supplied at initialization time. 'repo' is the
2379 current localrepo, 'ctx' is original revision which manifest we're reuisng
2371 current localrepo, 'ctx' is original revision which manifest we're reuisng
2380 'parents' is a sequence of two parent revisions identifiers (pass None for
2372 'parents' is a sequence of two parent revisions identifiers (pass None for
2381 every missing parent), 'text' is the commit.
2373 every missing parent), 'text' is the commit.
2382
2374
2383 user receives the committer name and defaults to current repository
2375 user receives the committer name and defaults to current repository
2384 username, date is the commit date in any format supported by
2376 username, date is the commit date in any format supported by
2385 dateutil.parsedate() and defaults to current date, extra is a dictionary of
2377 dateutil.parsedate() and defaults to current date, extra is a dictionary of
2386 metadata or is left empty.
2378 metadata or is left empty.
2387 """
2379 """
2388 def __init__(self, repo, originalctx, parents=None, text=None, user=None,
2380 def __init__(self, repo, originalctx, parents=None, text=None, user=None,
2389 date=None, extra=None, editor=False):
2381 date=None, extra=None, editor=False):
2390 if text is None:
2382 if text is None:
2391 text = originalctx.description()
2383 text = originalctx.description()
2392 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2384 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2393 self._rev = None
2385 self._rev = None
2394 self._node = None
2386 self._node = None
2395 self._originalctx = originalctx
2387 self._originalctx = originalctx
2396 self._manifestnode = originalctx.manifestnode()
2388 self._manifestnode = originalctx.manifestnode()
2397 if parents is None:
2389 if parents is None:
2398 parents = originalctx.parents()
2390 parents = originalctx.parents()
2399 else:
2391 else:
2400 parents = [repo[p] for p in parents if p is not None]
2392 parents = [repo[p] for p in parents if p is not None]
2401 parents = parents[:]
2393 parents = parents[:]
2402 while len(parents) < 2:
2394 while len(parents) < 2:
2403 parents.append(repo[nullid])
2395 parents.append(repo[nullid])
2404 p1, p2 = self._parents = parents
2396 p1, p2 = self._parents = parents
2405
2397
2406 # sanity check to ensure that the reused manifest parents are
2398 # sanity check to ensure that the reused manifest parents are
2407 # manifests of our commit parents
2399 # manifests of our commit parents
2408 mp1, mp2 = self.manifestctx().parents
2400 mp1, mp2 = self.manifestctx().parents
2409 if p1 != nullid and p1.manifestnode() != mp1:
2401 if p1 != nullid and p1.manifestnode() != mp1:
2410 raise RuntimeError('can\'t reuse the manifest: '
2402 raise RuntimeError('can\'t reuse the manifest: '
2411 'its p1 doesn\'t match the new ctx p1')
2403 'its p1 doesn\'t match the new ctx p1')
2412 if p2 != nullid and p2.manifestnode() != mp2:
2404 if p2 != nullid and p2.manifestnode() != mp2:
2413 raise RuntimeError('can\'t reuse the manifest: '
2405 raise RuntimeError('can\'t reuse the manifest: '
2414 'its p2 doesn\'t match the new ctx p2')
2406 'its p2 doesn\'t match the new ctx p2')
2415
2407
2416 self._files = originalctx.files()
2408 self._files = originalctx.files()
2417 self.substate = {}
2409 self.substate = {}
2418
2410
2419 if editor:
2411 if editor:
2420 self._text = editor(self._repo, self, [])
2412 self._text = editor(self._repo, self, [])
2421 self._repo.savecommitmessage(self._text)
2413 self._repo.savecommitmessage(self._text)
2422
2414
2423 def manifestnode(self):
2415 def manifestnode(self):
2424 return self._manifestnode
2416 return self._manifestnode
2425
2417
2426 @property
2418 @property
2427 def _manifestctx(self):
2419 def _manifestctx(self):
2428 return self._repo.manifestlog[self._manifestnode]
2420 return self._repo.manifestlog[self._manifestnode]
2429
2421
2430 def filectx(self, path, filelog=None):
2422 def filectx(self, path, filelog=None):
2431 return self._originalctx.filectx(path, filelog=filelog)
2423 return self._originalctx.filectx(path, filelog=filelog)
2432
2424
2433 def commit(self):
2425 def commit(self):
2434 """commit context to the repo"""
2426 """commit context to the repo"""
2435 return self._repo.commitctx(self)
2427 return self._repo.commitctx(self)
2436
2428
2437 @property
2429 @property
2438 def _manifest(self):
2430 def _manifest(self):
2439 return self._originalctx.manifest()
2431 return self._originalctx.manifest()
2440
2432
2441 @propertycache
2433 @propertycache
2442 def _status(self):
2434 def _status(self):
2443 """Calculate exact status from ``files`` specified in the ``origctx``
2435 """Calculate exact status from ``files`` specified in the ``origctx``
2444 and parents manifests.
2436 and parents manifests.
2445 """
2437 """
2446 man1 = self.p1().manifest()
2438 man1 = self.p1().manifest()
2447 p2 = self._parents[1]
2439 p2 = self._parents[1]
2448 # "1 < len(self._parents)" can't be used for checking
2440 # "1 < len(self._parents)" can't be used for checking
2449 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2441 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2450 # explicitly initialized by the list, of which length is 2.
2442 # explicitly initialized by the list, of which length is 2.
2451 if p2.node() != nullid:
2443 if p2.node() != nullid:
2452 man2 = p2.manifest()
2444 man2 = p2.manifest()
2453 managing = lambda f: f in man1 or f in man2
2445 managing = lambda f: f in man1 or f in man2
2454 else:
2446 else:
2455 managing = lambda f: f in man1
2447 managing = lambda f: f in man1
2456
2448
2457 modified, added, removed = [], [], []
2449 modified, added, removed = [], [], []
2458 for f in self._files:
2450 for f in self._files:
2459 if not managing(f):
2451 if not managing(f):
2460 added.append(f)
2452 added.append(f)
2461 elif f in self:
2453 elif f in self:
2462 modified.append(f)
2454 modified.append(f)
2463 else:
2455 else:
2464 removed.append(f)
2456 removed.append(f)
2465
2457
2466 return scmutil.status(modified, added, removed, [], [], [], [])
2458 return scmutil.status(modified, added, removed, [], [], [], [])
2467
2459
2468 class arbitraryfilectx(object):
2460 class arbitraryfilectx(object):
2469 """Allows you to use filectx-like functions on a file in an arbitrary
2461 """Allows you to use filectx-like functions on a file in an arbitrary
2470 location on disk, possibly not in the working directory.
2462 location on disk, possibly not in the working directory.
2471 """
2463 """
2472 def __init__(self, path, repo=None):
2464 def __init__(self, path, repo=None):
2473 # Repo is optional because contrib/simplemerge uses this class.
2465 # Repo is optional because contrib/simplemerge uses this class.
2474 self._repo = repo
2466 self._repo = repo
2475 self._path = path
2467 self._path = path
2476
2468
2477 def cmp(self, fctx):
2469 def cmp(self, fctx):
2478 # filecmp follows symlinks whereas `cmp` should not, so skip the fast
2470 # filecmp follows symlinks whereas `cmp` should not, so skip the fast
2479 # path if either side is a symlink.
2471 # path if either side is a symlink.
2480 symlinks = ('l' in self.flags() or 'l' in fctx.flags())
2472 symlinks = ('l' in self.flags() or 'l' in fctx.flags())
2481 if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
2473 if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
2482 # Add a fast-path for merge if both sides are disk-backed.
2474 # Add a fast-path for merge if both sides are disk-backed.
2483 # Note that filecmp uses the opposite return values (True if same)
2475 # Note that filecmp uses the opposite return values (True if same)
2484 # from our cmp functions (True if different).
2476 # from our cmp functions (True if different).
2485 return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
2477 return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
2486 return self.data() != fctx.data()
2478 return self.data() != fctx.data()
2487
2479
2488 def path(self):
2480 def path(self):
2489 return self._path
2481 return self._path
2490
2482
2491 def flags(self):
2483 def flags(self):
2492 return ''
2484 return ''
2493
2485
2494 def data(self):
2486 def data(self):
2495 return util.readfile(self._path)
2487 return util.readfile(self._path)
2496
2488
2497 def decodeddata(self):
2489 def decodeddata(self):
2498 with open(self._path, "rb") as f:
2490 with open(self._path, "rb") as f:
2499 return f.read()
2491 return f.read()
2500
2492
2501 def remove(self):
2493 def remove(self):
2502 util.unlink(self._path)
2494 util.unlink(self._path)
2503
2495
2504 def write(self, data, flags, **kwargs):
2496 def write(self, data, flags, **kwargs):
2505 assert not flags
2497 assert not flags
2506 with open(self._path, "w") as f:
2498 with open(self._path, "w") as f:
2507 f.write(data)
2499 f.write(data)
General Comments 0
You need to be logged in to leave comments. Login now