##// END OF EJS Templates
context: name files relative to cwd in warning messages...
Matt Harbison -
r33501:7008f681 default
parent child Browse files
Show More
@@ -1,2322 +1,2330 b''
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import os
11 import os
12 import re
12 import re
13 import stat
13 import stat
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 addednodeid,
17 addednodeid,
18 bin,
18 bin,
19 hex,
19 hex,
20 modifiednodeid,
20 modifiednodeid,
21 nullid,
21 nullid,
22 nullrev,
22 nullrev,
23 short,
23 short,
24 wdirid,
24 wdirid,
25 wdirnodes,
25 wdirnodes,
26 wdirrev,
26 wdirrev,
27 )
27 )
28 from . import (
28 from . import (
29 encoding,
29 encoding,
30 error,
30 error,
31 fileset,
31 fileset,
32 match as matchmod,
32 match as matchmod,
33 mdiff,
33 mdiff,
34 obsolete as obsmod,
34 obsolete as obsmod,
35 patch,
35 patch,
36 pathutil,
36 phases,
37 phases,
37 pycompat,
38 pycompat,
38 repoview,
39 repoview,
39 revlog,
40 revlog,
40 scmutil,
41 scmutil,
41 sparse,
42 sparse,
42 subrepo,
43 subrepo,
43 util,
44 util,
44 )
45 )
45
46
# Shorthand for the lazily-evaluated cached property decorator used
# throughout this module.
propertycache = util.propertycache

# Predicate: returns a truthy match object if the byte string contains any
# character outside the printable ASCII range 0x21-0x7f (used to decide
# whether a 20-byte binary node should be hex-encoded for error messages).
nonascii = re.compile(r'[^\x21-\x7f]').search
49
50
class basectx(object):
    """A basectx object represents the common logic for its children:

    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
                be committed,
    memctx: a context that represents changes in-memory and can also
            be committed.
    """
    def __new__(cls, repo, changeid='', *args, **kwargs):
        # Passing an existing context through returns it unchanged; this
        # makes ``changectx(repo, otherctx)`` a cheap identity operation.
        if isinstance(changeid, basectx):
            return changeid

        o = super(basectx, cls).__new__(cls)

        o._repo = repo
        o._rev = nullrev
        o._node = nullid

        return o

    def __bytes__(self):
        return short(self.node())

    __str__ = encoding.strmethod(__bytes__)

    def __int__(self):
        return self.rev()

    def __repr__(self):
        return r"<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        # Contexts are equal when they are of the same concrete type and
        # point at the same revision; non-contexts compare unequal.
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        return key in self._manifest

    def __getitem__(self, key):
        return self.filectx(key)

    def __iter__(self):
        return iter(self._manifest)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results, if this is
        a working copy context. For non-working copy contexts, it just returns
        the normal manifest."""
        return self.manifest()

    def _matchstatus(self, other, match):
        """return match.always if match is none

        This internal method provides a way for child objects to override the
        match operator.
        """
        return match or matchmod.always(self._repo.root, self._repo.getcwd())

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in d.iteritems():
            if fn in deletedset:
                continue
            if value is None:
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in wdirnodes:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [fn for fn in unknown if fn not in mf1 and
                       (not match or match(fn))]
            ignored = [fn for fn in ignored if fn not in mf1 and
                       (not match or match(fn))]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(modified, added, removed, deleted, unknown,
                              ignored, clean)

    @propertycache
    def substate(self):
        # Parsed .hgsub/.hgsubstate information for this context.
        return subrepo.state(self, self._repo.ui)

    def subrev(self, subpath):
        return self.substate[subpath][1]

    def rev(self):
        return self._rev
    def node(self):
        return self._node
    def hex(self):
        return hex(self.node())
    def manifest(self):
        return self._manifest
    def manifestctx(self):
        return self._manifestctx
    def repo(self):
        return self._repo
    def phasestr(self):
        return phases.phasenames[self.phase()]
    def mutable(self):
        return self.phase() > phases.public

    def getfileset(self, expr):
        return fileset.getfileset(self, expr)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, 'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, 'extinct')

    def unstable(self):
        """True if the changeset is not obsolete but it's ancestor are"""
        return self.rev() in obsmod.getrevs(self._repo, 'unstable')

    def bumped(self):
        """True if the changeset try to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be bumped.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'bumped')

    def divergent(self):
        """Is a successors of a changeset with multiple possible successors set

        Only non-public and non-obsolete changesets may be divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'divergent')

    def troubled(self):
        """True if the changeset is either unstable, bumped or divergent"""
        return self.unstable() or self.bumped() or self.divergent()

    def troubles(self):
        """return the list of troubles affecting this changesets.

        Troubles are returned as strings. possible values are:
        - unstable,
        - bumped,
        - divergent.
        """
        troubles = []
        if self.unstable():
            troubles.append('unstable')
        if self.bumped():
            troubles.append('bumped')
        if self.divergent():
            troubles.append('divergent')
        return troubles

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        # Single-parent changesets get the null revision as second parent.
        return changectx(self._repo, nullrev)

    def _fileinfo(self, path):
        # Prefer whichever manifest representation is already loaded, to
        # avoid reading the full manifest when a delta suffices.
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(self._node, path,
                                                _('not found in manifest'))
        if r'_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (self._manifestdelta[path],
                        self._manifestdelta.flags(path))
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(self._node, path,
                                            _('not found in manifest'))

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return ''

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        '''return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        '''
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        r = self._repo
        return matchmod.match(r.root, r.getcwd(), pats,
                              include, exclude, default,
                              auditor=r.nofsauditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn)

    def diff(self, ctx2=None, match=None, **opts):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        diffopts = patch.diffopts(self._repo.ui, opts)
        return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)

    def dirs(self):
        return self._manifest.dirs()

    def hasdir(self, dir):
        return self._manifest.hasdir(dir)

    def status(self, other=None, match=None, listignored=False,
               listclean=False, listunknown=False, listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        reversed = False
        if (not isinstance(ctx1, changectx)
            and isinstance(ctx2, changectx)):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
                              listunknown)

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
                               r.clean)

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(rev2, match=submatch, ignored=listignored,
                               clean=listclean, unknown=listunknown,
                               listsubrepos=True)
                for rfiles, sfiles in zip(r, s):
                    rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)

        for l in r:
            l.sort()

        return r
384
385
def _filterederror(repo, changeid):
    """build an exception to be raised about a filtered changeid

    This is extracted in a function to help extensions (eg: evolve) to
    experiment with various message variants.

    Returns (never raises) an ``error.FilteredRepoLookupError`` whose
    message depends on the repo's active filter: the common 'visible'
    filters get a user-friendly "hidden revision" message with a --hidden
    hint; any other filter gets a generic "filtered revision" message.
    """
    if repo.filtername.startswith('visible'):
        msg = _("hidden revision '%s'") % changeid
        hint = _('use --hidden to access hidden revisions')
        return error.FilteredRepoLookupError(msg, hint=hint)
    msg = _("filtered revision '%s' (not in '%s' subset)")
    msg %= (changeid, repo.filtername)
    return error.FilteredRepoLookupError(msg)
397
398
398 class changectx(basectx):
399 class changectx(basectx):
399 """A changecontext object makes access to data related to a particular
400 """A changecontext object makes access to data related to a particular
400 changeset convenient. It represents a read-only context already present in
401 changeset convenient. It represents a read-only context already present in
401 the repo."""
402 the repo."""
402 def __init__(self, repo, changeid=''):
403 def __init__(self, repo, changeid=''):
403 """changeid is a revision number, node, or tag"""
404 """changeid is a revision number, node, or tag"""
404
405
405 # since basectx.__new__ already took care of copying the object, we
406 # since basectx.__new__ already took care of copying the object, we
406 # don't need to do anything in __init__, so we just exit here
407 # don't need to do anything in __init__, so we just exit here
407 if isinstance(changeid, basectx):
408 if isinstance(changeid, basectx):
408 return
409 return
409
410
410 if changeid == '':
411 if changeid == '':
411 changeid = '.'
412 changeid = '.'
412 self._repo = repo
413 self._repo = repo
413
414
414 try:
415 try:
415 if isinstance(changeid, int):
416 if isinstance(changeid, int):
416 self._node = repo.changelog.node(changeid)
417 self._node = repo.changelog.node(changeid)
417 self._rev = changeid
418 self._rev = changeid
418 return
419 return
419 if not pycompat.ispy3 and isinstance(changeid, long):
420 if not pycompat.ispy3 and isinstance(changeid, long):
420 changeid = str(changeid)
421 changeid = str(changeid)
421 if changeid == 'null':
422 if changeid == 'null':
422 self._node = nullid
423 self._node = nullid
423 self._rev = nullrev
424 self._rev = nullrev
424 return
425 return
425 if changeid == 'tip':
426 if changeid == 'tip':
426 self._node = repo.changelog.tip()
427 self._node = repo.changelog.tip()
427 self._rev = repo.changelog.rev(self._node)
428 self._rev = repo.changelog.rev(self._node)
428 return
429 return
429 if changeid == '.' or changeid == repo.dirstate.p1():
430 if changeid == '.' or changeid == repo.dirstate.p1():
430 # this is a hack to delay/avoid loading obsmarkers
431 # this is a hack to delay/avoid loading obsmarkers
431 # when we know that '.' won't be hidden
432 # when we know that '.' won't be hidden
432 self._node = repo.dirstate.p1()
433 self._node = repo.dirstate.p1()
433 self._rev = repo.unfiltered().changelog.rev(self._node)
434 self._rev = repo.unfiltered().changelog.rev(self._node)
434 return
435 return
435 if len(changeid) == 20:
436 if len(changeid) == 20:
436 try:
437 try:
437 self._node = changeid
438 self._node = changeid
438 self._rev = repo.changelog.rev(changeid)
439 self._rev = repo.changelog.rev(changeid)
439 return
440 return
440 except error.FilteredRepoLookupError:
441 except error.FilteredRepoLookupError:
441 raise
442 raise
442 except LookupError:
443 except LookupError:
443 pass
444 pass
444
445
445 try:
446 try:
446 r = int(changeid)
447 r = int(changeid)
447 if '%d' % r != changeid:
448 if '%d' % r != changeid:
448 raise ValueError
449 raise ValueError
449 l = len(repo.changelog)
450 l = len(repo.changelog)
450 if r < 0:
451 if r < 0:
451 r += l
452 r += l
452 if r < 0 or r >= l and r != wdirrev:
453 if r < 0 or r >= l and r != wdirrev:
453 raise ValueError
454 raise ValueError
454 self._rev = r
455 self._rev = r
455 self._node = repo.changelog.node(r)
456 self._node = repo.changelog.node(r)
456 return
457 return
457 except error.FilteredIndexError:
458 except error.FilteredIndexError:
458 raise
459 raise
459 except (ValueError, OverflowError, IndexError):
460 except (ValueError, OverflowError, IndexError):
460 pass
461 pass
461
462
462 if len(changeid) == 40:
463 if len(changeid) == 40:
463 try:
464 try:
464 self._node = bin(changeid)
465 self._node = bin(changeid)
465 self._rev = repo.changelog.rev(self._node)
466 self._rev = repo.changelog.rev(self._node)
466 return
467 return
467 except error.FilteredLookupError:
468 except error.FilteredLookupError:
468 raise
469 raise
469 except (TypeError, LookupError):
470 except (TypeError, LookupError):
470 pass
471 pass
471
472
472 # lookup bookmarks through the name interface
473 # lookup bookmarks through the name interface
473 try:
474 try:
474 self._node = repo.names.singlenode(repo, changeid)
475 self._node = repo.names.singlenode(repo, changeid)
475 self._rev = repo.changelog.rev(self._node)
476 self._rev = repo.changelog.rev(self._node)
476 return
477 return
477 except KeyError:
478 except KeyError:
478 pass
479 pass
479 except error.FilteredRepoLookupError:
480 except error.FilteredRepoLookupError:
480 raise
481 raise
481 except error.RepoLookupError:
482 except error.RepoLookupError:
482 pass
483 pass
483
484
484 self._node = repo.unfiltered().changelog._partialmatch(changeid)
485 self._node = repo.unfiltered().changelog._partialmatch(changeid)
485 if self._node is not None:
486 if self._node is not None:
486 self._rev = repo.changelog.rev(self._node)
487 self._rev = repo.changelog.rev(self._node)
487 return
488 return
488
489
489 # lookup failed
490 # lookup failed
490 # check if it might have come from damaged dirstate
491 # check if it might have come from damaged dirstate
491 #
492 #
492 # XXX we could avoid the unfiltered if we had a recognizable
493 # XXX we could avoid the unfiltered if we had a recognizable
493 # exception for filtered changeset access
494 # exception for filtered changeset access
494 if changeid in repo.unfiltered().dirstate.parents():
495 if changeid in repo.unfiltered().dirstate.parents():
495 msg = _("working directory has unknown parent '%s'!")
496 msg = _("working directory has unknown parent '%s'!")
496 raise error.Abort(msg % short(changeid))
497 raise error.Abort(msg % short(changeid))
497 try:
498 try:
498 if len(changeid) == 20 and nonascii(changeid):
499 if len(changeid) == 20 and nonascii(changeid):
499 changeid = hex(changeid)
500 changeid = hex(changeid)
500 except TypeError:
501 except TypeError:
501 pass
502 pass
502 except (error.FilteredIndexError, error.FilteredLookupError,
503 except (error.FilteredIndexError, error.FilteredLookupError,
503 error.FilteredRepoLookupError):
504 error.FilteredRepoLookupError):
504 raise _filterederror(repo, changeid)
505 raise _filterederror(repo, changeid)
505 except IndexError:
506 except IndexError:
506 pass
507 pass
507 raise error.RepoLookupError(
508 raise error.RepoLookupError(
508 _("unknown revision '%s'") % changeid)
509 _("unknown revision '%s'") % changeid)
509
510
510 def __hash__(self):
511 def __hash__(self):
511 try:
512 try:
512 return hash(self._rev)
513 return hash(self._rev)
513 except AttributeError:
514 except AttributeError:
514 return id(self)
515 return id(self)
515
516
516 def __nonzero__(self):
517 def __nonzero__(self):
517 return self._rev != nullrev
518 return self._rev != nullrev
518
519
519 __bool__ = __nonzero__
520 __bool__ = __nonzero__
520
521
521 @propertycache
522 @propertycache
522 def _changeset(self):
523 def _changeset(self):
523 return self._repo.changelog.changelogrevision(self.rev())
524 return self._repo.changelog.changelogrevision(self.rev())
524
525
525 @propertycache
526 @propertycache
526 def _manifest(self):
527 def _manifest(self):
527 return self._manifestctx.read()
528 return self._manifestctx.read()
528
529
529 @property
530 @property
530 def _manifestctx(self):
531 def _manifestctx(self):
531 return self._repo.manifestlog[self._changeset.manifest]
532 return self._repo.manifestlog[self._changeset.manifest]
532
533
533 @propertycache
534 @propertycache
534 def _manifestdelta(self):
535 def _manifestdelta(self):
535 return self._manifestctx.readdelta()
536 return self._manifestctx.readdelta()
536
537
537 @propertycache
538 @propertycache
538 def _parents(self):
539 def _parents(self):
539 repo = self._repo
540 repo = self._repo
540 p1, p2 = repo.changelog.parentrevs(self._rev)
541 p1, p2 = repo.changelog.parentrevs(self._rev)
541 if p2 == nullrev:
542 if p2 == nullrev:
542 return [changectx(repo, p1)]
543 return [changectx(repo, p1)]
543 return [changectx(repo, p1), changectx(repo, p2)]
544 return [changectx(repo, p1), changectx(repo, p2)]
544
545
545 def changeset(self):
546 def changeset(self):
546 c = self._changeset
547 c = self._changeset
547 return (
548 return (
548 c.manifest,
549 c.manifest,
549 c.user,
550 c.user,
550 c.date,
551 c.date,
551 c.files,
552 c.files,
552 c.description,
553 c.description,
553 c.extra,
554 c.extra,
554 )
555 )
555 def manifestnode(self):
556 def manifestnode(self):
556 return self._changeset.manifest
557 return self._changeset.manifest
557
558
558 def user(self):
559 def user(self):
559 return self._changeset.user
560 return self._changeset.user
560 def date(self):
561 def date(self):
561 return self._changeset.date
562 return self._changeset.date
562 def files(self):
563 def files(self):
563 return self._changeset.files
564 return self._changeset.files
564 def description(self):
565 def description(self):
565 return self._changeset.description
566 return self._changeset.description
566 def branch(self):
567 def branch(self):
567 return encoding.tolocal(self._changeset.extra.get("branch"))
568 return encoding.tolocal(self._changeset.extra.get("branch"))
568 def closesbranch(self):
569 def closesbranch(self):
569 return 'close' in self._changeset.extra
570 return 'close' in self._changeset.extra
570 def extra(self):
571 def extra(self):
571 return self._changeset.extra
572 return self._changeset.extra
572 def tags(self):
573 def tags(self):
573 return self._repo.nodetags(self._node)
574 return self._repo.nodetags(self._node)
574 def bookmarks(self):
575 def bookmarks(self):
575 return self._repo.nodebookmarks(self._node)
576 return self._repo.nodebookmarks(self._node)
576 def phase(self):
577 def phase(self):
577 return self._repo._phasecache.phase(self._repo, self._rev)
578 return self._repo._phasecache.phase(self._repo, self._rev)
578 def hidden(self):
579 def hidden(self):
579 return self._rev in repoview.filterrevs(self._repo, 'visible')
580 return self._rev in repoview.filterrevs(self._repo, 'visible')
580
581
581 def children(self):
582 def children(self):
582 """return contexts for each child changeset"""
583 """return contexts for each child changeset"""
583 c = self._repo.changelog.children(self._node)
584 c = self._repo.changelog.children(self._node)
584 return [changectx(self._repo, x) for x in c]
585 return [changectx(self._repo, x) for x in c]
585
586
586 def ancestors(self):
587 def ancestors(self):
587 for a in self._repo.changelog.ancestors([self._rev]):
588 for a in self._repo.changelog.ancestors([self._rev]):
588 yield changectx(self._repo, a)
589 yield changectx(self._repo, a)
589
590
590 def descendants(self):
591 def descendants(self):
591 for d in self._repo.changelog.descendants([self._rev]):
592 for d in self._repo.changelog.descendants([self._rev]):
592 yield changectx(self._repo, d)
593 yield changectx(self._repo, d)
593
594
594 def filectx(self, path, fileid=None, filelog=None):
595 def filectx(self, path, fileid=None, filelog=None):
595 """get a file context from this changeset"""
596 """get a file context from this changeset"""
596 if fileid is None:
597 if fileid is None:
597 fileid = self.filenode(path)
598 fileid = self.filenode(path)
598 return filectx(self._repo, path, fileid=fileid,
599 return filectx(self._repo, path, fileid=fileid,
599 changectx=self, filelog=filelog)
600 changectx=self, filelog=filelog)
600
601
601 def ancestor(self, c2, warn=False):
602 def ancestor(self, c2, warn=False):
602 """return the "best" ancestor context of self and c2
603 """return the "best" ancestor context of self and c2
603
604
604 If there are multiple candidates, it will show a message and check
605 If there are multiple candidates, it will show a message and check
605 merge.preferancestor configuration before falling back to the
606 merge.preferancestor configuration before falling back to the
606 revlog ancestor."""
607 revlog ancestor."""
607 # deal with workingctxs
608 # deal with workingctxs
608 n2 = c2._node
609 n2 = c2._node
609 if n2 is None:
610 if n2 is None:
610 n2 = c2._parents[0]._node
611 n2 = c2._parents[0]._node
611 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
612 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
612 if not cahs:
613 if not cahs:
613 anc = nullid
614 anc = nullid
614 elif len(cahs) == 1:
615 elif len(cahs) == 1:
615 anc = cahs[0]
616 anc = cahs[0]
616 else:
617 else:
617 # experimental config: merge.preferancestor
618 # experimental config: merge.preferancestor
618 for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
619 for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
619 try:
620 try:
620 ctx = changectx(self._repo, r)
621 ctx = changectx(self._repo, r)
621 except error.RepoLookupError:
622 except error.RepoLookupError:
622 continue
623 continue
623 anc = ctx.node()
624 anc = ctx.node()
624 if anc in cahs:
625 if anc in cahs:
625 break
626 break
626 else:
627 else:
627 anc = self._repo.changelog.ancestor(self._node, n2)
628 anc = self._repo.changelog.ancestor(self._node, n2)
628 if warn:
629 if warn:
629 self._repo.ui.status(
630 self._repo.ui.status(
630 (_("note: using %s as ancestor of %s and %s\n") %
631 (_("note: using %s as ancestor of %s and %s\n") %
631 (short(anc), short(self._node), short(n2))) +
632 (short(anc), short(self._node), short(n2))) +
632 ''.join(_(" alternatively, use --config "
633 ''.join(_(" alternatively, use --config "
633 "merge.preferancestor=%s\n") %
634 "merge.preferancestor=%s\n") %
634 short(n) for n in sorted(cahs) if n != anc))
635 short(n) for n in sorted(cahs) if n != anc))
635 return changectx(self._repo, anc)
636 return changectx(self._repo, anc)
636
637
637 def descendant(self, other):
638 def descendant(self, other):
638 """True if other is descendant of this changeset"""
639 """True if other is descendant of this changeset"""
639 return self._repo.changelog.descendant(self._rev, other._rev)
640 return self._repo.changelog.descendant(self._rev, other._rev)
640
641
641 def walk(self, match):
642 def walk(self, match):
642 '''Generates matching file names.'''
643 '''Generates matching file names.'''
643
644
644 # Wrap match.bad method to have message with nodeid
645 # Wrap match.bad method to have message with nodeid
645 def bad(fn, msg):
646 def bad(fn, msg):
646 # The manifest doesn't know about subrepos, so don't complain about
647 # The manifest doesn't know about subrepos, so don't complain about
647 # paths into valid subrepos.
648 # paths into valid subrepos.
648 if any(fn == s or fn.startswith(s + '/')
649 if any(fn == s or fn.startswith(s + '/')
649 for s in self.substate):
650 for s in self.substate):
650 return
651 return
651 match.bad(fn, _('no such file in rev %s') % self)
652 match.bad(fn, _('no such file in rev %s') % self)
652
653
653 m = matchmod.badmatch(match, bad)
654 m = matchmod.badmatch(match, bad)
654 return self._manifest.walk(m)
655 return self._manifest.walk(m)
655
656
656 def matches(self, match):
657 def matches(self, match):
657 return self.walk(match)
658 return self.walk(match)
658
659
659 class basefilectx(object):
660 class basefilectx(object):
660 """A filecontext object represents the common logic for its children:
661 """A filecontext object represents the common logic for its children:
661 filectx: read-only access to a filerevision that is already present
662 filectx: read-only access to a filerevision that is already present
662 in the repo,
663 in the repo,
663 workingfilectx: a filecontext that represents files from the working
664 workingfilectx: a filecontext that represents files from the working
664 directory,
665 directory,
665 memfilectx: a filecontext that represents files in-memory,
666 memfilectx: a filecontext that represents files in-memory,
666 overlayfilectx: duplicate another filecontext with some fields overridden.
667 overlayfilectx: duplicate another filecontext with some fields overridden.
667 """
668 """
668 @propertycache
669 @propertycache
669 def _filelog(self):
670 def _filelog(self):
670 return self._repo.file(self._path)
671 return self._repo.file(self._path)
671
672
672 @propertycache
673 @propertycache
673 def _changeid(self):
674 def _changeid(self):
674 if r'_changeid' in self.__dict__:
675 if r'_changeid' in self.__dict__:
675 return self._changeid
676 return self._changeid
676 elif r'_changectx' in self.__dict__:
677 elif r'_changectx' in self.__dict__:
677 return self._changectx.rev()
678 return self._changectx.rev()
678 elif r'_descendantrev' in self.__dict__:
679 elif r'_descendantrev' in self.__dict__:
679 # this file context was created from a revision with a known
680 # this file context was created from a revision with a known
680 # descendant, we can (lazily) correct for linkrev aliases
681 # descendant, we can (lazily) correct for linkrev aliases
681 return self._adjustlinkrev(self._descendantrev)
682 return self._adjustlinkrev(self._descendantrev)
682 else:
683 else:
683 return self._filelog.linkrev(self._filerev)
684 return self._filelog.linkrev(self._filerev)
684
685
685 @propertycache
686 @propertycache
686 def _filenode(self):
687 def _filenode(self):
687 if r'_fileid' in self.__dict__:
688 if r'_fileid' in self.__dict__:
688 return self._filelog.lookup(self._fileid)
689 return self._filelog.lookup(self._fileid)
689 else:
690 else:
690 return self._changectx.filenode(self._path)
691 return self._changectx.filenode(self._path)
691
692
692 @propertycache
693 @propertycache
693 def _filerev(self):
694 def _filerev(self):
694 return self._filelog.rev(self._filenode)
695 return self._filelog.rev(self._filenode)
695
696
696 @propertycache
697 @propertycache
697 def _repopath(self):
698 def _repopath(self):
698 return self._path
699 return self._path
699
700
700 def __nonzero__(self):
701 def __nonzero__(self):
701 try:
702 try:
702 self._filenode
703 self._filenode
703 return True
704 return True
704 except error.LookupError:
705 except error.LookupError:
705 # file is missing
706 # file is missing
706 return False
707 return False
707
708
708 __bool__ = __nonzero__
709 __bool__ = __nonzero__
709
710
710 def __bytes__(self):
711 def __bytes__(self):
711 try:
712 try:
712 return "%s@%s" % (self.path(), self._changectx)
713 return "%s@%s" % (self.path(), self._changectx)
713 except error.LookupError:
714 except error.LookupError:
714 return "%s@???" % self.path()
715 return "%s@???" % self.path()
715
716
716 __str__ = encoding.strmethod(__bytes__)
717 __str__ = encoding.strmethod(__bytes__)
717
718
718 def __repr__(self):
719 def __repr__(self):
719 return "<%s %s>" % (type(self).__name__, str(self))
720 return "<%s %s>" % (type(self).__name__, str(self))
720
721
721 def __hash__(self):
722 def __hash__(self):
722 try:
723 try:
723 return hash((self._path, self._filenode))
724 return hash((self._path, self._filenode))
724 except AttributeError:
725 except AttributeError:
725 return id(self)
726 return id(self)
726
727
727 def __eq__(self, other):
728 def __eq__(self, other):
728 try:
729 try:
729 return (type(self) == type(other) and self._path == other._path
730 return (type(self) == type(other) and self._path == other._path
730 and self._filenode == other._filenode)
731 and self._filenode == other._filenode)
731 except AttributeError:
732 except AttributeError:
732 return False
733 return False
733
734
734 def __ne__(self, other):
735 def __ne__(self, other):
735 return not (self == other)
736 return not (self == other)
736
737
737 def filerev(self):
738 def filerev(self):
738 return self._filerev
739 return self._filerev
739 def filenode(self):
740 def filenode(self):
740 return self._filenode
741 return self._filenode
741 @propertycache
742 @propertycache
742 def _flags(self):
743 def _flags(self):
743 return self._changectx.flags(self._path)
744 return self._changectx.flags(self._path)
744 def flags(self):
745 def flags(self):
745 return self._flags
746 return self._flags
746 def filelog(self):
747 def filelog(self):
747 return self._filelog
748 return self._filelog
748 def rev(self):
749 def rev(self):
749 return self._changeid
750 return self._changeid
750 def linkrev(self):
751 def linkrev(self):
751 return self._filelog.linkrev(self._filerev)
752 return self._filelog.linkrev(self._filerev)
752 def node(self):
753 def node(self):
753 return self._changectx.node()
754 return self._changectx.node()
754 def hex(self):
755 def hex(self):
755 return self._changectx.hex()
756 return self._changectx.hex()
756 def user(self):
757 def user(self):
757 return self._changectx.user()
758 return self._changectx.user()
758 def date(self):
759 def date(self):
759 return self._changectx.date()
760 return self._changectx.date()
760 def files(self):
761 def files(self):
761 return self._changectx.files()
762 return self._changectx.files()
762 def description(self):
763 def description(self):
763 return self._changectx.description()
764 return self._changectx.description()
764 def branch(self):
765 def branch(self):
765 return self._changectx.branch()
766 return self._changectx.branch()
766 def extra(self):
767 def extra(self):
767 return self._changectx.extra()
768 return self._changectx.extra()
768 def phase(self):
769 def phase(self):
769 return self._changectx.phase()
770 return self._changectx.phase()
770 def phasestr(self):
771 def phasestr(self):
771 return self._changectx.phasestr()
772 return self._changectx.phasestr()
772 def manifest(self):
773 def manifest(self):
773 return self._changectx.manifest()
774 return self._changectx.manifest()
774 def changectx(self):
775 def changectx(self):
775 return self._changectx
776 return self._changectx
776 def renamed(self):
777 def renamed(self):
777 return self._copied
778 return self._copied
778 def repo(self):
779 def repo(self):
779 return self._repo
780 return self._repo
780 def size(self):
781 def size(self):
781 return len(self.data())
782 return len(self.data())
782
783
783 def path(self):
784 def path(self):
784 return self._path
785 return self._path
785
786
786 def isbinary(self):
787 def isbinary(self):
787 try:
788 try:
788 return util.binary(self.data())
789 return util.binary(self.data())
789 except IOError:
790 except IOError:
790 return False
791 return False
791 def isexec(self):
792 def isexec(self):
792 return 'x' in self.flags()
793 return 'x' in self.flags()
793 def islink(self):
794 def islink(self):
794 return 'l' in self.flags()
795 return 'l' in self.flags()
795
796
796 def isabsent(self):
797 def isabsent(self):
797 """whether this filectx represents a file not in self._changectx
798 """whether this filectx represents a file not in self._changectx
798
799
799 This is mainly for merge code to detect change/delete conflicts. This is
800 This is mainly for merge code to detect change/delete conflicts. This is
800 expected to be True for all subclasses of basectx."""
801 expected to be True for all subclasses of basectx."""
801 return False
802 return False
802
803
803 _customcmp = False
804 _customcmp = False
804 def cmp(self, fctx):
805 def cmp(self, fctx):
805 """compare with other file context
806 """compare with other file context
806
807
807 returns True if different than fctx.
808 returns True if different than fctx.
808 """
809 """
809 if fctx._customcmp:
810 if fctx._customcmp:
810 return fctx.cmp(self)
811 return fctx.cmp(self)
811
812
812 if (fctx._filenode is None
813 if (fctx._filenode is None
813 and (self._repo._encodefilterpats
814 and (self._repo._encodefilterpats
814 # if file data starts with '\1\n', empty metadata block is
815 # if file data starts with '\1\n', empty metadata block is
815 # prepended, which adds 4 bytes to filelog.size().
816 # prepended, which adds 4 bytes to filelog.size().
816 or self.size() - 4 == fctx.size())
817 or self.size() - 4 == fctx.size())
817 or self.size() == fctx.size()):
818 or self.size() == fctx.size()):
818 return self._filelog.cmp(self._filenode, fctx.data())
819 return self._filelog.cmp(self._filenode, fctx.data())
819
820
820 return True
821 return True
821
822
822 def _adjustlinkrev(self, srcrev, inclusive=False):
823 def _adjustlinkrev(self, srcrev, inclusive=False):
823 """return the first ancestor of <srcrev> introducing <fnode>
824 """return the first ancestor of <srcrev> introducing <fnode>
824
825
825 If the linkrev of the file revision does not point to an ancestor of
826 If the linkrev of the file revision does not point to an ancestor of
826 srcrev, we'll walk down the ancestors until we find one introducing
827 srcrev, we'll walk down the ancestors until we find one introducing
827 this file revision.
828 this file revision.
828
829
829 :srcrev: the changeset revision we search ancestors from
830 :srcrev: the changeset revision we search ancestors from
830 :inclusive: if true, the src revision will also be checked
831 :inclusive: if true, the src revision will also be checked
831 """
832 """
832 repo = self._repo
833 repo = self._repo
833 cl = repo.unfiltered().changelog
834 cl = repo.unfiltered().changelog
834 mfl = repo.manifestlog
835 mfl = repo.manifestlog
835 # fetch the linkrev
836 # fetch the linkrev
836 lkr = self.linkrev()
837 lkr = self.linkrev()
837 # hack to reuse ancestor computation when searching for renames
838 # hack to reuse ancestor computation when searching for renames
838 memberanc = getattr(self, '_ancestrycontext', None)
839 memberanc = getattr(self, '_ancestrycontext', None)
839 iteranc = None
840 iteranc = None
840 if srcrev is None:
841 if srcrev is None:
841 # wctx case, used by workingfilectx during mergecopy
842 # wctx case, used by workingfilectx during mergecopy
842 revs = [p.rev() for p in self._repo[None].parents()]
843 revs = [p.rev() for p in self._repo[None].parents()]
843 inclusive = True # we skipped the real (revless) source
844 inclusive = True # we skipped the real (revless) source
844 else:
845 else:
845 revs = [srcrev]
846 revs = [srcrev]
846 if memberanc is None:
847 if memberanc is None:
847 memberanc = iteranc = cl.ancestors(revs, lkr,
848 memberanc = iteranc = cl.ancestors(revs, lkr,
848 inclusive=inclusive)
849 inclusive=inclusive)
849 # check if this linkrev is an ancestor of srcrev
850 # check if this linkrev is an ancestor of srcrev
850 if lkr not in memberanc:
851 if lkr not in memberanc:
851 if iteranc is None:
852 if iteranc is None:
852 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
853 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
853 fnode = self._filenode
854 fnode = self._filenode
854 path = self._path
855 path = self._path
855 for a in iteranc:
856 for a in iteranc:
856 ac = cl.read(a) # get changeset data (we avoid object creation)
857 ac = cl.read(a) # get changeset data (we avoid object creation)
857 if path in ac[3]: # checking the 'files' field.
858 if path in ac[3]: # checking the 'files' field.
858 # The file has been touched, check if the content is
859 # The file has been touched, check if the content is
859 # similar to the one we search for.
860 # similar to the one we search for.
860 if fnode == mfl[ac[0]].readfast().get(path):
861 if fnode == mfl[ac[0]].readfast().get(path):
861 return a
862 return a
862 # In theory, we should never get out of that loop without a result.
863 # In theory, we should never get out of that loop without a result.
863 # But if manifest uses a buggy file revision (not children of the
864 # But if manifest uses a buggy file revision (not children of the
864 # one it replaces) we could. Such a buggy situation will likely
865 # one it replaces) we could. Such a buggy situation will likely
865 # result is crash somewhere else at to some point.
866 # result is crash somewhere else at to some point.
866 return lkr
867 return lkr
867
868
868 def introrev(self):
869 def introrev(self):
869 """return the rev of the changeset which introduced this file revision
870 """return the rev of the changeset which introduced this file revision
870
871
871 This method is different from linkrev because it take into account the
872 This method is different from linkrev because it take into account the
872 changeset the filectx was created from. It ensures the returned
873 changeset the filectx was created from. It ensures the returned
873 revision is one of its ancestors. This prevents bugs from
874 revision is one of its ancestors. This prevents bugs from
874 'linkrev-shadowing' when a file revision is used by multiple
875 'linkrev-shadowing' when a file revision is used by multiple
875 changesets.
876 changesets.
876 """
877 """
877 lkr = self.linkrev()
878 lkr = self.linkrev()
878 attrs = vars(self)
879 attrs = vars(self)
879 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
880 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
880 if noctx or self.rev() == lkr:
881 if noctx or self.rev() == lkr:
881 return self.linkrev()
882 return self.linkrev()
882 return self._adjustlinkrev(self.rev(), inclusive=True)
883 return self._adjustlinkrev(self.rev(), inclusive=True)
883
884
884 def _parentfilectx(self, path, fileid, filelog):
885 def _parentfilectx(self, path, fileid, filelog):
885 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
886 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
886 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
887 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
887 if '_changeid' in vars(self) or '_changectx' in vars(self):
888 if '_changeid' in vars(self) or '_changectx' in vars(self):
888 # If self is associated with a changeset (probably explicitly
889 # If self is associated with a changeset (probably explicitly
889 # fed), ensure the created filectx is associated with a
890 # fed), ensure the created filectx is associated with a
890 # changeset that is an ancestor of self.changectx.
891 # changeset that is an ancestor of self.changectx.
891 # This lets us later use _adjustlinkrev to get a correct link.
892 # This lets us later use _adjustlinkrev to get a correct link.
892 fctx._descendantrev = self.rev()
893 fctx._descendantrev = self.rev()
893 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
894 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
894 elif '_descendantrev' in vars(self):
895 elif '_descendantrev' in vars(self):
895 # Otherwise propagate _descendantrev if we have one associated.
896 # Otherwise propagate _descendantrev if we have one associated.
896 fctx._descendantrev = self._descendantrev
897 fctx._descendantrev = self._descendantrev
897 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
898 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
898 return fctx
899 return fctx
899
900
900 def parents(self):
901 def parents(self):
901 _path = self._path
902 _path = self._path
902 fl = self._filelog
903 fl = self._filelog
903 parents = self._filelog.parents(self._filenode)
904 parents = self._filelog.parents(self._filenode)
904 pl = [(_path, node, fl) for node in parents if node != nullid]
905 pl = [(_path, node, fl) for node in parents if node != nullid]
905
906
906 r = fl.renamed(self._filenode)
907 r = fl.renamed(self._filenode)
907 if r:
908 if r:
908 # - In the simple rename case, both parent are nullid, pl is empty.
909 # - In the simple rename case, both parent are nullid, pl is empty.
909 # - In case of merge, only one of the parent is null id and should
910 # - In case of merge, only one of the parent is null id and should
910 # be replaced with the rename information. This parent is -always-
911 # be replaced with the rename information. This parent is -always-
911 # the first one.
912 # the first one.
912 #
913 #
913 # As null id have always been filtered out in the previous list
914 # As null id have always been filtered out in the previous list
914 # comprehension, inserting to 0 will always result in "replacing
915 # comprehension, inserting to 0 will always result in "replacing
915 # first nullid parent with rename information.
916 # first nullid parent with rename information.
916 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
917 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
917
918
918 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
919 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
919
920
920 def p1(self):
921 def p1(self):
921 return self.parents()[0]
922 return self.parents()[0]
922
923
923 def p2(self):
924 def p2(self):
924 p = self.parents()
925 p = self.parents()
925 if len(p) == 2:
926 if len(p) == 2:
926 return p[1]
927 return p[1]
927 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
928 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
928
929
929 def annotate(self, follow=False, linenumber=False, skiprevs=None,
930 def annotate(self, follow=False, linenumber=False, skiprevs=None,
930 diffopts=None):
931 diffopts=None):
931 '''returns a list of tuples of ((ctx, number), line) for each line
932 '''returns a list of tuples of ((ctx, number), line) for each line
932 in the file, where ctx is the filectx of the node where
933 in the file, where ctx is the filectx of the node where
933 that line was last changed; if linenumber parameter is true, number is
934 that line was last changed; if linenumber parameter is true, number is
934 the line number at the first appearance in the managed file, otherwise,
935 the line number at the first appearance in the managed file, otherwise,
935 number has a fixed value of False.
936 number has a fixed value of False.
936 '''
937 '''
937
938
938 def lines(text):
939 def lines(text):
939 if text.endswith("\n"):
940 if text.endswith("\n"):
940 return text.count("\n")
941 return text.count("\n")
941 return text.count("\n") + int(bool(text))
942 return text.count("\n") + int(bool(text))
942
943
943 if linenumber:
944 if linenumber:
944 def decorate(text, rev):
945 def decorate(text, rev):
945 return ([(rev, i) for i in xrange(1, lines(text) + 1)], text)
946 return ([(rev, i) for i in xrange(1, lines(text) + 1)], text)
946 else:
947 else:
947 def decorate(text, rev):
948 def decorate(text, rev):
948 return ([(rev, False)] * lines(text), text)
949 return ([(rev, False)] * lines(text), text)
949
950
950 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
951 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
951
952
952 def parents(f):
953 def parents(f):
953 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
954 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
954 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
955 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
955 # from the topmost introrev (= srcrev) down to p.linkrev() if it
956 # from the topmost introrev (= srcrev) down to p.linkrev() if it
956 # isn't an ancestor of the srcrev.
957 # isn't an ancestor of the srcrev.
957 f._changeid
958 f._changeid
958 pl = f.parents()
959 pl = f.parents()
959
960
960 # Don't return renamed parents if we aren't following.
961 # Don't return renamed parents if we aren't following.
961 if not follow:
962 if not follow:
962 pl = [p for p in pl if p.path() == f.path()]
963 pl = [p for p in pl if p.path() == f.path()]
963
964
964 # renamed filectx won't have a filelog yet, so set it
965 # renamed filectx won't have a filelog yet, so set it
965 # from the cache to save time
966 # from the cache to save time
966 for p in pl:
967 for p in pl:
967 if not '_filelog' in p.__dict__:
968 if not '_filelog' in p.__dict__:
968 p._filelog = getlog(p.path())
969 p._filelog = getlog(p.path())
969
970
970 return pl
971 return pl
971
972
972 # use linkrev to find the first changeset where self appeared
973 # use linkrev to find the first changeset where self appeared
973 base = self
974 base = self
974 introrev = self.introrev()
975 introrev = self.introrev()
975 if self.rev() != introrev:
976 if self.rev() != introrev:
976 base = self.filectx(self.filenode(), changeid=introrev)
977 base = self.filectx(self.filenode(), changeid=introrev)
977 if getattr(base, '_ancestrycontext', None) is None:
978 if getattr(base, '_ancestrycontext', None) is None:
978 cl = self._repo.changelog
979 cl = self._repo.changelog
979 if introrev is None:
980 if introrev is None:
980 # wctx is not inclusive, but works because _ancestrycontext
981 # wctx is not inclusive, but works because _ancestrycontext
981 # is used to test filelog revisions
982 # is used to test filelog revisions
982 ac = cl.ancestors([p.rev() for p in base.parents()],
983 ac = cl.ancestors([p.rev() for p in base.parents()],
983 inclusive=True)
984 inclusive=True)
984 else:
985 else:
985 ac = cl.ancestors([introrev], inclusive=True)
986 ac = cl.ancestors([introrev], inclusive=True)
986 base._ancestrycontext = ac
987 base._ancestrycontext = ac
987
988
988 # This algorithm would prefer to be recursive, but Python is a
989 # This algorithm would prefer to be recursive, but Python is a
989 # bit recursion-hostile. Instead we do an iterative
990 # bit recursion-hostile. Instead we do an iterative
990 # depth-first search.
991 # depth-first search.
991
992
992 # 1st DFS pre-calculates pcache and needed
993 # 1st DFS pre-calculates pcache and needed
993 visit = [base]
994 visit = [base]
994 pcache = {}
995 pcache = {}
995 needed = {base: 1}
996 needed = {base: 1}
996 while visit:
997 while visit:
997 f = visit.pop()
998 f = visit.pop()
998 if f in pcache:
999 if f in pcache:
999 continue
1000 continue
1000 pl = parents(f)
1001 pl = parents(f)
1001 pcache[f] = pl
1002 pcache[f] = pl
1002 for p in pl:
1003 for p in pl:
1003 needed[p] = needed.get(p, 0) + 1
1004 needed[p] = needed.get(p, 0) + 1
1004 if p not in pcache:
1005 if p not in pcache:
1005 visit.append(p)
1006 visit.append(p)
1006
1007
1007 # 2nd DFS does the actual annotate
1008 # 2nd DFS does the actual annotate
1008 visit[:] = [base]
1009 visit[:] = [base]
1009 hist = {}
1010 hist = {}
1010 while visit:
1011 while visit:
1011 f = visit[-1]
1012 f = visit[-1]
1012 if f in hist:
1013 if f in hist:
1013 visit.pop()
1014 visit.pop()
1014 continue
1015 continue
1015
1016
1016 ready = True
1017 ready = True
1017 pl = pcache[f]
1018 pl = pcache[f]
1018 for p in pl:
1019 for p in pl:
1019 if p not in hist:
1020 if p not in hist:
1020 ready = False
1021 ready = False
1021 visit.append(p)
1022 visit.append(p)
1022 if ready:
1023 if ready:
1023 visit.pop()
1024 visit.pop()
1024 curr = decorate(f.data(), f)
1025 curr = decorate(f.data(), f)
1025 skipchild = False
1026 skipchild = False
1026 if skiprevs is not None:
1027 if skiprevs is not None:
1027 skipchild = f._changeid in skiprevs
1028 skipchild = f._changeid in skiprevs
1028 curr = _annotatepair([hist[p] for p in pl], f, curr, skipchild,
1029 curr = _annotatepair([hist[p] for p in pl], f, curr, skipchild,
1029 diffopts)
1030 diffopts)
1030 for p in pl:
1031 for p in pl:
1031 if needed[p] == 1:
1032 if needed[p] == 1:
1032 del hist[p]
1033 del hist[p]
1033 del needed[p]
1034 del needed[p]
1034 else:
1035 else:
1035 needed[p] -= 1
1036 needed[p] -= 1
1036
1037
1037 hist[f] = curr
1038 hist[f] = curr
1038 del pcache[f]
1039 del pcache[f]
1039
1040
1040 return zip(hist[base][0], hist[base][1].splitlines(True))
1041 return zip(hist[base][0], hist[base][1].splitlines(True))
1041
1042
def ancestors(self, followfirst=False):
    """Yield ancestor file contexts of this one, highest linkrev first.

    If ``followfirst`` is true, only the first parent of each visited
    context is followed.
    """
    # Map (linkrev, filenode) -> fctx. Popping the maximum key walks
    # ancestors in descending (linkrev, filenode) order and naturally
    # de-duplicates contexts reachable through several paths.
    pending = {}
    cut = 1 if followfirst else None
    ctx = self
    while True:
        for parent in ctx.parents()[:cut]:
            pending[(parent.linkrev(), parent.filenode())] = parent
        if not pending:
            break
        ctx = pending.pop(max(pending))
        yield ctx
1057
1058
def _annotatepair(parents, childfctx, child, skipchild, diffopts):
    r'''
    Given parent and child fctxes and annotate data for parents, for all lines
    in either parent that match the child, annotate the child with the parent's
    data.

    Additionally, if `skipchild` is True, replace all other lines with parent
    annotate data as well such that child is never blamed for any lines.

    >>> oldfctx = 'old'
    >>> p1fctx, p2fctx, childfctx = 'p1', 'p2', 'c'
    >>> olddata = 'a\nb\n'
    >>> p1data = 'a\nb\nc\n'
    >>> p2data = 'a\nc\nd\n'
    >>> childdata = 'a\nb2\nc\nc2\nd\n'
    >>> diffopts = mdiff.diffopts()

    >>> def decorate(text, rev):
    ...     return ([(rev, i) for i in xrange(1, text.count('\n') + 1)], text)

    Basic usage:

    >>> oldann = decorate(olddata, oldfctx)
    >>> p1ann = decorate(p1data, p1fctx)
    >>> p1ann = _annotatepair([oldann], p1fctx, p1ann, False, diffopts)
    >>> p1ann[0]
    [('old', 1), ('old', 2), ('p1', 3)]
    >>> p2ann = decorate(p2data, p2fctx)
    >>> p2ann = _annotatepair([oldann], p2fctx, p2ann, False, diffopts)
    >>> p2ann[0]
    [('old', 1), ('p2', 2), ('p2', 3)]

    Test with multiple parents (note the difference caused by ordering):

    >>> childann = decorate(childdata, childfctx)
    >>> childann = _annotatepair([p1ann, p2ann], childfctx, childann, False,
    ...                          diffopts)
    >>> childann[0]
    [('old', 1), ('c', 2), ('p2', 2), ('c', 4), ('p2', 3)]

    >>> childann = decorate(childdata, childfctx)
    >>> childann = _annotatepair([p2ann, p1ann], childfctx, childann, False,
    ...                          diffopts)
    >>> childann[0]
    [('old', 1), ('c', 2), ('p1', 3), ('c', 4), ('p2', 3)]

    Test with skipchild (note the difference caused by ordering):

    >>> childann = decorate(childdata, childfctx)
    >>> childann = _annotatepair([p1ann, p2ann], childfctx, childann, True,
    ...                          diffopts)
    >>> childann[0]
    [('old', 1), ('old', 2), ('p2', 2), ('p2', 2), ('p2', 3)]

    >>> childann = decorate(childdata, childfctx)
    >>> childann = _annotatepair([p2ann, p1ann], childfctx, childann, True,
    ...                          diffopts)
    >>> childann[0]
    [('old', 1), ('old', 2), ('p1', 3), ('p1', 3), ('p2', 3)]
    '''
    # One entry per parent: (annotate data, diff blocks against the child).
    pblocks = [(parent, mdiff.allblocks(parent[1], child[1], opts=diffopts))
               for parent in parents]

    if skipchild:
        # The blocks are consumed twice below -- materialize the generators.
        pblocks = [(p, list(blocks)) for (p, blocks) in pblocks]
    # Mercurial currently prefers p2 over p1 for annotate.
    # TODO: change this?
    for parent, blocks in pblocks:
        for (a1, a2, b1, b2), kind in blocks:
            # Changed blocks ('!') or blocks made only of blank lines ('~')
            # belong to the child.
            if kind == '=':
                child[0][b1:b2] = parent[0][a1:a2]

    if skipchild:
        # Now try and match up anything that couldn't be matched,
        # Reversing pblocks maintains bias towards p2, matching above
        # behavior.
        pblocks.reverse()

        # The heuristics are:
        # * Work on blocks of changed lines (effectively diff hunks with -U0).
        #   This could potentially be smarter but works well enough.
        # * For a non-matching section, do a best-effort fit. Match lines in
        #   diff hunks 1:1, dropping lines as necessary.
        # * Repeat the last line as a last resort.

        # First, replace as much as possible without repeating the last line.
        remaining = [(parent, []) for parent, _b in pblocks]
        for i, (parent, blocks) in enumerate(pblocks):
            for (a1, a2, b1, b2), _kind in blocks:
                if a2 - a1 >= b2 - b1:
                    for bidx in xrange(b1, b2):
                        if child[0][bidx][0] == childfctx:
                            aidx = min(a1 + (bidx - b1), a2 - 1)
                            child[0][bidx] = parent[0][aidx]
                else:
                    remaining[i][1].append((a1, a2, b1, b2))

        # Then, look at anything left, which might involve repeating the last
        # line.
        for parent, blocks in remaining:
            for a1, a2, b1, b2 in blocks:
                for bidx in xrange(b1, b2):
                    if child[0][bidx][0] == childfctx:
                        aidx = min(a1 + (bidx - b1), a2 - 1)
                        child[0][bidx] = parent[0][aidx]
    return child
1167
1168
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        # At least one anchor is required to locate the file revision.
        assert (changeid is not None
                or fileid is not None
                or changectx is not None), \
                ("bad args: changeid=%r, fileid=%r, changectx=%r"
                 % (changeid, fileid, changectx))

        # Only pre-populate the lazily computed attributes the caller
        # actually supplied; the rest are derived on demand.
        if filelog is not None:
            self._filelog = filelog

        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        try:
            return changectx(self._repo, self._changeid)
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to build
            # `changectx` for filtered revision. In such case we fallback to
            # creating `changectx` on the unfiltered version of the reposition.
            # This fallback should not be an issue because `changectx` from
            # `filectx` are not used in complex operations that care about
            # filtering.
            #
            # This fallback is a cheap and dirty fix that prevent several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better as "crash"
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when solving linkrev issue are on the table.
            return changectx(self._repo.unfiltered(), self._changeid)

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog, changeid=changeid)

    def rawdata(self):
        # revision data before any flag processing (e.g. censorship)
        return self._filelog.revision(self._filenode, raw=True)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        """Return the file content, honoring the censor policy."""
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config("censor", "policy") == "ignore":
                return ""
            raise error.Abort(_("censored node: %s") % short(self._filenode),
                              hint=_("set censor.policy to ignore errors"))

    def size(self):
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """
        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return renamed

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        # If either parent already carries this exact file revision, the
        # rename happened in an ancestor, not here.
        for parent in self._changectx.parents():
            try:
                if fnode == parent.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        childnodes = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=n,
                        filelog=self._filelog) for n in childnodes]
1273
1274
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        self._repo = repo
        self._rev = None
        self._node = None
        self._text = text
        # Only override the lazily computed defaults when a value was given.
        if date:
            self._date = util.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise error.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
        if self._extra['branch'] == '':
            self._extra['branch'] = 'default'

    def __bytes__(self):
        return bytes(self._parents[0]) + "+"

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func

    @propertycache
    def _flagfunc(self):
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    @propertycache
    def _status(self):
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        ui = self._repo.ui
        date = ui.configdate('devel', 'default-date')
        if date is None:
            date = util.makedate()
        return date

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        return None
    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)

    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def tags(self):
        return []

    def bookmarks(self):
        marks = []
        for p in self.parents():
            marks.extend(p.bookmarks())
        return marks

    def phase(self):
        # an uncommitted context is at least as "unstable" as its parents
        phase = phases.draft # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def walk(self, match):
        '''Generates matching file names.'''
        return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
                                               True, False))

    def matches(self, match):
        return sorted(self._repo.dirstate.matches(match))

    def ancestors(self):
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]):
            yield changectx(self._repo, a)

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """
        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.normal(f)
            for f in self.removed():
                self._repo.dirstate.drop(f)
            self._repo.dirstate.setparents(node)

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())

    def dirty(self, missing=False, merge=True, branch=True):
        return False
1470
1471
1471 class workingctx(committablectx):
1472 class workingctx(committablectx):
1472 """A workingctx object makes access to data related to
1473 """A workingctx object makes access to data related to
1473 the current working directory convenient.
1474 the current working directory convenient.
1474 date - any valid date string or (unixtime, offset), or None.
1475 date - any valid date string or (unixtime, offset), or None.
1475 user - username string, or None.
1476 user - username string, or None.
1476 extra - a dictionary of extra values, or None.
1477 extra - a dictionary of extra values, or None.
1477 changes - a list of file lists as returned by localrepo.status()
1478 changes - a list of file lists as returned by localrepo.status()
1478 or None to use the repository status.
1479 or None to use the repository status.
1479 """
1480 """
def __init__(self, repo, text="", user=None, date=None, extra=None,
             changes=None):
    """Initialize a working-directory context; see committablectx for
    the meaning of the arguments."""
    super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1483
1484
def __iter__(self):
    """Iterate over tracked file names, skipping entries in state 'r'
    (scheduled for removal)."""
    ds = self._repo.dirstate
    return (f for f in ds if ds[f] != 'r')
1489
1490
def __contains__(self, key):
    """True if ``key`` is tracked and not unknown ('?') or removed ('r')."""
    state = self._repo.dirstate[key]
    return state not in "?r"
1492
1493
def hex(self):
    """Return the hex form of the sentinel working-directory node id.

    Note: ``hex`` here resolves to the module-level node.hex, not this
    method.
    """
    return hex(wdirid)
1495
1496
@propertycache
def _parents(self):
    """The changectx parents of the working directory (one or two)."""
    nodes = self._repo.dirstate.parents()
    if nodes[1] == nullid:
        # a null second parent means there is really only one parent
        nodes = nodes[:-1]
    return [changectx(self._repo, n) for n in nodes]
1502
1503
def filectx(self, path, filelog=None):
    """get a file context from the working directory"""
    return workingfilectx(self._repo, path, workingctx=self,
                          filelog=filelog)
1507
1508
def dirty(self, missing=False, merge=True, branch=True):
    """Check whether the working directory is modified.

    ``merge`` counts an in-progress merge (a second parent) as dirty;
    ``branch`` counts a branch change relative to the first parent;
    ``missing`` additionally counts files deleted from disk.
    """
    # a dirty subrepo makes the whole working directory dirty
    for subpath in sorted(self.substate):
        if self.sub(subpath).dirty(missing=missing):
            return True
    # then look at the working directory itself
    return ((merge and self.p2()) or
            (branch and self.branch() != self.p1().branch()) or
            self.modified() or self.added() or self.removed() or
            (missing and self.deleted()))
1519
1520
def add(self, list, prefix=""):
    """Schedule the given repo-root-relative paths for addition.

    ``prefix`` is prepended when naming files to the user so warnings
    are shown relative to the cwd. Returns the paths that were rejected.
    """
    with self._repo.wlock():
        ui, ds = self._repo.ui, self._repo.dirstate
        uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
        rejected = []
        lstat = self._repo.wvfs.lstat
        for f in list:
            # ds.pathto() returns an absolute file when this is invoked from
            # the keyword extension. That gets flagged as non-portable on
            # Windows, since it contains the drive letter and colon.
            scmutil.checkportable(ui, os.path.join(prefix, f))
            try:
                st = lstat(f)
            except OSError:
                ui.warn(_("%s does not exist!\n") % uipath(f))
                rejected.append(f)
                continue
            if st.st_size > 10000000:
                ui.warn(_("%s: up to %d MB of RAM may be required "
                          "to manage this file\n"
                          "(use 'hg revert %s' to cancel the "
                          "pending addition)\n")
                        % (f, 3 * st.st_size // 1000000, uipath(f)))
            if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                ui.warn(_("%s not added: only files and symlinks "
                          "supported currently\n") % uipath(f))
                rejected.append(f)
            elif ds[f] in 'amn':
                ui.warn(_("%s already tracked!\n") % uipath(f))
            elif ds[f] == 'r':
                # previously removed: resurrect it instead of re-adding
                ds.normallookup(f)
            else:
                ds.add(f)
        return rejected
1551
1555
1552 def forget(self, files, prefix=""):
1556 def forget(self, files, prefix=""):
1553 join = lambda f: os.path.join(prefix, f)
1554 with self._repo.wlock():
1557 with self._repo.wlock():
1558 ds = self._repo.dirstate
1559 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1555 rejected = []
1560 rejected = []
1556 for f in files:
1561 for f in files:
1557 if f not in self._repo.dirstate:
1562 if f not in self._repo.dirstate:
1558 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1563 self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
1559 rejected.append(f)
1564 rejected.append(f)
1560 elif self._repo.dirstate[f] != 'a':
1565 elif self._repo.dirstate[f] != 'a':
1561 self._repo.dirstate.remove(f)
1566 self._repo.dirstate.remove(f)
1562 else:
1567 else:
1563 self._repo.dirstate.drop(f)
1568 self._repo.dirstate.drop(f)
1564 return rejected
1569 return rejected
1565
1570
1566 def undelete(self, list):
1571 def undelete(self, list):
1567 pctxs = self.parents()
1572 pctxs = self.parents()
1568 with self._repo.wlock():
1573 with self._repo.wlock():
1574 ds = self._repo.dirstate
1569 for f in list:
1575 for f in list:
1570 if self._repo.dirstate[f] != 'r':
1576 if self._repo.dirstate[f] != 'r':
1571 self._repo.ui.warn(_("%s not removed!\n") % f)
1577 self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
1572 else:
1578 else:
1573 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1579 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1574 t = fctx.data()
1580 t = fctx.data()
1575 self._repo.wwrite(f, t, fctx.flags())
1581 self._repo.wwrite(f, t, fctx.flags())
1576 self._repo.dirstate.normal(f)
1582 self._repo.dirstate.normal(f)
1577
1583
1578 def copy(self, source, dest):
1584 def copy(self, source, dest):
1579 try:
1585 try:
1580 st = self._repo.wvfs.lstat(dest)
1586 st = self._repo.wvfs.lstat(dest)
1581 except OSError as err:
1587 except OSError as err:
1582 if err.errno != errno.ENOENT:
1588 if err.errno != errno.ENOENT:
1583 raise
1589 raise
1584 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1590 self._repo.ui.warn(_("%s does not exist!\n")
1591 % self._repo.dirstate.pathto(dest))
1585 return
1592 return
1586 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1593 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1587 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1594 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1588 "symbolic link\n") % dest)
1595 "symbolic link\n")
1596 % self._repo.dirstate.pathto(dest))
1589 else:
1597 else:
1590 with self._repo.wlock():
1598 with self._repo.wlock():
1591 if self._repo.dirstate[dest] in '?':
1599 if self._repo.dirstate[dest] in '?':
1592 self._repo.dirstate.add(dest)
1600 self._repo.dirstate.add(dest)
1593 elif self._repo.dirstate[dest] in 'r':
1601 elif self._repo.dirstate[dest] in 'r':
1594 self._repo.dirstate.normallookup(dest)
1602 self._repo.dirstate.normallookup(dest)
1595 self._repo.dirstate.copy(source, dest)
1603 self._repo.dirstate.copy(source, dest)
1596
1604
1597 def match(self, pats=None, include=None, exclude=None, default='glob',
1605 def match(self, pats=None, include=None, exclude=None, default='glob',
1598 listsubrepos=False, badfn=None):
1606 listsubrepos=False, badfn=None):
1599 r = self._repo
1607 r = self._repo
1600
1608
1601 # Only a case insensitive filesystem needs magic to translate user input
1609 # Only a case insensitive filesystem needs magic to translate user input
1602 # to actual case in the filesystem.
1610 # to actual case in the filesystem.
1603 icasefs = not util.fscasesensitive(r.root)
1611 icasefs = not util.fscasesensitive(r.root)
1604 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1612 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1605 default, auditor=r.auditor, ctx=self,
1613 default, auditor=r.auditor, ctx=self,
1606 listsubrepos=listsubrepos, badfn=badfn,
1614 listsubrepos=listsubrepos, badfn=badfn,
1607 icasefs=icasefs)
1615 icasefs=icasefs)
1608
1616
1609 def _filtersuspectsymlink(self, files):
1617 def _filtersuspectsymlink(self, files):
1610 if not files or self._repo.dirstate._checklink:
1618 if not files or self._repo.dirstate._checklink:
1611 return files
1619 return files
1612
1620
1613 # Symlink placeholders may get non-symlink-like contents
1621 # Symlink placeholders may get non-symlink-like contents
1614 # via user error or dereferencing by NFS or Samba servers,
1622 # via user error or dereferencing by NFS or Samba servers,
1615 # so we filter out any placeholders that don't look like a
1623 # so we filter out any placeholders that don't look like a
1616 # symlink
1624 # symlink
1617 sane = []
1625 sane = []
1618 for f in files:
1626 for f in files:
1619 if self.flags(f) == 'l':
1627 if self.flags(f) == 'l':
1620 d = self[f].data()
1628 d = self[f].data()
1621 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1629 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1622 self._repo.ui.debug('ignoring suspect symlink placeholder'
1630 self._repo.ui.debug('ignoring suspect symlink placeholder'
1623 ' "%s"\n' % f)
1631 ' "%s"\n' % f)
1624 continue
1632 continue
1625 sane.append(f)
1633 sane.append(f)
1626 return sane
1634 return sane
1627
1635
1628 def _checklookup(self, files):
1636 def _checklookup(self, files):
1629 # check for any possibly clean files
1637 # check for any possibly clean files
1630 if not files:
1638 if not files:
1631 return [], [], []
1639 return [], [], []
1632
1640
1633 modified = []
1641 modified = []
1634 deleted = []
1642 deleted = []
1635 fixup = []
1643 fixup = []
1636 pctx = self._parents[0]
1644 pctx = self._parents[0]
1637 # do a full compare of any files that might have changed
1645 # do a full compare of any files that might have changed
1638 for f in sorted(files):
1646 for f in sorted(files):
1639 try:
1647 try:
1640 # This will return True for a file that got replaced by a
1648 # This will return True for a file that got replaced by a
1641 # directory in the interim, but fixing that is pretty hard.
1649 # directory in the interim, but fixing that is pretty hard.
1642 if (f not in pctx or self.flags(f) != pctx.flags(f)
1650 if (f not in pctx or self.flags(f) != pctx.flags(f)
1643 or pctx[f].cmp(self[f])):
1651 or pctx[f].cmp(self[f])):
1644 modified.append(f)
1652 modified.append(f)
1645 else:
1653 else:
1646 fixup.append(f)
1654 fixup.append(f)
1647 except (IOError, OSError):
1655 except (IOError, OSError):
1648 # A file become inaccessible in between? Mark it as deleted,
1656 # A file become inaccessible in between? Mark it as deleted,
1649 # matching dirstate behavior (issue5584).
1657 # matching dirstate behavior (issue5584).
1650 # The dirstate has more complex behavior around whether a
1658 # The dirstate has more complex behavior around whether a
1651 # missing file matches a directory, etc, but we don't need to
1659 # missing file matches a directory, etc, but we don't need to
1652 # bother with that: if f has made it to this point, we're sure
1660 # bother with that: if f has made it to this point, we're sure
1653 # it's in the dirstate.
1661 # it's in the dirstate.
1654 deleted.append(f)
1662 deleted.append(f)
1655
1663
1656 return modified, deleted, fixup
1664 return modified, deleted, fixup
1657
1665
1658 def _poststatusfixup(self, status, fixup):
1666 def _poststatusfixup(self, status, fixup):
1659 """update dirstate for files that are actually clean"""
1667 """update dirstate for files that are actually clean"""
1660 poststatus = self._repo.postdsstatus()
1668 poststatus = self._repo.postdsstatus()
1661 if fixup or poststatus:
1669 if fixup or poststatus:
1662 try:
1670 try:
1663 oldid = self._repo.dirstate.identity()
1671 oldid = self._repo.dirstate.identity()
1664
1672
1665 # updating the dirstate is optional
1673 # updating the dirstate is optional
1666 # so we don't wait on the lock
1674 # so we don't wait on the lock
1667 # wlock can invalidate the dirstate, so cache normal _after_
1675 # wlock can invalidate the dirstate, so cache normal _after_
1668 # taking the lock
1676 # taking the lock
1669 with self._repo.wlock(False):
1677 with self._repo.wlock(False):
1670 if self._repo.dirstate.identity() == oldid:
1678 if self._repo.dirstate.identity() == oldid:
1671 if fixup:
1679 if fixup:
1672 normal = self._repo.dirstate.normal
1680 normal = self._repo.dirstate.normal
1673 for f in fixup:
1681 for f in fixup:
1674 normal(f)
1682 normal(f)
1675 # write changes out explicitly, because nesting
1683 # write changes out explicitly, because nesting
1676 # wlock at runtime may prevent 'wlock.release()'
1684 # wlock at runtime may prevent 'wlock.release()'
1677 # after this block from doing so for subsequent
1685 # after this block from doing so for subsequent
1678 # changing files
1686 # changing files
1679 tr = self._repo.currenttransaction()
1687 tr = self._repo.currenttransaction()
1680 self._repo.dirstate.write(tr)
1688 self._repo.dirstate.write(tr)
1681
1689
1682 if poststatus:
1690 if poststatus:
1683 for ps in poststatus:
1691 for ps in poststatus:
1684 ps(self, status)
1692 ps(self, status)
1685 else:
1693 else:
1686 # in this case, writing changes out breaks
1694 # in this case, writing changes out breaks
1687 # consistency, because .hg/dirstate was
1695 # consistency, because .hg/dirstate was
1688 # already changed simultaneously after last
1696 # already changed simultaneously after last
1689 # caching (see also issue5584 for detail)
1697 # caching (see also issue5584 for detail)
1690 self._repo.ui.debug('skip updating dirstate: '
1698 self._repo.ui.debug('skip updating dirstate: '
1691 'identity mismatch\n')
1699 'identity mismatch\n')
1692 except error.LockError:
1700 except error.LockError:
1693 pass
1701 pass
1694 finally:
1702 finally:
1695 # Even if the wlock couldn't be grabbed, clear out the list.
1703 # Even if the wlock couldn't be grabbed, clear out the list.
1696 self._repo.clearpostdsstatus()
1704 self._repo.clearpostdsstatus()
1697
1705
1698 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1706 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1699 unknown=False):
1707 unknown=False):
1700 '''Gets the status from the dirstate -- internal use only.'''
1708 '''Gets the status from the dirstate -- internal use only.'''
1701 listignored, listclean, listunknown = ignored, clean, unknown
1709 listignored, listclean, listunknown = ignored, clean, unknown
1702 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1710 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1703 subrepos = []
1711 subrepos = []
1704 if '.hgsub' in self:
1712 if '.hgsub' in self:
1705 subrepos = sorted(self.substate)
1713 subrepos = sorted(self.substate)
1706 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1714 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1707 listclean, listunknown)
1715 listclean, listunknown)
1708
1716
1709 # check for any possibly clean files
1717 # check for any possibly clean files
1710 fixup = []
1718 fixup = []
1711 if cmp:
1719 if cmp:
1712 modified2, deleted2, fixup = self._checklookup(cmp)
1720 modified2, deleted2, fixup = self._checklookup(cmp)
1713 s.modified.extend(modified2)
1721 s.modified.extend(modified2)
1714 s.deleted.extend(deleted2)
1722 s.deleted.extend(deleted2)
1715
1723
1716 if fixup and listclean:
1724 if fixup and listclean:
1717 s.clean.extend(fixup)
1725 s.clean.extend(fixup)
1718
1726
1719 self._poststatusfixup(s, fixup)
1727 self._poststatusfixup(s, fixup)
1720
1728
1721 if match.always():
1729 if match.always():
1722 # cache for performance
1730 # cache for performance
1723 if s.unknown or s.ignored or s.clean:
1731 if s.unknown or s.ignored or s.clean:
1724 # "_status" is cached with list*=False in the normal route
1732 # "_status" is cached with list*=False in the normal route
1725 self._status = scmutil.status(s.modified, s.added, s.removed,
1733 self._status = scmutil.status(s.modified, s.added, s.removed,
1726 s.deleted, [], [], [])
1734 s.deleted, [], [], [])
1727 else:
1735 else:
1728 self._status = s
1736 self._status = s
1729
1737
1730 return s
1738 return s
1731
1739
1732 @propertycache
1740 @propertycache
1733 def _manifest(self):
1741 def _manifest(self):
1734 """generate a manifest corresponding to the values in self._status
1742 """generate a manifest corresponding to the values in self._status
1735
1743
1736 This reuse the file nodeid from parent, but we use special node
1744 This reuse the file nodeid from parent, but we use special node
1737 identifiers for added and modified files. This is used by manifests
1745 identifiers for added and modified files. This is used by manifests
1738 merge to see that files are different and by update logic to avoid
1746 merge to see that files are different and by update logic to avoid
1739 deleting newly added files.
1747 deleting newly added files.
1740 """
1748 """
1741 return self._buildstatusmanifest(self._status)
1749 return self._buildstatusmanifest(self._status)
1742
1750
1743 def _buildstatusmanifest(self, status):
1751 def _buildstatusmanifest(self, status):
1744 """Builds a manifest that includes the given status results."""
1752 """Builds a manifest that includes the given status results."""
1745 parents = self.parents()
1753 parents = self.parents()
1746
1754
1747 man = parents[0].manifest().copy()
1755 man = parents[0].manifest().copy()
1748
1756
1749 ff = self._flagfunc
1757 ff = self._flagfunc
1750 for i, l in ((addednodeid, status.added),
1758 for i, l in ((addednodeid, status.added),
1751 (modifiednodeid, status.modified)):
1759 (modifiednodeid, status.modified)):
1752 for f in l:
1760 for f in l:
1753 man[f] = i
1761 man[f] = i
1754 try:
1762 try:
1755 man.setflag(f, ff(f))
1763 man.setflag(f, ff(f))
1756 except OSError:
1764 except OSError:
1757 pass
1765 pass
1758
1766
1759 for f in status.deleted + status.removed:
1767 for f in status.deleted + status.removed:
1760 if f in man:
1768 if f in man:
1761 del man[f]
1769 del man[f]
1762
1770
1763 return man
1771 return man
1764
1772
1765 def _buildstatus(self, other, s, match, listignored, listclean,
1773 def _buildstatus(self, other, s, match, listignored, listclean,
1766 listunknown):
1774 listunknown):
1767 """build a status with respect to another context
1775 """build a status with respect to another context
1768
1776
1769 This includes logic for maintaining the fast path of status when
1777 This includes logic for maintaining the fast path of status when
1770 comparing the working directory against its parent, which is to skip
1778 comparing the working directory against its parent, which is to skip
1771 building a new manifest if self (working directory) is not comparing
1779 building a new manifest if self (working directory) is not comparing
1772 against its parent (repo['.']).
1780 against its parent (repo['.']).
1773 """
1781 """
1774 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1782 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1775 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1783 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1776 # might have accidentally ended up with the entire contents of the file
1784 # might have accidentally ended up with the entire contents of the file
1777 # they are supposed to be linking to.
1785 # they are supposed to be linking to.
1778 s.modified[:] = self._filtersuspectsymlink(s.modified)
1786 s.modified[:] = self._filtersuspectsymlink(s.modified)
1779 if other != self._repo['.']:
1787 if other != self._repo['.']:
1780 s = super(workingctx, self)._buildstatus(other, s, match,
1788 s = super(workingctx, self)._buildstatus(other, s, match,
1781 listignored, listclean,
1789 listignored, listclean,
1782 listunknown)
1790 listunknown)
1783 return s
1791 return s
1784
1792
1785 def _matchstatus(self, other, match):
1793 def _matchstatus(self, other, match):
1786 """override the match method with a filter for directory patterns
1794 """override the match method with a filter for directory patterns
1787
1795
1788 We use inheritance to customize the match.bad method only in cases of
1796 We use inheritance to customize the match.bad method only in cases of
1789 workingctx since it belongs only to the working directory when
1797 workingctx since it belongs only to the working directory when
1790 comparing against the parent changeset.
1798 comparing against the parent changeset.
1791
1799
1792 If we aren't comparing against the working directory's parent, then we
1800 If we aren't comparing against the working directory's parent, then we
1793 just use the default match object sent to us.
1801 just use the default match object sent to us.
1794 """
1802 """
1795 superself = super(workingctx, self)
1803 superself = super(workingctx, self)
1796 match = superself._matchstatus(other, match)
1804 match = superself._matchstatus(other, match)
1797 if other != self._repo['.']:
1805 if other != self._repo['.']:
1798 def bad(f, msg):
1806 def bad(f, msg):
1799 # 'f' may be a directory pattern from 'match.files()',
1807 # 'f' may be a directory pattern from 'match.files()',
1800 # so 'f not in ctx1' is not enough
1808 # so 'f not in ctx1' is not enough
1801 if f not in other and not other.hasdir(f):
1809 if f not in other and not other.hasdir(f):
1802 self._repo.ui.warn('%s: %s\n' %
1810 self._repo.ui.warn('%s: %s\n' %
1803 (self._repo.dirstate.pathto(f), msg))
1811 (self._repo.dirstate.pathto(f), msg))
1804 match.bad = bad
1812 match.bad = bad
1805 return match
1813 return match
1806
1814
1807 def markcommitted(self, node):
1815 def markcommitted(self, node):
1808 super(workingctx, self).markcommitted(node)
1816 super(workingctx, self).markcommitted(node)
1809
1817
1810 sparse.aftercommit(self._repo, node)
1818 sparse.aftercommit(self._repo, node)
1811
1819
1812 class committablefilectx(basefilectx):
1820 class committablefilectx(basefilectx):
1813 """A committablefilectx provides common functionality for a file context
1821 """A committablefilectx provides common functionality for a file context
1814 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1822 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1815 def __init__(self, repo, path, filelog=None, ctx=None):
1823 def __init__(self, repo, path, filelog=None, ctx=None):
1816 self._repo = repo
1824 self._repo = repo
1817 self._path = path
1825 self._path = path
1818 self._changeid = None
1826 self._changeid = None
1819 self._filerev = self._filenode = None
1827 self._filerev = self._filenode = None
1820
1828
1821 if filelog is not None:
1829 if filelog is not None:
1822 self._filelog = filelog
1830 self._filelog = filelog
1823 if ctx:
1831 if ctx:
1824 self._changectx = ctx
1832 self._changectx = ctx
1825
1833
1826 def __nonzero__(self):
1834 def __nonzero__(self):
1827 return True
1835 return True
1828
1836
1829 __bool__ = __nonzero__
1837 __bool__ = __nonzero__
1830
1838
1831 def linkrev(self):
1839 def linkrev(self):
1832 # linked to self._changectx no matter if file is modified or not
1840 # linked to self._changectx no matter if file is modified or not
1833 return self.rev()
1841 return self.rev()
1834
1842
1835 def parents(self):
1843 def parents(self):
1836 '''return parent filectxs, following copies if necessary'''
1844 '''return parent filectxs, following copies if necessary'''
1837 def filenode(ctx, path):
1845 def filenode(ctx, path):
1838 return ctx._manifest.get(path, nullid)
1846 return ctx._manifest.get(path, nullid)
1839
1847
1840 path = self._path
1848 path = self._path
1841 fl = self._filelog
1849 fl = self._filelog
1842 pcl = self._changectx._parents
1850 pcl = self._changectx._parents
1843 renamed = self.renamed()
1851 renamed = self.renamed()
1844
1852
1845 if renamed:
1853 if renamed:
1846 pl = [renamed + (None,)]
1854 pl = [renamed + (None,)]
1847 else:
1855 else:
1848 pl = [(path, filenode(pcl[0], path), fl)]
1856 pl = [(path, filenode(pcl[0], path), fl)]
1849
1857
1850 for pc in pcl[1:]:
1858 for pc in pcl[1:]:
1851 pl.append((path, filenode(pc, path), fl))
1859 pl.append((path, filenode(pc, path), fl))
1852
1860
1853 return [self._parentfilectx(p, fileid=n, filelog=l)
1861 return [self._parentfilectx(p, fileid=n, filelog=l)
1854 for p, n, l in pl if n != nullid]
1862 for p, n, l in pl if n != nullid]
1855
1863
1856 def children(self):
1864 def children(self):
1857 return []
1865 return []
1858
1866
1859 class workingfilectx(committablefilectx):
1867 class workingfilectx(committablefilectx):
1860 """A workingfilectx object makes access to data related to a particular
1868 """A workingfilectx object makes access to data related to a particular
1861 file in the working directory convenient."""
1869 file in the working directory convenient."""
1862 def __init__(self, repo, path, filelog=None, workingctx=None):
1870 def __init__(self, repo, path, filelog=None, workingctx=None):
1863 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1871 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1864
1872
1865 @propertycache
1873 @propertycache
1866 def _changectx(self):
1874 def _changectx(self):
1867 return workingctx(self._repo)
1875 return workingctx(self._repo)
1868
1876
1869 def data(self):
1877 def data(self):
1870 return self._repo.wread(self._path)
1878 return self._repo.wread(self._path)
1871 def renamed(self):
1879 def renamed(self):
1872 rp = self._repo.dirstate.copied(self._path)
1880 rp = self._repo.dirstate.copied(self._path)
1873 if not rp:
1881 if not rp:
1874 return None
1882 return None
1875 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1883 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1876
1884
1877 def size(self):
1885 def size(self):
1878 return self._repo.wvfs.lstat(self._path).st_size
1886 return self._repo.wvfs.lstat(self._path).st_size
1879 def date(self):
1887 def date(self):
1880 t, tz = self._changectx.date()
1888 t, tz = self._changectx.date()
1881 try:
1889 try:
1882 return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
1890 return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
1883 except OSError as err:
1891 except OSError as err:
1884 if err.errno != errno.ENOENT:
1892 if err.errno != errno.ENOENT:
1885 raise
1893 raise
1886 return (t, tz)
1894 return (t, tz)
1887
1895
1888 def exists(self):
1896 def exists(self):
1889 return self._repo.wvfs.exists(self._path)
1897 return self._repo.wvfs.exists(self._path)
1890
1898
1891 def lexists(self):
1899 def lexists(self):
1892 return self._repo.wvfs.lexists(self._path)
1900 return self._repo.wvfs.lexists(self._path)
1893
1901
1894 def audit(self):
1902 def audit(self):
1895 return self._repo.wvfs.audit(self._path)
1903 return self._repo.wvfs.audit(self._path)
1896
1904
1897 def cmp(self, fctx):
1905 def cmp(self, fctx):
1898 """compare with other file context
1906 """compare with other file context
1899
1907
1900 returns True if different than fctx.
1908 returns True if different than fctx.
1901 """
1909 """
1902 # fctx should be a filectx (not a workingfilectx)
1910 # fctx should be a filectx (not a workingfilectx)
1903 # invert comparison to reuse the same code path
1911 # invert comparison to reuse the same code path
1904 return fctx.cmp(self)
1912 return fctx.cmp(self)
1905
1913
1906 def remove(self, ignoremissing=False):
1914 def remove(self, ignoremissing=False):
1907 """wraps unlink for a repo's working directory"""
1915 """wraps unlink for a repo's working directory"""
1908 self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing)
1916 self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing)
1909
1917
1910 def write(self, data, flags, backgroundclose=False):
1918 def write(self, data, flags, backgroundclose=False):
1911 """wraps repo.wwrite"""
1919 """wraps repo.wwrite"""
1912 self._repo.wwrite(self._path, data, flags,
1920 self._repo.wwrite(self._path, data, flags,
1913 backgroundclose=backgroundclose)
1921 backgroundclose=backgroundclose)
1914
1922
1915 def setflags(self, l, x):
1923 def setflags(self, l, x):
1916 self._repo.wvfs.setflags(self._path, l, x)
1924 self._repo.wvfs.setflags(self._path, l, x)
1917
1925
1918 class workingcommitctx(workingctx):
1926 class workingcommitctx(workingctx):
1919 """A workingcommitctx object makes access to data related to
1927 """A workingcommitctx object makes access to data related to
1920 the revision being committed convenient.
1928 the revision being committed convenient.
1921
1929
1922 This hides changes in the working directory, if they aren't
1930 This hides changes in the working directory, if they aren't
1923 committed in this context.
1931 committed in this context.
1924 """
1932 """
1925 def __init__(self, repo, changes,
1933 def __init__(self, repo, changes,
1926 text="", user=None, date=None, extra=None):
1934 text="", user=None, date=None, extra=None):
1927 super(workingctx, self).__init__(repo, text, user, date, extra,
1935 super(workingctx, self).__init__(repo, text, user, date, extra,
1928 changes)
1936 changes)
1929
1937
1930 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1938 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1931 unknown=False):
1939 unknown=False):
1932 """Return matched files only in ``self._status``
1940 """Return matched files only in ``self._status``
1933
1941
1934 Uncommitted files appear "clean" via this context, even if
1942 Uncommitted files appear "clean" via this context, even if
1935 they aren't actually so in the working directory.
1943 they aren't actually so in the working directory.
1936 """
1944 """
1937 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1945 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1938 if clean:
1946 if clean:
1939 clean = [f for f in self._manifest if f not in self._changedset]
1947 clean = [f for f in self._manifest if f not in self._changedset]
1940 else:
1948 else:
1941 clean = []
1949 clean = []
1942 return scmutil.status([f for f in self._status.modified if match(f)],
1950 return scmutil.status([f for f in self._status.modified if match(f)],
1943 [f for f in self._status.added if match(f)],
1951 [f for f in self._status.added if match(f)],
1944 [f for f in self._status.removed if match(f)],
1952 [f for f in self._status.removed if match(f)],
1945 [], [], [], clean)
1953 [], [], [], clean)
1946
1954
1947 @propertycache
1955 @propertycache
1948 def _changedset(self):
1956 def _changedset(self):
1949 """Return the set of files changed in this context
1957 """Return the set of files changed in this context
1950 """
1958 """
1951 changed = set(self._status.modified)
1959 changed = set(self._status.modified)
1952 changed.update(self._status.added)
1960 changed.update(self._status.added)
1953 changed.update(self._status.removed)
1961 changed.update(self._status.removed)
1954 return changed
1962 return changed
1955
1963
1956 def makecachingfilectxfn(func):
1964 def makecachingfilectxfn(func):
1957 """Create a filectxfn that caches based on the path.
1965 """Create a filectxfn that caches based on the path.
1958
1966
1959 We can't use util.cachefunc because it uses all arguments as the cache
1967 We can't use util.cachefunc because it uses all arguments as the cache
1960 key and this creates a cycle since the arguments include the repo and
1968 key and this creates a cycle since the arguments include the repo and
1961 memctx.
1969 memctx.
1962 """
1970 """
1963 cache = {}
1971 cache = {}
1964
1972
1965 def getfilectx(repo, memctx, path):
1973 def getfilectx(repo, memctx, path):
1966 if path not in cache:
1974 if path not in cache:
1967 cache[path] = func(repo, memctx, path)
1975 cache[path] = func(repo, memctx, path)
1968 return cache[path]
1976 return cache[path]
1969
1977
1970 return getfilectx
1978 return getfilectx
1971
1979
1972 def memfilefromctx(ctx):
1980 def memfilefromctx(ctx):
1973 """Given a context return a memfilectx for ctx[path]
1981 """Given a context return a memfilectx for ctx[path]
1974
1982
1975 This is a convenience method for building a memctx based on another
1983 This is a convenience method for building a memctx based on another
1976 context.
1984 context.
1977 """
1985 """
1978 def getfilectx(repo, memctx, path):
1986 def getfilectx(repo, memctx, path):
1979 fctx = ctx[path]
1987 fctx = ctx[path]
1980 # this is weird but apparently we only keep track of one parent
1988 # this is weird but apparently we only keep track of one parent
1981 # (why not only store that instead of a tuple?)
1989 # (why not only store that instead of a tuple?)
1982 copied = fctx.renamed()
1990 copied = fctx.renamed()
1983 if copied:
1991 if copied:
1984 copied = copied[0]
1992 copied = copied[0]
1985 return memfilectx(repo, path, fctx.data(),
1993 return memfilectx(repo, path, fctx.data(),
1986 islink=fctx.islink(), isexec=fctx.isexec(),
1994 islink=fctx.islink(), isexec=fctx.isexec(),
1987 copied=copied, memctx=memctx)
1995 copied=copied, memctx=memctx)
1988
1996
1989 return getfilectx
1997 return getfilectx
1990
1998
1991 def memfilefrompatch(patchstore):
1999 def memfilefrompatch(patchstore):
1992 """Given a patch (e.g. patchstore object) return a memfilectx
2000 """Given a patch (e.g. patchstore object) return a memfilectx
1993
2001
1994 This is a convenience method for building a memctx based on a patchstore.
2002 This is a convenience method for building a memctx based on a patchstore.
1995 """
2003 """
1996 def getfilectx(repo, memctx, path):
2004 def getfilectx(repo, memctx, path):
1997 data, mode, copied = patchstore.getfile(path)
2005 data, mode, copied = patchstore.getfile(path)
1998 if data is None:
2006 if data is None:
1999 return None
2007 return None
2000 islink, isexec = mode
2008 islink, isexec = mode
2001 return memfilectx(repo, path, data, islink=islink,
2009 return memfilectx(repo, path, data, islink=islink,
2002 isexec=isexec, copied=copied,
2010 isexec=isexec, copied=copied,
2003 memctx=memctx)
2011 memctx=memctx)
2004
2012
2005 return getfilectx
2013 return getfilectx
2006
2014
2007 class memctx(committablectx):
2015 class memctx(committablectx):
2008 """Use memctx to perform in-memory commits via localrepo.commitctx().
2016 """Use memctx to perform in-memory commits via localrepo.commitctx().
2009
2017
2010 Revision information is supplied at initialization time while
2018 Revision information is supplied at initialization time while
2011 related files data and is made available through a callback
2019 related files data and is made available through a callback
2012 mechanism. 'repo' is the current localrepo, 'parents' is a
2020 mechanism. 'repo' is the current localrepo, 'parents' is a
2013 sequence of two parent revisions identifiers (pass None for every
2021 sequence of two parent revisions identifiers (pass None for every
2014 missing parent), 'text' is the commit message and 'files' lists
2022 missing parent), 'text' is the commit message and 'files' lists
2015 names of files touched by the revision (normalized and relative to
2023 names of files touched by the revision (normalized and relative to
2016 repository root).
2024 repository root).
2017
2025
2018 filectxfn(repo, memctx, path) is a callable receiving the
2026 filectxfn(repo, memctx, path) is a callable receiving the
2019 repository, the current memctx object and the normalized path of
2027 repository, the current memctx object and the normalized path of
2020 requested file, relative to repository root. It is fired by the
2028 requested file, relative to repository root. It is fired by the
2021 commit function for every file in 'files', but calls order is
2029 commit function for every file in 'files', but calls order is
2022 undefined. If the file is available in the revision being
2030 undefined. If the file is available in the revision being
2023 committed (updated or added), filectxfn returns a memfilectx
2031 committed (updated or added), filectxfn returns a memfilectx
2024 object. If the file was removed, filectxfn return None for recent
2032 object. If the file was removed, filectxfn return None for recent
2025 Mercurial. Moved files are represented by marking the source file
2033 Mercurial. Moved files are represented by marking the source file
2026 removed and the new file added with copy information (see
2034 removed and the new file added with copy information (see
2027 memfilectx).
2035 memfilectx).
2028
2036
2029 user receives the committer name and defaults to current
2037 user receives the committer name and defaults to current
2030 repository username, date is the commit date in any format
2038 repository username, date is the commit date in any format
2031 supported by util.parsedate() and defaults to current date, extra
2039 supported by util.parsedate() and defaults to current date, extra
2032 is a dictionary of metadata or is left empty.
2040 is a dictionary of metadata or is left empty.
2033 """
2041 """
2034
2042
2035 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2043 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2036 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2044 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2037 # this field to determine what to do in filectxfn.
2045 # this field to determine what to do in filectxfn.
2038 _returnnoneformissingfiles = True
2046 _returnnoneformissingfiles = True
2039
2047
2040 def __init__(self, repo, parents, text, files, filectxfn, user=None,
2048 def __init__(self, repo, parents, text, files, filectxfn, user=None,
2041 date=None, extra=None, branch=None, editor=False):
2049 date=None, extra=None, branch=None, editor=False):
2042 super(memctx, self).__init__(repo, text, user, date, extra)
2050 super(memctx, self).__init__(repo, text, user, date, extra)
2043 self._rev = None
2051 self._rev = None
2044 self._node = None
2052 self._node = None
2045 parents = [(p or nullid) for p in parents]
2053 parents = [(p or nullid) for p in parents]
2046 p1, p2 = parents
2054 p1, p2 = parents
2047 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
2055 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
2048 files = sorted(set(files))
2056 files = sorted(set(files))
2049 self._files = files
2057 self._files = files
2050 if branch is not None:
2058 if branch is not None:
2051 self._extra['branch'] = encoding.fromlocal(branch)
2059 self._extra['branch'] = encoding.fromlocal(branch)
2052 self.substate = {}
2060 self.substate = {}
2053
2061
2054 if isinstance(filectxfn, patch.filestore):
2062 if isinstance(filectxfn, patch.filestore):
2055 filectxfn = memfilefrompatch(filectxfn)
2063 filectxfn = memfilefrompatch(filectxfn)
2056 elif not callable(filectxfn):
2064 elif not callable(filectxfn):
2057 # if store is not callable, wrap it in a function
2065 # if store is not callable, wrap it in a function
2058 filectxfn = memfilefromctx(filectxfn)
2066 filectxfn = memfilefromctx(filectxfn)
2059
2067
2060 # memoizing increases performance for e.g. vcs convert scenarios.
2068 # memoizing increases performance for e.g. vcs convert scenarios.
2061 self._filectxfn = makecachingfilectxfn(filectxfn)
2069 self._filectxfn = makecachingfilectxfn(filectxfn)
2062
2070
2063 if editor:
2071 if editor:
2064 self._text = editor(self._repo, self, [])
2072 self._text = editor(self._repo, self, [])
2065 self._repo.savecommitmessage(self._text)
2073 self._repo.savecommitmessage(self._text)
2066
2074
2067 def filectx(self, path, filelog=None):
2075 def filectx(self, path, filelog=None):
2068 """get a file context from the working directory
2076 """get a file context from the working directory
2069
2077
2070 Returns None if file doesn't exist and should be removed."""
2078 Returns None if file doesn't exist and should be removed."""
2071 return self._filectxfn(self._repo, self, path)
2079 return self._filectxfn(self._repo, self, path)
2072
2080
2073 def commit(self):
2081 def commit(self):
2074 """commit context to the repo"""
2082 """commit context to the repo"""
2075 return self._repo.commitctx(self)
2083 return self._repo.commitctx(self)
2076
2084
2077 @propertycache
2085 @propertycache
2078 def _manifest(self):
2086 def _manifest(self):
2079 """generate a manifest based on the return values of filectxfn"""
2087 """generate a manifest based on the return values of filectxfn"""
2080
2088
2081 # keep this simple for now; just worry about p1
2089 # keep this simple for now; just worry about p1
2082 pctx = self._parents[0]
2090 pctx = self._parents[0]
2083 man = pctx.manifest().copy()
2091 man = pctx.manifest().copy()
2084
2092
2085 for f in self._status.modified:
2093 for f in self._status.modified:
2086 p1node = nullid
2094 p1node = nullid
2087 p2node = nullid
2095 p2node = nullid
2088 p = pctx[f].parents() # if file isn't in pctx, check p2?
2096 p = pctx[f].parents() # if file isn't in pctx, check p2?
2089 if len(p) > 0:
2097 if len(p) > 0:
2090 p1node = p[0].filenode()
2098 p1node = p[0].filenode()
2091 if len(p) > 1:
2099 if len(p) > 1:
2092 p2node = p[1].filenode()
2100 p2node = p[1].filenode()
2093 man[f] = revlog.hash(self[f].data(), p1node, p2node)
2101 man[f] = revlog.hash(self[f].data(), p1node, p2node)
2094
2102
2095 for f in self._status.added:
2103 for f in self._status.added:
2096 man[f] = revlog.hash(self[f].data(), nullid, nullid)
2104 man[f] = revlog.hash(self[f].data(), nullid, nullid)
2097
2105
2098 for f in self._status.removed:
2106 for f in self._status.removed:
2099 if f in man:
2107 if f in man:
2100 del man[f]
2108 del man[f]
2101
2109
2102 return man
2110 return man
2103
2111
2104 @propertycache
2112 @propertycache
2105 def _status(self):
2113 def _status(self):
2106 """Calculate exact status from ``files`` specified at construction
2114 """Calculate exact status from ``files`` specified at construction
2107 """
2115 """
2108 man1 = self.p1().manifest()
2116 man1 = self.p1().manifest()
2109 p2 = self._parents[1]
2117 p2 = self._parents[1]
2110 # "1 < len(self._parents)" can't be used for checking
2118 # "1 < len(self._parents)" can't be used for checking
2111 # existence of the 2nd parent, because "memctx._parents" is
2119 # existence of the 2nd parent, because "memctx._parents" is
2112 # explicitly initialized by the list, of which length is 2.
2120 # explicitly initialized by the list, of which length is 2.
2113 if p2.node() != nullid:
2121 if p2.node() != nullid:
2114 man2 = p2.manifest()
2122 man2 = p2.manifest()
2115 managing = lambda f: f in man1 or f in man2
2123 managing = lambda f: f in man1 or f in man2
2116 else:
2124 else:
2117 managing = lambda f: f in man1
2125 managing = lambda f: f in man1
2118
2126
2119 modified, added, removed = [], [], []
2127 modified, added, removed = [], [], []
2120 for f in self._files:
2128 for f in self._files:
2121 if not managing(f):
2129 if not managing(f):
2122 added.append(f)
2130 added.append(f)
2123 elif self[f]:
2131 elif self[f]:
2124 modified.append(f)
2132 modified.append(f)
2125 else:
2133 else:
2126 removed.append(f)
2134 removed.append(f)
2127
2135
2128 return scmutil.status(modified, added, removed, [], [], [], [])
2136 return scmutil.status(modified, added, removed, [], [], [], [])
2129
2137
2130 class memfilectx(committablefilectx):
2138 class memfilectx(committablefilectx):
2131 """memfilectx represents an in-memory file to commit.
2139 """memfilectx represents an in-memory file to commit.
2132
2140
2133 See memctx and committablefilectx for more details.
2141 See memctx and committablefilectx for more details.
2134 """
2142 """
2135 def __init__(self, repo, path, data, islink=False,
2143 def __init__(self, repo, path, data, islink=False,
2136 isexec=False, copied=None, memctx=None):
2144 isexec=False, copied=None, memctx=None):
2137 """
2145 """
2138 path is the normalized file path relative to repository root.
2146 path is the normalized file path relative to repository root.
2139 data is the file content as a string.
2147 data is the file content as a string.
2140 islink is True if the file is a symbolic link.
2148 islink is True if the file is a symbolic link.
2141 isexec is True if the file is executable.
2149 isexec is True if the file is executable.
2142 copied is the source file path if current file was copied in the
2150 copied is the source file path if current file was copied in the
2143 revision being committed, or None."""
2151 revision being committed, or None."""
2144 super(memfilectx, self).__init__(repo, path, None, memctx)
2152 super(memfilectx, self).__init__(repo, path, None, memctx)
2145 self._data = data
2153 self._data = data
2146 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
2154 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
2147 self._copied = None
2155 self._copied = None
2148 if copied:
2156 if copied:
2149 self._copied = (copied, nullid)
2157 self._copied = (copied, nullid)
2150
2158
2151 def data(self):
2159 def data(self):
2152 return self._data
2160 return self._data
2153
2161
2154 def remove(self, ignoremissing=False):
2162 def remove(self, ignoremissing=False):
2155 """wraps unlink for a repo's working directory"""
2163 """wraps unlink for a repo's working directory"""
2156 # need to figure out what to do here
2164 # need to figure out what to do here
2157 del self._changectx[self._path]
2165 del self._changectx[self._path]
2158
2166
2159 def write(self, data, flags):
2167 def write(self, data, flags):
2160 """wraps repo.wwrite"""
2168 """wraps repo.wwrite"""
2161 self._data = data
2169 self._data = data
2162
2170
2163 class overlayfilectx(committablefilectx):
2171 class overlayfilectx(committablefilectx):
2164 """Like memfilectx but take an original filectx and optional parameters to
2172 """Like memfilectx but take an original filectx and optional parameters to
2165 override parts of it. This is useful when fctx.data() is expensive (i.e.
2173 override parts of it. This is useful when fctx.data() is expensive (i.e.
2166 flag processor is expensive) and raw data, flags, and filenode could be
2174 flag processor is expensive) and raw data, flags, and filenode could be
2167 reused (ex. rebase or mode-only amend a REVIDX_EXTSTORED file).
2175 reused (ex. rebase or mode-only amend a REVIDX_EXTSTORED file).
2168 """
2176 """
2169
2177
2170 def __init__(self, originalfctx, datafunc=None, path=None, flags=None,
2178 def __init__(self, originalfctx, datafunc=None, path=None, flags=None,
2171 copied=None, ctx=None):
2179 copied=None, ctx=None):
2172 """originalfctx: filecontext to duplicate
2180 """originalfctx: filecontext to duplicate
2173
2181
2174 datafunc: None or a function to override data (file content). It is a
2182 datafunc: None or a function to override data (file content). It is a
2175 function to be lazy. path, flags, copied, ctx: None or overridden value
2183 function to be lazy. path, flags, copied, ctx: None or overridden value
2176
2184
2177 copied could be (path, rev), or False. copied could also be just path,
2185 copied could be (path, rev), or False. copied could also be just path,
2178 and will be converted to (path, nullid). This simplifies some callers.
2186 and will be converted to (path, nullid). This simplifies some callers.
2179 """
2187 """
2180
2188
2181 if path is None:
2189 if path is None:
2182 path = originalfctx.path()
2190 path = originalfctx.path()
2183 if ctx is None:
2191 if ctx is None:
2184 ctx = originalfctx.changectx()
2192 ctx = originalfctx.changectx()
2185 ctxmatch = lambda: True
2193 ctxmatch = lambda: True
2186 else:
2194 else:
2187 ctxmatch = lambda: ctx == originalfctx.changectx()
2195 ctxmatch = lambda: ctx == originalfctx.changectx()
2188
2196
2189 repo = originalfctx.repo()
2197 repo = originalfctx.repo()
2190 flog = originalfctx.filelog()
2198 flog = originalfctx.filelog()
2191 super(overlayfilectx, self).__init__(repo, path, flog, ctx)
2199 super(overlayfilectx, self).__init__(repo, path, flog, ctx)
2192
2200
2193 if copied is None:
2201 if copied is None:
2194 copied = originalfctx.renamed()
2202 copied = originalfctx.renamed()
2195 copiedmatch = lambda: True
2203 copiedmatch = lambda: True
2196 else:
2204 else:
2197 if copied and not isinstance(copied, tuple):
2205 if copied and not isinstance(copied, tuple):
2198 # repo._filecommit will recalculate copyrev so nullid is okay
2206 # repo._filecommit will recalculate copyrev so nullid is okay
2199 copied = (copied, nullid)
2207 copied = (copied, nullid)
2200 copiedmatch = lambda: copied == originalfctx.renamed()
2208 copiedmatch = lambda: copied == originalfctx.renamed()
2201
2209
2202 # When data, copied (could affect data), ctx (could affect filelog
2210 # When data, copied (could affect data), ctx (could affect filelog
2203 # parents) are not overridden, rawdata, rawflags, and filenode may be
2211 # parents) are not overridden, rawdata, rawflags, and filenode may be
2204 # reused (repo._filecommit should double check filelog parents).
2212 # reused (repo._filecommit should double check filelog parents).
2205 #
2213 #
2206 # path, flags are not hashed in filelog (but in manifestlog) so they do
2214 # path, flags are not hashed in filelog (but in manifestlog) so they do
2207 # not affect reusable here.
2215 # not affect reusable here.
2208 #
2216 #
2209 # If ctx or copied is overridden to a same value with originalfctx,
2217 # If ctx or copied is overridden to a same value with originalfctx,
2210 # still consider it's reusable. originalfctx.renamed() may be a bit
2218 # still consider it's reusable. originalfctx.renamed() may be a bit
2211 # expensive so it's not called unless necessary. Assuming datafunc is
2219 # expensive so it's not called unless necessary. Assuming datafunc is
2212 # always expensive, do not call it for this "reusable" test.
2220 # always expensive, do not call it for this "reusable" test.
2213 reusable = datafunc is None and ctxmatch() and copiedmatch()
2221 reusable = datafunc is None and ctxmatch() and copiedmatch()
2214
2222
2215 if datafunc is None:
2223 if datafunc is None:
2216 datafunc = originalfctx.data
2224 datafunc = originalfctx.data
2217 if flags is None:
2225 if flags is None:
2218 flags = originalfctx.flags()
2226 flags = originalfctx.flags()
2219
2227
2220 self._datafunc = datafunc
2228 self._datafunc = datafunc
2221 self._flags = flags
2229 self._flags = flags
2222 self._copied = copied
2230 self._copied = copied
2223
2231
2224 if reusable:
2232 if reusable:
2225 # copy extra fields from originalfctx
2233 # copy extra fields from originalfctx
2226 attrs = ['rawdata', 'rawflags', '_filenode', '_filerev']
2234 attrs = ['rawdata', 'rawflags', '_filenode', '_filerev']
2227 for attr in attrs:
2235 for attr in attrs:
2228 if util.safehasattr(originalfctx, attr):
2236 if util.safehasattr(originalfctx, attr):
2229 setattr(self, attr, getattr(originalfctx, attr))
2237 setattr(self, attr, getattr(originalfctx, attr))
2230
2238
2231 def data(self):
2239 def data(self):
2232 return self._datafunc()
2240 return self._datafunc()
2233
2241
2234 class metadataonlyctx(committablectx):
2242 class metadataonlyctx(committablectx):
2235 """Like memctx but it's reusing the manifest of different commit.
2243 """Like memctx but it's reusing the manifest of different commit.
2236 Intended to be used by lightweight operations that are creating
2244 Intended to be used by lightweight operations that are creating
2237 metadata-only changes.
2245 metadata-only changes.
2238
2246
2239 Revision information is supplied at initialization time. 'repo' is the
2247 Revision information is supplied at initialization time. 'repo' is the
2240 current localrepo, 'ctx' is original revision which manifest we're reuisng
2248 current localrepo, 'ctx' is original revision which manifest we're reuisng
2241 'parents' is a sequence of two parent revisions identifiers (pass None for
2249 'parents' is a sequence of two parent revisions identifiers (pass None for
2242 every missing parent), 'text' is the commit.
2250 every missing parent), 'text' is the commit.
2243
2251
2244 user receives the committer name and defaults to current repository
2252 user receives the committer name and defaults to current repository
2245 username, date is the commit date in any format supported by
2253 username, date is the commit date in any format supported by
2246 util.parsedate() and defaults to current date, extra is a dictionary of
2254 util.parsedate() and defaults to current date, extra is a dictionary of
2247 metadata or is left empty.
2255 metadata or is left empty.
2248 """
2256 """
2249 def __new__(cls, repo, originalctx, *args, **kwargs):
2257 def __new__(cls, repo, originalctx, *args, **kwargs):
2250 return super(metadataonlyctx, cls).__new__(cls, repo)
2258 return super(metadataonlyctx, cls).__new__(cls, repo)
2251
2259
2252 def __init__(self, repo, originalctx, parents, text, user=None, date=None,
2260 def __init__(self, repo, originalctx, parents, text, user=None, date=None,
2253 extra=None, editor=False):
2261 extra=None, editor=False):
2254 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2262 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2255 self._rev = None
2263 self._rev = None
2256 self._node = None
2264 self._node = None
2257 self._originalctx = originalctx
2265 self._originalctx = originalctx
2258 self._manifestnode = originalctx.manifestnode()
2266 self._manifestnode = originalctx.manifestnode()
2259 parents = [(p or nullid) for p in parents]
2267 parents = [(p or nullid) for p in parents]
2260 p1, p2 = self._parents = [changectx(self._repo, p) for p in parents]
2268 p1, p2 = self._parents = [changectx(self._repo, p) for p in parents]
2261
2269
2262 # sanity check to ensure that the reused manifest parents are
2270 # sanity check to ensure that the reused manifest parents are
2263 # manifests of our commit parents
2271 # manifests of our commit parents
2264 mp1, mp2 = self.manifestctx().parents
2272 mp1, mp2 = self.manifestctx().parents
2265 if p1 != nullid and p1.manifestnode() != mp1:
2273 if p1 != nullid and p1.manifestnode() != mp1:
2266 raise RuntimeError('can\'t reuse the manifest: '
2274 raise RuntimeError('can\'t reuse the manifest: '
2267 'its p1 doesn\'t match the new ctx p1')
2275 'its p1 doesn\'t match the new ctx p1')
2268 if p2 != nullid and p2.manifestnode() != mp2:
2276 if p2 != nullid and p2.manifestnode() != mp2:
2269 raise RuntimeError('can\'t reuse the manifest: '
2277 raise RuntimeError('can\'t reuse the manifest: '
2270 'its p2 doesn\'t match the new ctx p2')
2278 'its p2 doesn\'t match the new ctx p2')
2271
2279
2272 self._files = originalctx.files()
2280 self._files = originalctx.files()
2273 self.substate = {}
2281 self.substate = {}
2274
2282
2275 if editor:
2283 if editor:
2276 self._text = editor(self._repo, self, [])
2284 self._text = editor(self._repo, self, [])
2277 self._repo.savecommitmessage(self._text)
2285 self._repo.savecommitmessage(self._text)
2278
2286
2279 def manifestnode(self):
2287 def manifestnode(self):
2280 return self._manifestnode
2288 return self._manifestnode
2281
2289
2282 @property
2290 @property
2283 def _manifestctx(self):
2291 def _manifestctx(self):
2284 return self._repo.manifestlog[self._manifestnode]
2292 return self._repo.manifestlog[self._manifestnode]
2285
2293
2286 def filectx(self, path, filelog=None):
2294 def filectx(self, path, filelog=None):
2287 return self._originalctx.filectx(path, filelog=filelog)
2295 return self._originalctx.filectx(path, filelog=filelog)
2288
2296
2289 def commit(self):
2297 def commit(self):
2290 """commit context to the repo"""
2298 """commit context to the repo"""
2291 return self._repo.commitctx(self)
2299 return self._repo.commitctx(self)
2292
2300
2293 @property
2301 @property
2294 def _manifest(self):
2302 def _manifest(self):
2295 return self._originalctx.manifest()
2303 return self._originalctx.manifest()
2296
2304
2297 @propertycache
2305 @propertycache
2298 def _status(self):
2306 def _status(self):
2299 """Calculate exact status from ``files`` specified in the ``origctx``
2307 """Calculate exact status from ``files`` specified in the ``origctx``
2300 and parents manifests.
2308 and parents manifests.
2301 """
2309 """
2302 man1 = self.p1().manifest()
2310 man1 = self.p1().manifest()
2303 p2 = self._parents[1]
2311 p2 = self._parents[1]
2304 # "1 < len(self._parents)" can't be used for checking
2312 # "1 < len(self._parents)" can't be used for checking
2305 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2313 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2306 # explicitly initialized by the list, of which length is 2.
2314 # explicitly initialized by the list, of which length is 2.
2307 if p2.node() != nullid:
2315 if p2.node() != nullid:
2308 man2 = p2.manifest()
2316 man2 = p2.manifest()
2309 managing = lambda f: f in man1 or f in man2
2317 managing = lambda f: f in man1 or f in man2
2310 else:
2318 else:
2311 managing = lambda f: f in man1
2319 managing = lambda f: f in man1
2312
2320
2313 modified, added, removed = [], [], []
2321 modified, added, removed = [], [], []
2314 for f in self._files:
2322 for f in self._files:
2315 if not managing(f):
2323 if not managing(f):
2316 added.append(f)
2324 added.append(f)
2317 elif self[f]:
2325 elif self[f]:
2318 modified.append(f)
2326 modified.append(f)
2319 else:
2327 else:
2320 removed.append(f)
2328 removed.append(f)
2321
2329
2322 return scmutil.status(modified, added, removed, [], [], [], [])
2330 return scmutil.status(modified, added, removed, [], [], [], [])
@@ -1,246 +1,251 b''
1 $ hg init a
1 $ hg init a
2 $ cd a
2 $ cd a
3 $ echo a > a
3 $ echo a > a
4 $ hg add -n
4 $ hg add -n
5 adding a
5 adding a
6 $ hg st
6 $ hg st
7 ? a
7 ? a
8 $ hg add
8 $ hg add
9 adding a
9 adding a
10 $ hg st
10 $ hg st
11 A a
11 A a
12 $ hg forget a
12 $ hg forget a
13 $ hg add
13 $ hg add
14 adding a
14 adding a
15 $ hg st
15 $ hg st
16 A a
16 A a
17 $ mkdir dir
18 $ cd dir
19 $ hg add ../a
20 ../a already tracked!
21 $ cd ..
17
22
18 $ echo b > b
23 $ echo b > b
19 $ hg add -n b
24 $ hg add -n b
20 $ hg st
25 $ hg st
21 A a
26 A a
22 ? b
27 ? b
23 $ hg add b
28 $ hg add b
24 $ hg st
29 $ hg st
25 A a
30 A a
26 A b
31 A b
27
32
28 should fail
33 should fail
29
34
30 $ hg add b
35 $ hg add b
31 b already tracked!
36 b already tracked!
32 $ hg st
37 $ hg st
33 A a
38 A a
34 A b
39 A b
35
40
36 #if no-windows
41 #if no-windows
37 $ echo foo > con.xml
42 $ echo foo > con.xml
38 $ hg --config ui.portablefilenames=jump add con.xml
43 $ hg --config ui.portablefilenames=jump add con.xml
39 abort: ui.portablefilenames value is invalid ('jump')
44 abort: ui.portablefilenames value is invalid ('jump')
40 [255]
45 [255]
41 $ hg --config ui.portablefilenames=abort add con.xml
46 $ hg --config ui.portablefilenames=abort add con.xml
42 abort: filename contains 'con', which is reserved on Windows: 'con.xml'
47 abort: filename contains 'con', which is reserved on Windows: 'con.xml'
43 [255]
48 [255]
44 $ hg st
49 $ hg st
45 A a
50 A a
46 A b
51 A b
47 ? con.xml
52 ? con.xml
48 $ hg add con.xml
53 $ hg add con.xml
49 warning: filename contains 'con', which is reserved on Windows: 'con.xml'
54 warning: filename contains 'con', which is reserved on Windows: 'con.xml'
50 $ hg st
55 $ hg st
51 A a
56 A a
52 A b
57 A b
53 A con.xml
58 A con.xml
54 $ hg forget con.xml
59 $ hg forget con.xml
55 $ rm con.xml
60 $ rm con.xml
56 #endif
61 #endif
57
62
58 #if eol-in-paths
63 #if eol-in-paths
59 $ echo bla > 'hello:world'
64 $ echo bla > 'hello:world'
60 $ hg --config ui.portablefilenames=abort add
65 $ hg --config ui.portablefilenames=abort add
61 adding hello:world
66 adding hello:world
62 abort: filename contains ':', which is reserved on Windows: 'hello:world'
67 abort: filename contains ':', which is reserved on Windows: 'hello:world'
63 [255]
68 [255]
64 $ hg st
69 $ hg st
65 A a
70 A a
66 A b
71 A b
67 ? hello:world
72 ? hello:world
68 $ hg --config ui.portablefilenames=ignore add
73 $ hg --config ui.portablefilenames=ignore add
69 adding hello:world
74 adding hello:world
70 $ hg st
75 $ hg st
71 A a
76 A a
72 A b
77 A b
73 A hello:world
78 A hello:world
74 #endif
79 #endif
75
80
76 $ hg ci -m 0 --traceback
81 $ hg ci -m 0 --traceback
77
82
78 $ hg log -r "heads(. or wdir() & file('**'))"
83 $ hg log -r "heads(. or wdir() & file('**'))"
79 changeset: 0:* (glob)
84 changeset: 0:* (glob)
80 tag: tip
85 tag: tip
81 user: test
86 user: test
82 date: Thu Jan 01 00:00:00 1970 +0000
87 date: Thu Jan 01 00:00:00 1970 +0000
83 summary: 0
88 summary: 0
84
89
85 should fail
90 should fail
86
91
87 $ hg add a
92 $ hg add a
88 a already tracked!
93 a already tracked!
89
94
90 $ echo aa > a
95 $ echo aa > a
91 $ hg ci -m 1
96 $ hg ci -m 1
92 $ hg up 0
97 $ hg up 0
93 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
98 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
94 $ echo aaa > a
99 $ echo aaa > a
95 $ hg ci -m 2
100 $ hg ci -m 2
96 created new head
101 created new head
97
102
98 $ hg merge
103 $ hg merge
99 merging a
104 merging a
100 warning: conflicts while merging a! (edit, then use 'hg resolve --mark')
105 warning: conflicts while merging a! (edit, then use 'hg resolve --mark')
101 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
106 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
102 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
107 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
103 [1]
108 [1]
104 $ hg st
109 $ hg st
105 M a
110 M a
106 ? a.orig
111 ? a.orig
107
112
108 wdir doesn't cause a crash, and can be dynamically selected if dirty
113 wdir doesn't cause a crash, and can be dynamically selected if dirty
109
114
110 $ hg log -r "heads(. or wdir() & file('**'))"
115 $ hg log -r "heads(. or wdir() & file('**'))"
111 changeset: 2147483647:ffffffffffff
116 changeset: 2147483647:ffffffffffff
112 parent: 2:* (glob)
117 parent: 2:* (glob)
113 parent: 1:* (glob)
118 parent: 1:* (glob)
114 user: test
119 user: test
115 date: * (glob)
120 date: * (glob)
116
121
117 should fail
122 should fail
118
123
119 $ hg add a
124 $ hg add a
120 a already tracked!
125 a already tracked!
121 $ hg st
126 $ hg st
122 M a
127 M a
123 ? a.orig
128 ? a.orig
124 $ hg resolve -m a
129 $ hg resolve -m a
125 (no more unresolved files)
130 (no more unresolved files)
126 $ hg ci -m merge
131 $ hg ci -m merge
127
132
128 Issue683: peculiarity with hg revert of an removed then added file
133 Issue683: peculiarity with hg revert of an removed then added file
129
134
130 $ hg forget a
135 $ hg forget a
131 $ hg add a
136 $ hg add a
132 $ hg st
137 $ hg st
133 ? a.orig
138 ? a.orig
134 $ hg rm a
139 $ hg rm a
135 $ hg st
140 $ hg st
136 R a
141 R a
137 ? a.orig
142 ? a.orig
138 $ echo a > a
143 $ echo a > a
139 $ hg add a
144 $ hg add a
140 $ hg st
145 $ hg st
141 M a
146 M a
142 ? a.orig
147 ? a.orig
143
148
144 Forgotten file can be added back (as either clean or modified)
149 Forgotten file can be added back (as either clean or modified)
145
150
146 $ hg forget b
151 $ hg forget b
147 $ hg add b
152 $ hg add b
148 $ hg st -A b
153 $ hg st -A b
149 C b
154 C b
150 $ hg forget b
155 $ hg forget b
151 $ echo modified > b
156 $ echo modified > b
152 $ hg add b
157 $ hg add b
153 $ hg st -A b
158 $ hg st -A b
154 M b
159 M b
155 $ hg revert -qC b
160 $ hg revert -qC b
156
161
157 $ hg add c && echo "unexpected addition of missing file"
162 $ hg add c && echo "unexpected addition of missing file"
158 c: * (glob)
163 c: * (glob)
159 [1]
164 [1]
160 $ echo c > c
165 $ echo c > c
161 $ hg add d c && echo "unexpected addition of missing file"
166 $ hg add d c && echo "unexpected addition of missing file"
162 d: * (glob)
167 d: * (glob)
163 [1]
168 [1]
164 $ hg st
169 $ hg st
165 M a
170 M a
166 A c
171 A c
167 ? a.orig
172 ? a.orig
168 $ hg up -C
173 $ hg up -C
169 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
174 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
170
175
171 forget and get should have the right order: added but missing dir should be
176 forget and get should have the right order: added but missing dir should be
172 forgotten before file with same name is added
177 forgotten before file with same name is added
173
178
174 $ echo file d > d
179 $ echo file d > d
175 $ hg add d
180 $ hg add d
176 $ hg ci -md
181 $ hg ci -md
177 $ hg rm d
182 $ hg rm d
178 $ mkdir d
183 $ mkdir d
179 $ echo a > d/a
184 $ echo a > d/a
180 $ hg add d/a
185 $ hg add d/a
181 $ rm -r d
186 $ rm -r d
182 $ hg up -C
187 $ hg up -C
183 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
188 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
184 $ cat d
189 $ cat d
185 file d
190 file d
186
191
187 Test that adding a directory doesn't require case matching (issue4578)
192 Test that adding a directory doesn't require case matching (issue4578)
188 #if icasefs
193 #if icasefs
189 $ mkdir -p CapsDir1/CapsDir
194 $ mkdir -p CapsDir1/CapsDir
190 $ echo abc > CapsDir1/CapsDir/AbC.txt
195 $ echo abc > CapsDir1/CapsDir/AbC.txt
191 $ mkdir CapsDir1/CapsDir/SubDir
196 $ mkdir CapsDir1/CapsDir/SubDir
192 $ echo def > CapsDir1/CapsDir/SubDir/Def.txt
197 $ echo def > CapsDir1/CapsDir/SubDir/Def.txt
193
198
194 $ hg add capsdir1/capsdir
199 $ hg add capsdir1/capsdir
195 adding CapsDir1/CapsDir/AbC.txt (glob)
200 adding CapsDir1/CapsDir/AbC.txt (glob)
196 adding CapsDir1/CapsDir/SubDir/Def.txt (glob)
201 adding CapsDir1/CapsDir/SubDir/Def.txt (glob)
197
202
198 $ hg forget capsdir1/capsdir/abc.txt
203 $ hg forget capsdir1/capsdir/abc.txt
199
204
200 $ hg forget capsdir1/capsdir
205 $ hg forget capsdir1/capsdir
201 removing CapsDir1/CapsDir/SubDir/Def.txt (glob)
206 removing CapsDir1/CapsDir/SubDir/Def.txt (glob)
202
207
203 $ hg add capsdir1
208 $ hg add capsdir1
204 adding CapsDir1/CapsDir/AbC.txt (glob)
209 adding CapsDir1/CapsDir/AbC.txt (glob)
205 adding CapsDir1/CapsDir/SubDir/Def.txt (glob)
210 adding CapsDir1/CapsDir/SubDir/Def.txt (glob)
206
211
207 $ hg ci -m "AbCDef" capsdir1/capsdir
212 $ hg ci -m "AbCDef" capsdir1/capsdir
208
213
209 $ hg status -A capsdir1/capsdir
214 $ hg status -A capsdir1/capsdir
210 C CapsDir1/CapsDir/AbC.txt
215 C CapsDir1/CapsDir/AbC.txt
211 C CapsDir1/CapsDir/SubDir/Def.txt
216 C CapsDir1/CapsDir/SubDir/Def.txt
212
217
213 $ hg files capsdir1/capsdir
218 $ hg files capsdir1/capsdir
214 CapsDir1/CapsDir/AbC.txt (glob)
219 CapsDir1/CapsDir/AbC.txt (glob)
215 CapsDir1/CapsDir/SubDir/Def.txt (glob)
220 CapsDir1/CapsDir/SubDir/Def.txt (glob)
216
221
217 $ echo xyz > CapsDir1/CapsDir/SubDir/Def.txt
222 $ echo xyz > CapsDir1/CapsDir/SubDir/Def.txt
218 $ hg ci -m xyz capsdir1/capsdir/subdir/def.txt
223 $ hg ci -m xyz capsdir1/capsdir/subdir/def.txt
219
224
220 $ hg revert -r '.^' capsdir1/capsdir
225 $ hg revert -r '.^' capsdir1/capsdir
221 reverting CapsDir1/CapsDir/SubDir/Def.txt (glob)
226 reverting CapsDir1/CapsDir/SubDir/Def.txt (glob)
222
227
223 The conditional tests above mean the hash on the diff line differs on Windows
228 The conditional tests above mean the hash on the diff line differs on Windows
224 and OS X
229 and OS X
225 $ hg diff capsdir1/capsdir
230 $ hg diff capsdir1/capsdir
226 diff -r * CapsDir1/CapsDir/SubDir/Def.txt (glob)
231 diff -r * CapsDir1/CapsDir/SubDir/Def.txt (glob)
227 --- a/CapsDir1/CapsDir/SubDir/Def.txt Thu Jan 01 00:00:00 1970 +0000
232 --- a/CapsDir1/CapsDir/SubDir/Def.txt Thu Jan 01 00:00:00 1970 +0000
228 +++ b/CapsDir1/CapsDir/SubDir/Def.txt * (glob)
233 +++ b/CapsDir1/CapsDir/SubDir/Def.txt * (glob)
229 @@ -1,1 +1,1 @@
234 @@ -1,1 +1,1 @@
230 -xyz
235 -xyz
231 +def
236 +def
232
237
233 $ hg mv CapsDir1/CapsDir/abc.txt CapsDir1/CapsDir/ABC.txt
238 $ hg mv CapsDir1/CapsDir/abc.txt CapsDir1/CapsDir/ABC.txt
234 $ hg ci -m "case changing rename" CapsDir1/CapsDir/AbC.txt CapsDir1/CapsDir/ABC.txt
239 $ hg ci -m "case changing rename" CapsDir1/CapsDir/AbC.txt CapsDir1/CapsDir/ABC.txt
235
240
236 $ hg status -A capsdir1/capsdir
241 $ hg status -A capsdir1/capsdir
237 M CapsDir1/CapsDir/SubDir/Def.txt
242 M CapsDir1/CapsDir/SubDir/Def.txt
238 C CapsDir1/CapsDir/ABC.txt
243 C CapsDir1/CapsDir/ABC.txt
239
244
240 $ hg remove -f 'glob:**.txt' -X capsdir1/capsdir
245 $ hg remove -f 'glob:**.txt' -X capsdir1/capsdir
241 $ hg remove -f 'glob:**.txt' -I capsdir1/capsdir
246 $ hg remove -f 'glob:**.txt' -I capsdir1/capsdir
242 removing CapsDir1/CapsDir/ABC.txt (glob)
247 removing CapsDir1/CapsDir/ABC.txt (glob)
243 removing CapsDir1/CapsDir/SubDir/Def.txt (glob)
248 removing CapsDir1/CapsDir/SubDir/Def.txt (glob)
244 #endif
249 #endif
245
250
246 $ cd ..
251 $ cd ..
General Comments 0
You need to be logged in to leave comments. Login now