context: work around `long` not existing on Python 3...
Augie Fackler -
r31343:ff2f9050 default
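The change adds a pycompat import and guards the Python 2-only `long` check behind `pycompat.ispy3`, so that on Python 3 (where the name `long` does not exist) the isinstance test is short-circuited and the name is never looked up. Below is a minimal sketch of the same pattern; the `ispy3` flag and `normalize_changeid` helper are illustrative stand-ins, not Mercurial's actual pycompat API:

    import sys

    # Stand-in for mercurial's pycompat.ispy3; illustrative only.
    ispy3 = sys.version_info[0] >= 3

    def normalize_changeid(changeid):
        # On Python 2, a revision number can arrive as a `long`; turn it into
        # a str so the string-based lookup paths can handle it. On Python 3
        # the name `long` does not exist, but names are resolved at runtime
        # and `and` short-circuits, so the isinstance test is never evaluated
        # there and no NameError is raised.
        if not ispy3 and isinstance(changeid, long):
            changeid = str(changeid)
        return changeid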
@@ -1,2105 +1,2106 @@
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import os
11 import os
12 import re
12 import re
13 import stat
13 import stat
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 addednodeid,
17 addednodeid,
18 bin,
18 bin,
19 hex,
19 hex,
20 modifiednodeid,
20 modifiednodeid,
21 nullid,
21 nullid,
22 nullrev,
22 nullrev,
23 short,
23 short,
24 wdirid,
24 wdirid,
25 wdirnodes,
25 wdirnodes,
26 )
26 )
27 from . import (
27 from . import (
28 encoding,
28 encoding,
29 error,
29 error,
30 fileset,
30 fileset,
31 match as matchmod,
31 match as matchmod,
32 mdiff,
32 mdiff,
33 obsolete as obsmod,
33 obsolete as obsmod,
34 patch,
34 patch,
35 phases,
35 phases,
36 pycompat,
36 repoview,
37 repoview,
37 revlog,
38 revlog,
38 scmutil,
39 scmutil,
39 subrepo,
40 subrepo,
40 util,
41 util,
41 )
42 )
42
43
43 propertycache = util.propertycache
44 propertycache = util.propertycache
44
45
45 nonascii = re.compile(r'[^\x21-\x7f]').search
46 nonascii = re.compile(r'[^\x21-\x7f]').search
46
47
47 class basectx(object):
48 class basectx(object):
48 """A basectx object represents the common logic for its children:
49 """A basectx object represents the common logic for its children:
49 changectx: read-only context that is already present in the repo,
50 changectx: read-only context that is already present in the repo,
50 workingctx: a context that represents the working directory and can
51 workingctx: a context that represents the working directory and can
51 be committed,
52 be committed,
52 memctx: a context that represents changes in-memory and can also
53 memctx: a context that represents changes in-memory and can also
53 be committed."""
54 be committed."""
54 def __new__(cls, repo, changeid='', *args, **kwargs):
55 def __new__(cls, repo, changeid='', *args, **kwargs):
55 if isinstance(changeid, basectx):
56 if isinstance(changeid, basectx):
56 return changeid
57 return changeid
57
58
58 o = super(basectx, cls).__new__(cls)
59 o = super(basectx, cls).__new__(cls)
59
60
60 o._repo = repo
61 o._repo = repo
61 o._rev = nullrev
62 o._rev = nullrev
62 o._node = nullid
63 o._node = nullid
63
64
64 return o
65 return o
65
66
66 def __str__(self):
67 def __str__(self):
67 return short(self.node())
68 return short(self.node())
68
69
69 def __int__(self):
70 def __int__(self):
70 return self.rev()
71 return self.rev()
71
72
72 def __repr__(self):
73 def __repr__(self):
73 return "<%s %s>" % (type(self).__name__, str(self))
74 return "<%s %s>" % (type(self).__name__, str(self))
74
75
75 def __eq__(self, other):
76 def __eq__(self, other):
76 try:
77 try:
77 return type(self) == type(other) and self._rev == other._rev
78 return type(self) == type(other) and self._rev == other._rev
78 except AttributeError:
79 except AttributeError:
79 return False
80 return False
80
81
81 def __ne__(self, other):
82 def __ne__(self, other):
82 return not (self == other)
83 return not (self == other)
83
84
84 def __contains__(self, key):
85 def __contains__(self, key):
85 return key in self._manifest
86 return key in self._manifest
86
87
87 def __getitem__(self, key):
88 def __getitem__(self, key):
88 return self.filectx(key)
89 return self.filectx(key)
89
90
90 def __iter__(self):
91 def __iter__(self):
91 return iter(self._manifest)
92 return iter(self._manifest)
92
93
93 def _buildstatusmanifest(self, status):
94 def _buildstatusmanifest(self, status):
94 """Builds a manifest that includes the given status results, if this is
95 """Builds a manifest that includes the given status results, if this is
95 a working copy context. For non-working copy contexts, it just returns
96 a working copy context. For non-working copy contexts, it just returns
96 the normal manifest."""
97 the normal manifest."""
97 return self.manifest()
98 return self.manifest()
98
99
99 def _matchstatus(self, other, match):
100 def _matchstatus(self, other, match):
100 """return match.always if match is none
101 """return match.always if match is none
101
102
102 This internal method provides a way for child objects to override the
103 This internal method provides a way for child objects to override the
103 match operator.
104 match operator.
104 """
105 """
105 return match or matchmod.always(self._repo.root, self._repo.getcwd())
106 return match or matchmod.always(self._repo.root, self._repo.getcwd())
106
107
107 def _buildstatus(self, other, s, match, listignored, listclean,
108 def _buildstatus(self, other, s, match, listignored, listclean,
108 listunknown):
109 listunknown):
109 """build a status with respect to another context"""
110 """build a status with respect to another context"""
110 # Load earliest manifest first for caching reasons. More specifically,
111 # Load earliest manifest first for caching reasons. More specifically,
111 # if you have revisions 1000 and 1001, 1001 is probably stored as a
112 # if you have revisions 1000 and 1001, 1001 is probably stored as a
112 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
113 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
113 # 1000 and cache it so that when you read 1001, we just need to apply a
114 # 1000 and cache it so that when you read 1001, we just need to apply a
114 # delta to what's in the cache. So that's one full reconstruction + one
115 # delta to what's in the cache. So that's one full reconstruction + one
115 # delta application.
116 # delta application.
116 mf2 = None
117 mf2 = None
117 if self.rev() is not None and self.rev() < other.rev():
118 if self.rev() is not None and self.rev() < other.rev():
118 mf2 = self._buildstatusmanifest(s)
119 mf2 = self._buildstatusmanifest(s)
119 mf1 = other._buildstatusmanifest(s)
120 mf1 = other._buildstatusmanifest(s)
120 if mf2 is None:
121 if mf2 is None:
121 mf2 = self._buildstatusmanifest(s)
122 mf2 = self._buildstatusmanifest(s)
122
123
123 modified, added = [], []
124 modified, added = [], []
124 removed = []
125 removed = []
125 clean = []
126 clean = []
126 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
127 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
127 deletedset = set(deleted)
128 deletedset = set(deleted)
128 d = mf1.diff(mf2, match=match, clean=listclean)
129 d = mf1.diff(mf2, match=match, clean=listclean)
129 for fn, value in d.iteritems():
130 for fn, value in d.iteritems():
130 if fn in deletedset:
131 if fn in deletedset:
131 continue
132 continue
132 if value is None:
133 if value is None:
133 clean.append(fn)
134 clean.append(fn)
134 continue
135 continue
135 (node1, flag1), (node2, flag2) = value
136 (node1, flag1), (node2, flag2) = value
136 if node1 is None:
137 if node1 is None:
137 added.append(fn)
138 added.append(fn)
138 elif node2 is None:
139 elif node2 is None:
139 removed.append(fn)
140 removed.append(fn)
140 elif flag1 != flag2:
141 elif flag1 != flag2:
141 modified.append(fn)
142 modified.append(fn)
142 elif node2 not in wdirnodes:
143 elif node2 not in wdirnodes:
143 # When comparing files between two commits, we save time by
144 # When comparing files between two commits, we save time by
144 # not comparing the file contents when the nodeids differ.
145 # not comparing the file contents when the nodeids differ.
145 # Note that this means we incorrectly report a reverted change
146 # Note that this means we incorrectly report a reverted change
146 # to a file as a modification.
147 # to a file as a modification.
147 modified.append(fn)
148 modified.append(fn)
148 elif self[fn].cmp(other[fn]):
149 elif self[fn].cmp(other[fn]):
149 modified.append(fn)
150 modified.append(fn)
150 else:
151 else:
151 clean.append(fn)
152 clean.append(fn)
152
153
153 if removed:
154 if removed:
154 # need to filter files if they are already reported as removed
155 # need to filter files if they are already reported as removed
155 unknown = [fn for fn in unknown if fn not in mf1 and
156 unknown = [fn for fn in unknown if fn not in mf1 and
156 (not match or match(fn))]
157 (not match or match(fn))]
157 ignored = [fn for fn in ignored if fn not in mf1 and
158 ignored = [fn for fn in ignored if fn not in mf1 and
158 (not match or match(fn))]
159 (not match or match(fn))]
159 # if they're deleted, don't report them as removed
160 # if they're deleted, don't report them as removed
160 removed = [fn for fn in removed if fn not in deletedset]
161 removed = [fn for fn in removed if fn not in deletedset]
161
162
162 return scmutil.status(modified, added, removed, deleted, unknown,
163 return scmutil.status(modified, added, removed, deleted, unknown,
163 ignored, clean)
164 ignored, clean)
164
165
165 @propertycache
166 @propertycache
166 def substate(self):
167 def substate(self):
167 return subrepo.state(self, self._repo.ui)
168 return subrepo.state(self, self._repo.ui)
168
169
169 def subrev(self, subpath):
170 def subrev(self, subpath):
170 return self.substate[subpath][1]
171 return self.substate[subpath][1]
171
172
172 def rev(self):
173 def rev(self):
173 return self._rev
174 return self._rev
174 def node(self):
175 def node(self):
175 return self._node
176 return self._node
176 def hex(self):
177 def hex(self):
177 return hex(self.node())
178 return hex(self.node())
178 def manifest(self):
179 def manifest(self):
179 return self._manifest
180 return self._manifest
180 def manifestctx(self):
181 def manifestctx(self):
181 return self._manifestctx
182 return self._manifestctx
182 def repo(self):
183 def repo(self):
183 return self._repo
184 return self._repo
184 def phasestr(self):
185 def phasestr(self):
185 return phases.phasenames[self.phase()]
186 return phases.phasenames[self.phase()]
186 def mutable(self):
187 def mutable(self):
187 return self.phase() > phases.public
188 return self.phase() > phases.public
188
189
189 def getfileset(self, expr):
190 def getfileset(self, expr):
190 return fileset.getfileset(self, expr)
191 return fileset.getfileset(self, expr)
191
192
192 def obsolete(self):
193 def obsolete(self):
193 """True if the changeset is obsolete"""
194 """True if the changeset is obsolete"""
194 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
195 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
195
196
196 def extinct(self):
197 def extinct(self):
197 """True if the changeset is extinct"""
198 """True if the changeset is extinct"""
198 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
199 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
199
200
200 def unstable(self):
201 def unstable(self):
201 """True if the changeset is not obsolete but it's ancestor are"""
202 """True if the changeset is not obsolete but it's ancestor are"""
202 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
203 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
203
204
204 def bumped(self):
205 def bumped(self):
205 """True if the changeset try to be a successor of a public changeset
206 """True if the changeset try to be a successor of a public changeset
206
207
207 Only non-public and non-obsolete changesets may be bumped.
208 Only non-public and non-obsolete changesets may be bumped.
208 """
209 """
209 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
210 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
210
211
211 def divergent(self):
212 def divergent(self):
212 """Is a successors of a changeset with multiple possible successors set
213 """Is a successors of a changeset with multiple possible successors set
213
214
214 Only non-public and non-obsolete changesets may be divergent.
215 Only non-public and non-obsolete changesets may be divergent.
215 """
216 """
216 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
217 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
217
218
218 def troubled(self):
219 def troubled(self):
219 """True if the changeset is either unstable, bumped or divergent"""
220 """True if the changeset is either unstable, bumped or divergent"""
220 return self.unstable() or self.bumped() or self.divergent()
221 return self.unstable() or self.bumped() or self.divergent()
221
222
222 def troubles(self):
223 def troubles(self):
223 """return the list of troubles affecting this changesets.
224 """return the list of troubles affecting this changesets.
224
225
225 Troubles are returned as strings. Possible values are:
226 Troubles are returned as strings. Possible values are:
226 - unstable,
227 - unstable,
227 - bumped,
228 - bumped,
228 - divergent.
229 - divergent.
229 """
230 """
230 troubles = []
231 troubles = []
231 if self.unstable():
232 if self.unstable():
232 troubles.append('unstable')
233 troubles.append('unstable')
233 if self.bumped():
234 if self.bumped():
234 troubles.append('bumped')
235 troubles.append('bumped')
235 if self.divergent():
236 if self.divergent():
236 troubles.append('divergent')
237 troubles.append('divergent')
237 return troubles
238 return troubles
238
239
239 def parents(self):
240 def parents(self):
240 """return contexts for each parent changeset"""
241 """return contexts for each parent changeset"""
241 return self._parents
242 return self._parents
242
243
243 def p1(self):
244 def p1(self):
244 return self._parents[0]
245 return self._parents[0]
245
246
246 def p2(self):
247 def p2(self):
247 parents = self._parents
248 parents = self._parents
248 if len(parents) == 2:
249 if len(parents) == 2:
249 return parents[1]
250 return parents[1]
250 return changectx(self._repo, nullrev)
251 return changectx(self._repo, nullrev)
251
252
252 def _fileinfo(self, path):
253 def _fileinfo(self, path):
253 if '_manifest' in self.__dict__:
254 if '_manifest' in self.__dict__:
254 try:
255 try:
255 return self._manifest[path], self._manifest.flags(path)
256 return self._manifest[path], self._manifest.flags(path)
256 except KeyError:
257 except KeyError:
257 raise error.ManifestLookupError(self._node, path,
258 raise error.ManifestLookupError(self._node, path,
258 _('not found in manifest'))
259 _('not found in manifest'))
259 if '_manifestdelta' in self.__dict__ or path in self.files():
260 if '_manifestdelta' in self.__dict__ or path in self.files():
260 if path in self._manifestdelta:
261 if path in self._manifestdelta:
261 return (self._manifestdelta[path],
262 return (self._manifestdelta[path],
262 self._manifestdelta.flags(path))
263 self._manifestdelta.flags(path))
263 mfl = self._repo.manifestlog
264 mfl = self._repo.manifestlog
264 try:
265 try:
265 node, flag = mfl[self._changeset.manifest].find(path)
266 node, flag = mfl[self._changeset.manifest].find(path)
266 except KeyError:
267 except KeyError:
267 raise error.ManifestLookupError(self._node, path,
268 raise error.ManifestLookupError(self._node, path,
268 _('not found in manifest'))
269 _('not found in manifest'))
269
270
270 return node, flag
271 return node, flag
271
272
272 def filenode(self, path):
273 def filenode(self, path):
273 return self._fileinfo(path)[0]
274 return self._fileinfo(path)[0]
274
275
275 def flags(self, path):
276 def flags(self, path):
276 try:
277 try:
277 return self._fileinfo(path)[1]
278 return self._fileinfo(path)[1]
278 except error.LookupError:
279 except error.LookupError:
279 return ''
280 return ''
280
281
281 def sub(self, path, allowcreate=True):
282 def sub(self, path, allowcreate=True):
282 '''return a subrepo for the stored revision of path, never wdir()'''
283 '''return a subrepo for the stored revision of path, never wdir()'''
283 return subrepo.subrepo(self, path, allowcreate=allowcreate)
284 return subrepo.subrepo(self, path, allowcreate=allowcreate)
284
285
285 def nullsub(self, path, pctx):
286 def nullsub(self, path, pctx):
286 return subrepo.nullsubrepo(self, path, pctx)
287 return subrepo.nullsubrepo(self, path, pctx)
287
288
288 def workingsub(self, path):
289 def workingsub(self, path):
289 '''return a subrepo for the stored revision, or wdir if this is a wdir
290 '''return a subrepo for the stored revision, or wdir if this is a wdir
290 context.
291 context.
291 '''
292 '''
292 return subrepo.subrepo(self, path, allowwdir=True)
293 return subrepo.subrepo(self, path, allowwdir=True)
293
294
294 def match(self, pats=[], include=None, exclude=None, default='glob',
295 def match(self, pats=[], include=None, exclude=None, default='glob',
295 listsubrepos=False, badfn=None):
296 listsubrepos=False, badfn=None):
296 r = self._repo
297 r = self._repo
297 return matchmod.match(r.root, r.getcwd(), pats,
298 return matchmod.match(r.root, r.getcwd(), pats,
298 include, exclude, default,
299 include, exclude, default,
299 auditor=r.nofsauditor, ctx=self,
300 auditor=r.nofsauditor, ctx=self,
300 listsubrepos=listsubrepos, badfn=badfn)
301 listsubrepos=listsubrepos, badfn=badfn)
301
302
302 def diff(self, ctx2=None, match=None, **opts):
303 def diff(self, ctx2=None, match=None, **opts):
303 """Returns a diff generator for the given contexts and matcher"""
304 """Returns a diff generator for the given contexts and matcher"""
304 if ctx2 is None:
305 if ctx2 is None:
305 ctx2 = self.p1()
306 ctx2 = self.p1()
306 if ctx2 is not None:
307 if ctx2 is not None:
307 ctx2 = self._repo[ctx2]
308 ctx2 = self._repo[ctx2]
308 diffopts = patch.diffopts(self._repo.ui, opts)
309 diffopts = patch.diffopts(self._repo.ui, opts)
309 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
310 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
310
311
311 def dirs(self):
312 def dirs(self):
312 return self._manifest.dirs()
313 return self._manifest.dirs()
313
314
314 def hasdir(self, dir):
315 def hasdir(self, dir):
315 return self._manifest.hasdir(dir)
316 return self._manifest.hasdir(dir)
316
317
317 def dirty(self, missing=False, merge=True, branch=True):
318 def dirty(self, missing=False, merge=True, branch=True):
318 return False
319 return False
319
320
320 def status(self, other=None, match=None, listignored=False,
321 def status(self, other=None, match=None, listignored=False,
321 listclean=False, listunknown=False, listsubrepos=False):
322 listclean=False, listunknown=False, listsubrepos=False):
322 """return status of files between two nodes or node and working
323 """return status of files between two nodes or node and working
323 directory.
324 directory.
324
325
325 If other is None, compare this node with working directory.
326 If other is None, compare this node with working directory.
326
327
327 returns (modified, added, removed, deleted, unknown, ignored, clean)
328 returns (modified, added, removed, deleted, unknown, ignored, clean)
328 """
329 """
329
330
330 ctx1 = self
331 ctx1 = self
331 ctx2 = self._repo[other]
332 ctx2 = self._repo[other]
332
333
333 # This next code block is, admittedly, fragile logic that tests for
334 # This next code block is, admittedly, fragile logic that tests for
334 # reversing the contexts and wouldn't need to exist if it weren't for
335 # reversing the contexts and wouldn't need to exist if it weren't for
335 # the fast (and common) code path of comparing the working directory
336 # the fast (and common) code path of comparing the working directory
336 # with its first parent.
337 # with its first parent.
337 #
338 #
338 # What we're aiming for here is the ability to call:
339 # What we're aiming for here is the ability to call:
339 #
340 #
340 # workingctx.status(parentctx)
341 # workingctx.status(parentctx)
341 #
342 #
342 # If we always built the manifest for each context and compared those,
343 # If we always built the manifest for each context and compared those,
343 # then we'd be done. But the special case of the above call means we
344 # then we'd be done. But the special case of the above call means we
344 # just copy the manifest of the parent.
345 # just copy the manifest of the parent.
345 reversed = False
346 reversed = False
346 if (not isinstance(ctx1, changectx)
347 if (not isinstance(ctx1, changectx)
347 and isinstance(ctx2, changectx)):
348 and isinstance(ctx2, changectx)):
348 reversed = True
349 reversed = True
349 ctx1, ctx2 = ctx2, ctx1
350 ctx1, ctx2 = ctx2, ctx1
350
351
351 match = ctx2._matchstatus(ctx1, match)
352 match = ctx2._matchstatus(ctx1, match)
352 r = scmutil.status([], [], [], [], [], [], [])
353 r = scmutil.status([], [], [], [], [], [], [])
353 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
354 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
354 listunknown)
355 listunknown)
355
356
356 if reversed:
357 if reversed:
357 # Reverse added and removed. Clear deleted, unknown and ignored as
358 # Reverse added and removed. Clear deleted, unknown and ignored as
358 # these make no sense to reverse.
359 # these make no sense to reverse.
359 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
360 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
360 r.clean)
361 r.clean)
361
362
362 if listsubrepos:
363 if listsubrepos:
363 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
364 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
364 try:
365 try:
365 rev2 = ctx2.subrev(subpath)
366 rev2 = ctx2.subrev(subpath)
366 except KeyError:
367 except KeyError:
367 # A subrepo that existed in node1 was deleted between
368 # A subrepo that existed in node1 was deleted between
368 # node1 and node2 (inclusive). Thus, ctx2's substate
369 # node1 and node2 (inclusive). Thus, ctx2's substate
369 # won't contain that subpath. The best we can do is ignore it.
370 # won't contain that subpath. The best we can do is ignore it.
370 rev2 = None
371 rev2 = None
371 submatch = matchmod.subdirmatcher(subpath, match)
372 submatch = matchmod.subdirmatcher(subpath, match)
372 s = sub.status(rev2, match=submatch, ignored=listignored,
373 s = sub.status(rev2, match=submatch, ignored=listignored,
373 clean=listclean, unknown=listunknown,
374 clean=listclean, unknown=listunknown,
374 listsubrepos=True)
375 listsubrepos=True)
375 for rfiles, sfiles in zip(r, s):
376 for rfiles, sfiles in zip(r, s):
376 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
377 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
377
378
378 for l in r:
379 for l in r:
379 l.sort()
380 l.sort()
380
381
381 return r
382 return r
382
383
383
384
384 def makememctx(repo, parents, text, user, date, branch, files, store,
385 def makememctx(repo, parents, text, user, date, branch, files, store,
385 editor=None, extra=None):
386 editor=None, extra=None):
386 def getfilectx(repo, memctx, path):
387 def getfilectx(repo, memctx, path):
387 data, mode, copied = store.getfile(path)
388 data, mode, copied = store.getfile(path)
388 if data is None:
389 if data is None:
389 return None
390 return None
390 islink, isexec = mode
391 islink, isexec = mode
391 return memfilectx(repo, path, data, islink=islink, isexec=isexec,
392 return memfilectx(repo, path, data, islink=islink, isexec=isexec,
392 copied=copied, memctx=memctx)
393 copied=copied, memctx=memctx)
393 if extra is None:
394 if extra is None:
394 extra = {}
395 extra = {}
395 if branch:
396 if branch:
396 extra['branch'] = encoding.fromlocal(branch)
397 extra['branch'] = encoding.fromlocal(branch)
397 ctx = memctx(repo, parents, text, files, getfilectx, user,
398 ctx = memctx(repo, parents, text, files, getfilectx, user,
398 date, extra, editor)
399 date, extra, editor)
399 return ctx
400 return ctx
400
401
401 class changectx(basectx):
402 class changectx(basectx):
402 """A changecontext object makes access to data related to a particular
403 """A changecontext object makes access to data related to a particular
403 changeset convenient. It represents a read-only context already present in
404 changeset convenient. It represents a read-only context already present in
404 the repo."""
405 the repo."""
405 def __init__(self, repo, changeid=''):
406 def __init__(self, repo, changeid=''):
406 """changeid is a revision number, node, or tag"""
407 """changeid is a revision number, node, or tag"""
407
408
408 # since basectx.__new__ already took care of copying the object, we
409 # since basectx.__new__ already took care of copying the object, we
409 # don't need to do anything in __init__, so we just exit here
410 # don't need to do anything in __init__, so we just exit here
410 if isinstance(changeid, basectx):
411 if isinstance(changeid, basectx):
411 return
412 return
412
413
413 if changeid == '':
414 if changeid == '':
414 changeid = '.'
415 changeid = '.'
415 self._repo = repo
416 self._repo = repo
416
417
417 try:
418 try:
418 if isinstance(changeid, int):
419 if isinstance(changeid, int):
419 self._node = repo.changelog.node(changeid)
420 self._node = repo.changelog.node(changeid)
420 self._rev = changeid
421 self._rev = changeid
421 return
422 return
422 if isinstance(changeid, long):
423 if not pycompat.ispy3 and isinstance(changeid, long):
423 changeid = str(changeid)
424 changeid = str(changeid)
424 if changeid == 'null':
425 if changeid == 'null':
425 self._node = nullid
426 self._node = nullid
426 self._rev = nullrev
427 self._rev = nullrev
427 return
428 return
428 if changeid == 'tip':
429 if changeid == 'tip':
429 self._node = repo.changelog.tip()
430 self._node = repo.changelog.tip()
430 self._rev = repo.changelog.rev(self._node)
431 self._rev = repo.changelog.rev(self._node)
431 return
432 return
432 if changeid == '.' or changeid == repo.dirstate.p1():
433 if changeid == '.' or changeid == repo.dirstate.p1():
433 # this is a hack to delay/avoid loading obsmarkers
434 # this is a hack to delay/avoid loading obsmarkers
434 # when we know that '.' won't be hidden
435 # when we know that '.' won't be hidden
435 self._node = repo.dirstate.p1()
436 self._node = repo.dirstate.p1()
436 self._rev = repo.unfiltered().changelog.rev(self._node)
437 self._rev = repo.unfiltered().changelog.rev(self._node)
437 return
438 return
438 if len(changeid) == 20:
439 if len(changeid) == 20:
439 try:
440 try:
440 self._node = changeid
441 self._node = changeid
441 self._rev = repo.changelog.rev(changeid)
442 self._rev = repo.changelog.rev(changeid)
442 return
443 return
443 except error.FilteredRepoLookupError:
444 except error.FilteredRepoLookupError:
444 raise
445 raise
445 except LookupError:
446 except LookupError:
446 pass
447 pass
447
448
448 try:
449 try:
449 r = int(changeid)
450 r = int(changeid)
450 if str(r) != changeid:
451 if str(r) != changeid:
451 raise ValueError
452 raise ValueError
452 l = len(repo.changelog)
453 l = len(repo.changelog)
453 if r < 0:
454 if r < 0:
454 r += l
455 r += l
455 if r < 0 or r >= l:
456 if r < 0 or r >= l:
456 raise ValueError
457 raise ValueError
457 self._rev = r
458 self._rev = r
458 self._node = repo.changelog.node(r)
459 self._node = repo.changelog.node(r)
459 return
460 return
460 except error.FilteredIndexError:
461 except error.FilteredIndexError:
461 raise
462 raise
462 except (ValueError, OverflowError, IndexError):
463 except (ValueError, OverflowError, IndexError):
463 pass
464 pass
464
465
465 if len(changeid) == 40:
466 if len(changeid) == 40:
466 try:
467 try:
467 self._node = bin(changeid)
468 self._node = bin(changeid)
468 self._rev = repo.changelog.rev(self._node)
469 self._rev = repo.changelog.rev(self._node)
469 return
470 return
470 except error.FilteredLookupError:
471 except error.FilteredLookupError:
471 raise
472 raise
472 except (TypeError, LookupError):
473 except (TypeError, LookupError):
473 pass
474 pass
474
475
475 # lookup bookmarks through the name interface
476 # lookup bookmarks through the name interface
476 try:
477 try:
477 self._node = repo.names.singlenode(repo, changeid)
478 self._node = repo.names.singlenode(repo, changeid)
478 self._rev = repo.changelog.rev(self._node)
479 self._rev = repo.changelog.rev(self._node)
479 return
480 return
480 except KeyError:
481 except KeyError:
481 pass
482 pass
482 except error.FilteredRepoLookupError:
483 except error.FilteredRepoLookupError:
483 raise
484 raise
484 except error.RepoLookupError:
485 except error.RepoLookupError:
485 pass
486 pass
486
487
487 self._node = repo.unfiltered().changelog._partialmatch(changeid)
488 self._node = repo.unfiltered().changelog._partialmatch(changeid)
488 if self._node is not None:
489 if self._node is not None:
489 self._rev = repo.changelog.rev(self._node)
490 self._rev = repo.changelog.rev(self._node)
490 return
491 return
491
492
492 # lookup failed
493 # lookup failed
493 # check if it might have come from damaged dirstate
494 # check if it might have come from damaged dirstate
494 #
495 #
495 # XXX we could avoid the unfiltered if we had a recognizable
496 # XXX we could avoid the unfiltered if we had a recognizable
496 # exception for filtered changeset access
497 # exception for filtered changeset access
497 if changeid in repo.unfiltered().dirstate.parents():
498 if changeid in repo.unfiltered().dirstate.parents():
498 msg = _("working directory has unknown parent '%s'!")
499 msg = _("working directory has unknown parent '%s'!")
499 raise error.Abort(msg % short(changeid))
500 raise error.Abort(msg % short(changeid))
500 try:
501 try:
501 if len(changeid) == 20 and nonascii(changeid):
502 if len(changeid) == 20 and nonascii(changeid):
502 changeid = hex(changeid)
503 changeid = hex(changeid)
503 except TypeError:
504 except TypeError:
504 pass
505 pass
505 except (error.FilteredIndexError, error.FilteredLookupError,
506 except (error.FilteredIndexError, error.FilteredLookupError,
506 error.FilteredRepoLookupError):
507 error.FilteredRepoLookupError):
507 if repo.filtername.startswith('visible'):
508 if repo.filtername.startswith('visible'):
508 msg = _("hidden revision '%s'") % changeid
509 msg = _("hidden revision '%s'") % changeid
509 hint = _('use --hidden to access hidden revisions')
510 hint = _('use --hidden to access hidden revisions')
510 raise error.FilteredRepoLookupError(msg, hint=hint)
511 raise error.FilteredRepoLookupError(msg, hint=hint)
511 msg = _("filtered revision '%s' (not in '%s' subset)")
512 msg = _("filtered revision '%s' (not in '%s' subset)")
512 msg %= (changeid, repo.filtername)
513 msg %= (changeid, repo.filtername)
513 raise error.FilteredRepoLookupError(msg)
514 raise error.FilteredRepoLookupError(msg)
514 except IndexError:
515 except IndexError:
515 pass
516 pass
516 raise error.RepoLookupError(
517 raise error.RepoLookupError(
517 _("unknown revision '%s'") % changeid)
518 _("unknown revision '%s'") % changeid)
518
519
519 def __hash__(self):
520 def __hash__(self):
520 try:
521 try:
521 return hash(self._rev)
522 return hash(self._rev)
522 except AttributeError:
523 except AttributeError:
523 return id(self)
524 return id(self)
524
525
525 def __nonzero__(self):
526 def __nonzero__(self):
526 return self._rev != nullrev
527 return self._rev != nullrev
527
528
528 @propertycache
529 @propertycache
529 def _changeset(self):
530 def _changeset(self):
530 return self._repo.changelog.changelogrevision(self.rev())
531 return self._repo.changelog.changelogrevision(self.rev())
531
532
532 @propertycache
533 @propertycache
533 def _manifest(self):
534 def _manifest(self):
534 return self._manifestctx.read()
535 return self._manifestctx.read()
535
536
536 @propertycache
537 @propertycache
537 def _manifestctx(self):
538 def _manifestctx(self):
538 return self._repo.manifestlog[self._changeset.manifest]
539 return self._repo.manifestlog[self._changeset.manifest]
539
540
540 @propertycache
541 @propertycache
541 def _manifestdelta(self):
542 def _manifestdelta(self):
542 return self._manifestctx.readdelta()
543 return self._manifestctx.readdelta()
543
544
544 @propertycache
545 @propertycache
545 def _parents(self):
546 def _parents(self):
546 repo = self._repo
547 repo = self._repo
547 p1, p2 = repo.changelog.parentrevs(self._rev)
548 p1, p2 = repo.changelog.parentrevs(self._rev)
548 if p2 == nullrev:
549 if p2 == nullrev:
549 return [changectx(repo, p1)]
550 return [changectx(repo, p1)]
550 return [changectx(repo, p1), changectx(repo, p2)]
551 return [changectx(repo, p1), changectx(repo, p2)]
551
552
552 def changeset(self):
553 def changeset(self):
553 c = self._changeset
554 c = self._changeset
554 return (
555 return (
555 c.manifest,
556 c.manifest,
556 c.user,
557 c.user,
557 c.date,
558 c.date,
558 c.files,
559 c.files,
559 c.description,
560 c.description,
560 c.extra,
561 c.extra,
561 )
562 )
562 def manifestnode(self):
563 def manifestnode(self):
563 return self._changeset.manifest
564 return self._changeset.manifest
564
565
565 def user(self):
566 def user(self):
566 return self._changeset.user
567 return self._changeset.user
567 def date(self):
568 def date(self):
568 return self._changeset.date
569 return self._changeset.date
569 def files(self):
570 def files(self):
570 return self._changeset.files
571 return self._changeset.files
571 def description(self):
572 def description(self):
572 return self._changeset.description
573 return self._changeset.description
573 def branch(self):
574 def branch(self):
574 return encoding.tolocal(self._changeset.extra.get("branch"))
575 return encoding.tolocal(self._changeset.extra.get("branch"))
575 def closesbranch(self):
576 def closesbranch(self):
576 return 'close' in self._changeset.extra
577 return 'close' in self._changeset.extra
577 def extra(self):
578 def extra(self):
578 return self._changeset.extra
579 return self._changeset.extra
579 def tags(self):
580 def tags(self):
580 return self._repo.nodetags(self._node)
581 return self._repo.nodetags(self._node)
581 def bookmarks(self):
582 def bookmarks(self):
582 return self._repo.nodebookmarks(self._node)
583 return self._repo.nodebookmarks(self._node)
583 def phase(self):
584 def phase(self):
584 return self._repo._phasecache.phase(self._repo, self._rev)
585 return self._repo._phasecache.phase(self._repo, self._rev)
585 def hidden(self):
586 def hidden(self):
586 return self._rev in repoview.filterrevs(self._repo, 'visible')
587 return self._rev in repoview.filterrevs(self._repo, 'visible')
587
588
588 def children(self):
589 def children(self):
589 """return contexts for each child changeset"""
590 """return contexts for each child changeset"""
590 c = self._repo.changelog.children(self._node)
591 c = self._repo.changelog.children(self._node)
591 return [changectx(self._repo, x) for x in c]
592 return [changectx(self._repo, x) for x in c]
592
593
593 def ancestors(self):
594 def ancestors(self):
594 for a in self._repo.changelog.ancestors([self._rev]):
595 for a in self._repo.changelog.ancestors([self._rev]):
595 yield changectx(self._repo, a)
596 yield changectx(self._repo, a)
596
597
597 def descendants(self):
598 def descendants(self):
598 for d in self._repo.changelog.descendants([self._rev]):
599 for d in self._repo.changelog.descendants([self._rev]):
599 yield changectx(self._repo, d)
600 yield changectx(self._repo, d)
600
601
601 def filectx(self, path, fileid=None, filelog=None):
602 def filectx(self, path, fileid=None, filelog=None):
602 """get a file context from this changeset"""
603 """get a file context from this changeset"""
603 if fileid is None:
604 if fileid is None:
604 fileid = self.filenode(path)
605 fileid = self.filenode(path)
605 return filectx(self._repo, path, fileid=fileid,
606 return filectx(self._repo, path, fileid=fileid,
606 changectx=self, filelog=filelog)
607 changectx=self, filelog=filelog)
607
608
608 def ancestor(self, c2, warn=False):
609 def ancestor(self, c2, warn=False):
609 """return the "best" ancestor context of self and c2
610 """return the "best" ancestor context of self and c2
610
611
611 If there are multiple candidates, it will show a message and check
612 If there are multiple candidates, it will show a message and check
612 merge.preferancestor configuration before falling back to the
613 merge.preferancestor configuration before falling back to the
613 revlog ancestor."""
614 revlog ancestor."""
614 # deal with workingctxs
615 # deal with workingctxs
615 n2 = c2._node
616 n2 = c2._node
616 if n2 is None:
617 if n2 is None:
617 n2 = c2._parents[0]._node
618 n2 = c2._parents[0]._node
618 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
619 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
619 if not cahs:
620 if not cahs:
620 anc = nullid
621 anc = nullid
621 elif len(cahs) == 1:
622 elif len(cahs) == 1:
622 anc = cahs[0]
623 anc = cahs[0]
623 else:
624 else:
624 # experimental config: merge.preferancestor
625 # experimental config: merge.preferancestor
625 for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
626 for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
626 try:
627 try:
627 ctx = changectx(self._repo, r)
628 ctx = changectx(self._repo, r)
628 except error.RepoLookupError:
629 except error.RepoLookupError:
629 continue
630 continue
630 anc = ctx.node()
631 anc = ctx.node()
631 if anc in cahs:
632 if anc in cahs:
632 break
633 break
633 else:
634 else:
634 anc = self._repo.changelog.ancestor(self._node, n2)
635 anc = self._repo.changelog.ancestor(self._node, n2)
635 if warn:
636 if warn:
636 self._repo.ui.status(
637 self._repo.ui.status(
637 (_("note: using %s as ancestor of %s and %s\n") %
638 (_("note: using %s as ancestor of %s and %s\n") %
638 (short(anc), short(self._node), short(n2))) +
639 (short(anc), short(self._node), short(n2))) +
639 ''.join(_(" alternatively, use --config "
640 ''.join(_(" alternatively, use --config "
640 "merge.preferancestor=%s\n") %
641 "merge.preferancestor=%s\n") %
641 short(n) for n in sorted(cahs) if n != anc))
642 short(n) for n in sorted(cahs) if n != anc))
642 return changectx(self._repo, anc)
643 return changectx(self._repo, anc)
643
644
644 def descendant(self, other):
645 def descendant(self, other):
645 """True if other is descendant of this changeset"""
646 """True if other is descendant of this changeset"""
646 return self._repo.changelog.descendant(self._rev, other._rev)
647 return self._repo.changelog.descendant(self._rev, other._rev)
647
648
648 def walk(self, match):
649 def walk(self, match):
649 '''Generates matching file names.'''
650 '''Generates matching file names.'''
650
651
651 # Wrap match.bad method to have message with nodeid
652 # Wrap match.bad method to have message with nodeid
652 def bad(fn, msg):
653 def bad(fn, msg):
653 # The manifest doesn't know about subrepos, so don't complain about
654 # The manifest doesn't know about subrepos, so don't complain about
654 # paths into valid subrepos.
655 # paths into valid subrepos.
655 if any(fn == s or fn.startswith(s + '/')
656 if any(fn == s or fn.startswith(s + '/')
656 for s in self.substate):
657 for s in self.substate):
657 return
658 return
658 match.bad(fn, _('no such file in rev %s') % self)
659 match.bad(fn, _('no such file in rev %s') % self)
659
660
660 m = matchmod.badmatch(match, bad)
661 m = matchmod.badmatch(match, bad)
661 return self._manifest.walk(m)
662 return self._manifest.walk(m)
662
663
663 def matches(self, match):
664 def matches(self, match):
664 return self.walk(match)
665 return self.walk(match)
665
666
666 class basefilectx(object):
667 class basefilectx(object):
667 """A filecontext object represents the common logic for its children:
668 """A filecontext object represents the common logic for its children:
668 filectx: read-only access to a filerevision that is already present
669 filectx: read-only access to a filerevision that is already present
669 in the repo,
670 in the repo,
670 workingfilectx: a filecontext that represents files from the working
671 workingfilectx: a filecontext that represents files from the working
671 directory,
672 directory,
672 memfilectx: a filecontext that represents files in-memory."""
673 memfilectx: a filecontext that represents files in-memory."""
673 def __new__(cls, repo, path, *args, **kwargs):
674 def __new__(cls, repo, path, *args, **kwargs):
674 return super(basefilectx, cls).__new__(cls)
675 return super(basefilectx, cls).__new__(cls)
675
676
676 @propertycache
677 @propertycache
677 def _filelog(self):
678 def _filelog(self):
678 return self._repo.file(self._path)
679 return self._repo.file(self._path)
679
680
680 @propertycache
681 @propertycache
681 def _changeid(self):
682 def _changeid(self):
682 if '_changeid' in self.__dict__:
683 if '_changeid' in self.__dict__:
683 return self._changeid
684 return self._changeid
684 elif '_changectx' in self.__dict__:
685 elif '_changectx' in self.__dict__:
685 return self._changectx.rev()
686 return self._changectx.rev()
686 elif '_descendantrev' in self.__dict__:
687 elif '_descendantrev' in self.__dict__:
687 # this file context was created from a revision with a known
688 # this file context was created from a revision with a known
688 # descendant, we can (lazily) correct for linkrev aliases
689 # descendant, we can (lazily) correct for linkrev aliases
689 return self._adjustlinkrev(self._descendantrev)
690 return self._adjustlinkrev(self._descendantrev)
690 else:
691 else:
691 return self._filelog.linkrev(self._filerev)
692 return self._filelog.linkrev(self._filerev)
692
693
693 @propertycache
694 @propertycache
694 def _filenode(self):
695 def _filenode(self):
695 if '_fileid' in self.__dict__:
696 if '_fileid' in self.__dict__:
696 return self._filelog.lookup(self._fileid)
697 return self._filelog.lookup(self._fileid)
697 else:
698 else:
698 return self._changectx.filenode(self._path)
699 return self._changectx.filenode(self._path)
699
700
700 @propertycache
701 @propertycache
701 def _filerev(self):
702 def _filerev(self):
702 return self._filelog.rev(self._filenode)
703 return self._filelog.rev(self._filenode)
703
704
704 @propertycache
705 @propertycache
705 def _repopath(self):
706 def _repopath(self):
706 return self._path
707 return self._path
707
708
708 def __nonzero__(self):
709 def __nonzero__(self):
709 try:
710 try:
710 self._filenode
711 self._filenode
711 return True
712 return True
712 except error.LookupError:
713 except error.LookupError:
713 # file is missing
714 # file is missing
714 return False
715 return False
715
716
716 def __str__(self):
717 def __str__(self):
717 try:
718 try:
718 return "%s@%s" % (self.path(), self._changectx)
719 return "%s@%s" % (self.path(), self._changectx)
719 except error.LookupError:
720 except error.LookupError:
720 return "%s@???" % self.path()
721 return "%s@???" % self.path()
721
722
722 def __repr__(self):
723 def __repr__(self):
723 return "<%s %s>" % (type(self).__name__, str(self))
724 return "<%s %s>" % (type(self).__name__, str(self))
724
725
725 def __hash__(self):
726 def __hash__(self):
726 try:
727 try:
727 return hash((self._path, self._filenode))
728 return hash((self._path, self._filenode))
728 except AttributeError:
729 except AttributeError:
729 return id(self)
730 return id(self)
730
731
731 def __eq__(self, other):
732 def __eq__(self, other):
732 try:
733 try:
733 return (type(self) == type(other) and self._path == other._path
734 return (type(self) == type(other) and self._path == other._path
734 and self._filenode == other._filenode)
735 and self._filenode == other._filenode)
735 except AttributeError:
736 except AttributeError:
736 return False
737 return False
737
738
738 def __ne__(self, other):
739 def __ne__(self, other):
739 return not (self == other)
740 return not (self == other)
740
741
741 def filerev(self):
742 def filerev(self):
742 return self._filerev
743 return self._filerev
743 def filenode(self):
744 def filenode(self):
744 return self._filenode
745 return self._filenode
745 def flags(self):
746 def flags(self):
746 return self._changectx.flags(self._path)
747 return self._changectx.flags(self._path)
747 def filelog(self):
748 def filelog(self):
748 return self._filelog
749 return self._filelog
749 def rev(self):
750 def rev(self):
750 return self._changeid
751 return self._changeid
751 def linkrev(self):
752 def linkrev(self):
752 return self._filelog.linkrev(self._filerev)
753 return self._filelog.linkrev(self._filerev)
753 def node(self):
754 def node(self):
754 return self._changectx.node()
755 return self._changectx.node()
755 def hex(self):
756 def hex(self):
756 return self._changectx.hex()
757 return self._changectx.hex()
757 def user(self):
758 def user(self):
758 return self._changectx.user()
759 return self._changectx.user()
759 def date(self):
760 def date(self):
760 return self._changectx.date()
761 return self._changectx.date()
761 def files(self):
762 def files(self):
762 return self._changectx.files()
763 return self._changectx.files()
763 def description(self):
764 def description(self):
764 return self._changectx.description()
765 return self._changectx.description()
765 def branch(self):
766 def branch(self):
766 return self._changectx.branch()
767 return self._changectx.branch()
767 def extra(self):
768 def extra(self):
768 return self._changectx.extra()
769 return self._changectx.extra()
769 def phase(self):
770 def phase(self):
770 return self._changectx.phase()
771 return self._changectx.phase()
771 def phasestr(self):
772 def phasestr(self):
772 return self._changectx.phasestr()
773 return self._changectx.phasestr()
773 def manifest(self):
774 def manifest(self):
774 return self._changectx.manifest()
775 return self._changectx.manifest()
775 def changectx(self):
776 def changectx(self):
776 return self._changectx
777 return self._changectx
777 def repo(self):
778 def repo(self):
778 return self._repo
779 return self._repo
779
780
780 def path(self):
781 def path(self):
781 return self._path
782 return self._path
782
783
783 def isbinary(self):
784 def isbinary(self):
784 try:
785 try:
785 return util.binary(self.data())
786 return util.binary(self.data())
786 except IOError:
787 except IOError:
787 return False
788 return False
788 def isexec(self):
789 def isexec(self):
789 return 'x' in self.flags()
790 return 'x' in self.flags()
790 def islink(self):
791 def islink(self):
791 return 'l' in self.flags()
792 return 'l' in self.flags()
792
793
793 def isabsent(self):
794 def isabsent(self):
794 """whether this filectx represents a file not in self._changectx
795 """whether this filectx represents a file not in self._changectx
795
796
796 This is mainly for merge code to detect change/delete conflicts. This is
797 This is mainly for merge code to detect change/delete conflicts. This is
797 expected to be True for all subclasses of basectx."""
798 expected to be True for all subclasses of basectx."""
798 return False
799 return False
799
800
800 _customcmp = False
801 _customcmp = False
801 def cmp(self, fctx):
802 def cmp(self, fctx):
802 """compare with other file context
803 """compare with other file context
803
804
804 returns True if different than fctx.
805 returns True if different than fctx.
805 """
806 """
806 if fctx._customcmp:
807 if fctx._customcmp:
807 return fctx.cmp(self)
808 return fctx.cmp(self)
808
809
809 if (fctx._filenode is None
810 if (fctx._filenode is None
810 and (self._repo._encodefilterpats
811 and (self._repo._encodefilterpats
811 # if file data starts with '\1\n', empty metadata block is
812 # if file data starts with '\1\n', empty metadata block is
812 # prepended, which adds 4 bytes to filelog.size().
813 # prepended, which adds 4 bytes to filelog.size().
813 or self.size() - 4 == fctx.size())
814 or self.size() - 4 == fctx.size())
814 or self.size() == fctx.size()):
815 or self.size() == fctx.size()):
815 return self._filelog.cmp(self._filenode, fctx.data())
816 return self._filelog.cmp(self._filenode, fctx.data())
816
817
817 return True
818 return True
818
819
819 def _adjustlinkrev(self, srcrev, inclusive=False):
820 def _adjustlinkrev(self, srcrev, inclusive=False):
820 """return the first ancestor of <srcrev> introducing <fnode>
821 """return the first ancestor of <srcrev> introducing <fnode>
821
822
822 If the linkrev of the file revision does not point to an ancestor of
823 If the linkrev of the file revision does not point to an ancestor of
823 srcrev, we'll walk down the ancestors until we find one introducing
824 srcrev, we'll walk down the ancestors until we find one introducing
824 this file revision.
825 this file revision.
825
826
826 :srcrev: the changeset revision we search ancestors from
827 :srcrev: the changeset revision we search ancestors from
827 :inclusive: if true, the src revision will also be checked
828 :inclusive: if true, the src revision will also be checked
828 """
829 """
829 repo = self._repo
830 repo = self._repo
830 cl = repo.unfiltered().changelog
831 cl = repo.unfiltered().changelog
831 mfl = repo.manifestlog
832 mfl = repo.manifestlog
832 # fetch the linkrev
833 # fetch the linkrev
833 lkr = self.linkrev()
834 lkr = self.linkrev()
834 # hack to reuse ancestor computation when searching for renames
835 # hack to reuse ancestor computation when searching for renames
835 memberanc = getattr(self, '_ancestrycontext', None)
836 memberanc = getattr(self, '_ancestrycontext', None)
836 iteranc = None
837 iteranc = None
837 if srcrev is None:
838 if srcrev is None:
838 # wctx case, used by workingfilectx during mergecopy
839 # wctx case, used by workingfilectx during mergecopy
839 revs = [p.rev() for p in self._repo[None].parents()]
840 revs = [p.rev() for p in self._repo[None].parents()]
840 inclusive = True # we skipped the real (revless) source
841 inclusive = True # we skipped the real (revless) source
841 else:
842 else:
842 revs = [srcrev]
843 revs = [srcrev]
843 if memberanc is None:
844 if memberanc is None:
844 memberanc = iteranc = cl.ancestors(revs, lkr,
845 memberanc = iteranc = cl.ancestors(revs, lkr,
845 inclusive=inclusive)
846 inclusive=inclusive)
846 # check if this linkrev is an ancestor of srcrev
847 # check if this linkrev is an ancestor of srcrev
847 if lkr not in memberanc:
848 if lkr not in memberanc:
848 if iteranc is None:
849 if iteranc is None:
849 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
850 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
850 fnode = self._filenode
851 fnode = self._filenode
851 path = self._path
852 path = self._path
852 for a in iteranc:
853 for a in iteranc:
853 ac = cl.read(a) # get changeset data (we avoid object creation)
854 ac = cl.read(a) # get changeset data (we avoid object creation)
854 if path in ac[3]: # checking the 'files' field.
855 if path in ac[3]: # checking the 'files' field.
855 # The file has been touched, check if the content is
856 # The file has been touched, check if the content is
856 # similar to the one we search for.
857 # similar to the one we search for.
857 if fnode == mfl[ac[0]].readfast().get(path):
858 if fnode == mfl[ac[0]].readfast().get(path):
858 return a
859 return a
859 # In theory, we should never get out of that loop without a result.
860 # In theory, we should never get out of that loop without a result.
860 # But if manifest uses a buggy file revision (not children of the
861 # But if manifest uses a buggy file revision (not children of the
861 # one it replaces) we could. Such a buggy situation will likely
862 # one it replaces) we could. Such a buggy situation will likely
862 # result in a crash somewhere else at some point.
863 # result in a crash somewhere else at some point.
863 return lkr
864 return lkr
864
865
865 def introrev(self):
866 def introrev(self):
866 """return the rev of the changeset which introduced this file revision
867 """return the rev of the changeset which introduced this file revision
867
868
868 This method is different from linkrev because it takes into account the
869 This method is different from linkrev because it takes into account the
869 changeset the filectx was created from. It ensures the returned
870 changeset the filectx was created from. It ensures the returned
870 revision is one of its ancestors. This prevents bugs from
871 revision is one of its ancestors. This prevents bugs from
871 'linkrev-shadowing' when a file revision is used by multiple
872 'linkrev-shadowing' when a file revision is used by multiple
872 changesets.
873 changesets.
873 """
874 """
874 lkr = self.linkrev()
875 lkr = self.linkrev()
875 attrs = vars(self)
876 attrs = vars(self)
876 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
877 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
877 if noctx or self.rev() == lkr:
878 if noctx or self.rev() == lkr:
878 return self.linkrev()
879 return self.linkrev()
879 return self._adjustlinkrev(self.rev(), inclusive=True)
880 return self._adjustlinkrev(self.rev(), inclusive=True)
880
881
881 def _parentfilectx(self, path, fileid, filelog):
882 def _parentfilectx(self, path, fileid, filelog):
882 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
883 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
883 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
884 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
884 if '_changeid' in vars(self) or '_changectx' in vars(self):
885 if '_changeid' in vars(self) or '_changectx' in vars(self):
885 # If self is associated with a changeset (probably explicitly
886 # If self is associated with a changeset (probably explicitly
886 # fed), ensure the created filectx is associated with a
887 # fed), ensure the created filectx is associated with a
887 # changeset that is an ancestor of self.changectx.
888 # changeset that is an ancestor of self.changectx.
888 # This lets us later use _adjustlinkrev to get a correct link.
889 # This lets us later use _adjustlinkrev to get a correct link.
889 fctx._descendantrev = self.rev()
890 fctx._descendantrev = self.rev()
890 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
891 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
891 elif '_descendantrev' in vars(self):
892 elif '_descendantrev' in vars(self):
892 # Otherwise propagate _descendantrev if we have one associated.
893 # Otherwise propagate _descendantrev if we have one associated.
893 fctx._descendantrev = self._descendantrev
894 fctx._descendantrev = self._descendantrev
894 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
895 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
895 return fctx
896 return fctx
896
897
897 def parents(self):
898 def parents(self):
898 _path = self._path
899 _path = self._path
899 fl = self._filelog
900 fl = self._filelog
900 parents = self._filelog.parents(self._filenode)
901 parents = self._filelog.parents(self._filenode)
901 pl = [(_path, node, fl) for node in parents if node != nullid]
902 pl = [(_path, node, fl) for node in parents if node != nullid]
902
903
903 r = fl.renamed(self._filenode)
904 r = fl.renamed(self._filenode)
904 if r:
905 if r:
905 # - In the simple rename case, both parents are nullid, pl is empty.
906 # - In the simple rename case, both parents are nullid, pl is empty.
906 # - In case of merge, only one of the parents is nullid and should
907 # - In case of merge, only one of the parents is nullid and should
907 # be replaced with the rename information. This parent is -always-
908 # be replaced with the rename information. This parent is -always-
908 # the first one.
909 # the first one.
909 #
910 #
910 # As nullid has always been filtered out in the previous list
911 # As nullid has always been filtered out in the previous list
911 # comprehension, inserting at 0 will always result in replacing the
912 # comprehension, inserting at 0 will always result in replacing the
912 # first nullid parent with the rename information.
913 # first nullid parent with the rename information.
913 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
914 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
914
915
915 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
916 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
916
917
917 def p1(self):
918 def p1(self):
918 return self.parents()[0]
919 return self.parents()[0]
919
920
920 def p2(self):
921 def p2(self):
921 p = self.parents()
922 p = self.parents()
922 if len(p) == 2:
923 if len(p) == 2:
923 return p[1]
924 return p[1]
924 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
925 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
925
926
926 def annotate(self, follow=False, linenumber=False, diffopts=None):
927 def annotate(self, follow=False, linenumber=False, diffopts=None):
927 '''returns a list of tuples of ((ctx, number), line) for each line
928 '''returns a list of tuples of ((ctx, number), line) for each line
928 in the file, where ctx is the filectx of the node where
929 in the file, where ctx is the filectx of the node where
929 that line was last changed; if the linenumber parameter is true, number is
930 that line was last changed; if the linenumber parameter is true, number is
930 the line number at the first appearance in the managed file, otherwise,
931 the line number at the first appearance in the managed file, otherwise,
931 number has a fixed value of False.
932 number has a fixed value of False.
932 '''
933 '''
933
934
934 def lines(text):
935 def lines(text):
935 if text.endswith("\n"):
936 if text.endswith("\n"):
936 return text.count("\n")
937 return text.count("\n")
937 return text.count("\n") + int(bool(text))
938 return text.count("\n") + int(bool(text))
938
939
939 if linenumber:
940 if linenumber:
940 def decorate(text, rev):
941 def decorate(text, rev):
941 return ([(rev, i) for i in xrange(1, lines(text) + 1)], text)
942 return ([(rev, i) for i in xrange(1, lines(text) + 1)], text)
942 else:
943 else:
943 def decorate(text, rev):
944 def decorate(text, rev):
944 return ([(rev, False)] * lines(text), text)
945 return ([(rev, False)] * lines(text), text)
945
946
946 def pair(parent, child):
947 def pair(parent, child):
947 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts)
948 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts)
948 for (a1, a2, b1, b2), t in blocks:
949 for (a1, a2, b1, b2), t in blocks:
949 # Changed blocks ('!') or blocks made only of blank lines ('~')
950 # Changed blocks ('!') or blocks made only of blank lines ('~')
950 # belong to the child.
951 # belong to the child.
951 if t == '=':
952 if t == '=':
952 child[0][b1:b2] = parent[0][a1:a2]
953 child[0][b1:b2] = parent[0][a1:a2]
953 return child
954 return child
954
955
955 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
956 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
956
957
957 def parents(f):
958 def parents(f):
958 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
959 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
959 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
960 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
960 # from the topmost introrev (= srcrev) down to p.linkrev() if it
961 # from the topmost introrev (= srcrev) down to p.linkrev() if it
961 # isn't an ancestor of the srcrev.
962 # isn't an ancestor of the srcrev.
962 f._changeid
963 f._changeid
963 pl = f.parents()
964 pl = f.parents()
964
965
965 # Don't return renamed parents if we aren't following.
966 # Don't return renamed parents if we aren't following.
966 if not follow:
967 if not follow:
967 pl = [p for p in pl if p.path() == f.path()]
968 pl = [p for p in pl if p.path() == f.path()]
968
969
969 # renamed filectx won't have a filelog yet, so set it
970 # renamed filectx won't have a filelog yet, so set it
970 # from the cache to save time
971 # from the cache to save time
971 for p in pl:
972 for p in pl:
972 if '_filelog' not in p.__dict__:
973 if '_filelog' not in p.__dict__:
973 p._filelog = getlog(p.path())
974 p._filelog = getlog(p.path())
974
975
975 return pl
976 return pl
976
977
977 # use linkrev to find the first changeset where self appeared
978 # use linkrev to find the first changeset where self appeared
978 base = self
979 base = self
979 introrev = self.introrev()
980 introrev = self.introrev()
980 if self.rev() != introrev:
981 if self.rev() != introrev:
981 base = self.filectx(self.filenode(), changeid=introrev)
982 base = self.filectx(self.filenode(), changeid=introrev)
982 if getattr(base, '_ancestrycontext', None) is None:
983 if getattr(base, '_ancestrycontext', None) is None:
983 cl = self._repo.changelog
984 cl = self._repo.changelog
984 if introrev is None:
985 if introrev is None:
985 # wctx is not inclusive, but works because _ancestrycontext
986 # wctx is not inclusive, but works because _ancestrycontext
986 # is used to test filelog revisions
987 # is used to test filelog revisions
987 ac = cl.ancestors([p.rev() for p in base.parents()],
988 ac = cl.ancestors([p.rev() for p in base.parents()],
988 inclusive=True)
989 inclusive=True)
989 else:
990 else:
990 ac = cl.ancestors([introrev], inclusive=True)
991 ac = cl.ancestors([introrev], inclusive=True)
991 base._ancestrycontext = ac
992 base._ancestrycontext = ac
992
993
993 # This algorithm would prefer to be recursive, but Python is a
994 # This algorithm would prefer to be recursive, but Python is a
994 # bit recursion-hostile. Instead we do an iterative
995 # bit recursion-hostile. Instead we do an iterative
995 # depth-first search.
996 # depth-first search.
996
997
997 # 1st DFS pre-calculates pcache and needed
998 # 1st DFS pre-calculates pcache and needed
998 visit = [base]
999 visit = [base]
999 pcache = {}
1000 pcache = {}
1000 needed = {base: 1}
1001 needed = {base: 1}
1001 while visit:
1002 while visit:
1002 f = visit.pop()
1003 f = visit.pop()
1003 if f in pcache:
1004 if f in pcache:
1004 continue
1005 continue
1005 pl = parents(f)
1006 pl = parents(f)
1006 pcache[f] = pl
1007 pcache[f] = pl
1007 for p in pl:
1008 for p in pl:
1008 needed[p] = needed.get(p, 0) + 1
1009 needed[p] = needed.get(p, 0) + 1
1009 if p not in pcache:
1010 if p not in pcache:
1010 visit.append(p)
1011 visit.append(p)
1011
1012
1012 # 2nd DFS does the actual annotate
1013 # 2nd DFS does the actual annotate
1013 visit[:] = [base]
1014 visit[:] = [base]
1014 hist = {}
1015 hist = {}
1015 while visit:
1016 while visit:
1016 f = visit[-1]
1017 f = visit[-1]
1017 if f in hist:
1018 if f in hist:
1018 visit.pop()
1019 visit.pop()
1019 continue
1020 continue
1020
1021
1021 ready = True
1022 ready = True
1022 pl = pcache[f]
1023 pl = pcache[f]
1023 for p in pl:
1024 for p in pl:
1024 if p not in hist:
1025 if p not in hist:
1025 ready = False
1026 ready = False
1026 visit.append(p)
1027 visit.append(p)
1027 if ready:
1028 if ready:
1028 visit.pop()
1029 visit.pop()
1029 curr = decorate(f.data(), f)
1030 curr = decorate(f.data(), f)
1030 for p in pl:
1031 for p in pl:
1031 curr = pair(hist[p], curr)
1032 curr = pair(hist[p], curr)
1032 if needed[p] == 1:
1033 if needed[p] == 1:
1033 del hist[p]
1034 del hist[p]
1034 del needed[p]
1035 del needed[p]
1035 else:
1036 else:
1036 needed[p] -= 1
1037 needed[p] -= 1
1037
1038
1038 hist[f] = curr
1039 hist[f] = curr
1039 del pcache[f]
1040 del pcache[f]
1040
1041
1041 return zip(hist[base][0], hist[base][1].splitlines(True))
1042 return zip(hist[base][0], hist[base][1].splitlines(True))
1042
1043
1043 def ancestors(self, followfirst=False):
1044 def ancestors(self, followfirst=False):
1044 visit = {}
1045 visit = {}
1045 c = self
1046 c = self
1046 if followfirst:
1047 if followfirst:
1047 cut = 1
1048 cut = 1
1048 else:
1049 else:
1049 cut = None
1050 cut = None
1050
1051
1051 while True:
1052 while True:
1052 for parent in c.parents()[:cut]:
1053 for parent in c.parents()[:cut]:
1053 visit[(parent.linkrev(), parent.filenode())] = parent
1054 visit[(parent.linkrev(), parent.filenode())] = parent
1054 if not visit:
1055 if not visit:
1055 break
1056 break
1056 c = visit.pop(max(visit))
1057 c = visit.pop(max(visit))
1057 yield c
1058 yield c
1058
1059
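# Editor's sketch (not part of the original module): a minimal driver for the
# annotate() method above, showing the ((ctx, number), line) pairs described
# in its docstring. `repo` and `path` are assumed placeholders for an opened
# localrepo and a tracked file.
def _annotate_example(repo, path):
    fctx = repo['tip'][path]
    for (actx, lineno), line in fctx.annotate(follow=True, linenumber=True):
        # actx is the filectx that last touched the line; lineno is its line
        # number at first appearance because linenumber=True was passed.
        repo.ui.write("%d:%s: %s" % (actx.rev(), lineno, line))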
1059 class filectx(basefilectx):
1060 class filectx(basefilectx):
1060 """A filecontext object makes access to data related to a particular
1061 """A filecontext object makes access to data related to a particular
1061 filerevision convenient."""
1062 filerevision convenient."""
1062 def __init__(self, repo, path, changeid=None, fileid=None,
1063 def __init__(self, repo, path, changeid=None, fileid=None,
1063 filelog=None, changectx=None):
1064 filelog=None, changectx=None):
1064 """changeid can be a changeset revision, node, or tag.
1065 """changeid can be a changeset revision, node, or tag.
1065 fileid can be a file revision or node."""
1066 fileid can be a file revision or node."""
1066 self._repo = repo
1067 self._repo = repo
1067 self._path = path
1068 self._path = path
1068
1069
1069 assert (changeid is not None
1070 assert (changeid is not None
1070 or fileid is not None
1071 or fileid is not None
1071 or changectx is not None), \
1072 or changectx is not None), \
1072 ("bad args: changeid=%r, fileid=%r, changectx=%r"
1073 ("bad args: changeid=%r, fileid=%r, changectx=%r"
1073 % (changeid, fileid, changectx))
1074 % (changeid, fileid, changectx))
1074
1075
1075 if filelog is not None:
1076 if filelog is not None:
1076 self._filelog = filelog
1077 self._filelog = filelog
1077
1078
1078 if changeid is not None:
1079 if changeid is not None:
1079 self._changeid = changeid
1080 self._changeid = changeid
1080 if changectx is not None:
1081 if changectx is not None:
1081 self._changectx = changectx
1082 self._changectx = changectx
1082 if fileid is not None:
1083 if fileid is not None:
1083 self._fileid = fileid
1084 self._fileid = fileid
1084
1085
1085 @propertycache
1086 @propertycache
1086 def _changectx(self):
1087 def _changectx(self):
1087 try:
1088 try:
1088 return changectx(self._repo, self._changeid)
1089 return changectx(self._repo, self._changeid)
1089 except error.FilteredRepoLookupError:
1090 except error.FilteredRepoLookupError:
1090 # Linkrev may point to any revision in the repository. When the
1091 # Linkrev may point to any revision in the repository. When the
1091 # repository is filtered this may lead to `filectx` trying to build
1092 # repository is filtered this may lead to `filectx` trying to build
1092 # `changectx` for a filtered revision. In such a case we fall back to
1093 # `changectx` for a filtered revision. In such a case we fall back to
1093 # creating `changectx` on the unfiltered version of the repository.
1094 # creating `changectx` on the unfiltered version of the repository.
1094 # This fallback should not be an issue because `changectx` from
1095 # This fallback should not be an issue because `changectx` from
1095 # `filectx` are not used in complex operations that care about
1096 # `filectx` are not used in complex operations that care about
1096 # filtering.
1097 # filtering.
1097 #
1098 #
1098 # This fallback is a cheap and dirty fix that prevents several
1099 # This fallback is a cheap and dirty fix that prevents several
1099 # crashes. It does not ensure the behavior is correct. However the
1100 # crashes. It does not ensure the behavior is correct. However the
1100 # behavior was not correct before filtering either and "incorrect
1101 # behavior was not correct before filtering either and "incorrect
1101 # behavior" is seen as better as "crash"
1102 # behavior" is seen as better as "crash"
1102 #
1103 #
1103 # Linkrevs have several serious issues with filtering that are
1104 # Linkrevs have several serious issues with filtering that are
1104 # complicated to solve. Proper handling of the issue here should be
1105 # complicated to solve. Proper handling of the issue here should be
1105 # considered when solving the linkrev issues is on the table.
1106 # considered when solving the linkrev issues is on the table.
1106 return changectx(self._repo.unfiltered(), self._changeid)
1107 return changectx(self._repo.unfiltered(), self._changeid)
1107
1108
1108 def filectx(self, fileid, changeid=None):
1109 def filectx(self, fileid, changeid=None):
1109 '''opens an arbitrary revision of the file without
1110 '''opens an arbitrary revision of the file without
1110 opening a new filelog'''
1111 opening a new filelog'''
1111 return filectx(self._repo, self._path, fileid=fileid,
1112 return filectx(self._repo, self._path, fileid=fileid,
1112 filelog=self._filelog, changeid=changeid)
1113 filelog=self._filelog, changeid=changeid)
1113
1114
1114 def rawdata(self):
1115 def rawdata(self):
1115 return self._filelog.revision(self._filenode, raw=True)
1116 return self._filelog.revision(self._filenode, raw=True)
1116
1117
1117 def data(self):
1118 def data(self):
1118 try:
1119 try:
1119 return self._filelog.read(self._filenode)
1120 return self._filelog.read(self._filenode)
1120 except error.CensoredNodeError:
1121 except error.CensoredNodeError:
1121 if self._repo.ui.config("censor", "policy", "abort") == "ignore":
1122 if self._repo.ui.config("censor", "policy", "abort") == "ignore":
1122 return ""
1123 return ""
1123 raise error.Abort(_("censored node: %s") % short(self._filenode),
1124 raise error.Abort(_("censored node: %s") % short(self._filenode),
1124 hint=_("set censor.policy to ignore errors"))
1125 hint=_("set censor.policy to ignore errors"))
1125
1126
1126 def size(self):
1127 def size(self):
1127 return self._filelog.size(self._filerev)
1128 return self._filelog.size(self._filerev)
1128
1129
1129 def renamed(self):
1130 def renamed(self):
1130 """check if file was actually renamed in this changeset revision
1131 """check if file was actually renamed in this changeset revision
1131
1132
1132 If a rename is logged in the file revision, we report the copy for the
1133 If a rename is logged in the file revision, we report the copy for the
1133 changeset only if the file revision's linkrev points back to the changeset
1134 changeset only if the file revision's linkrev points back to the changeset
1134 in question or both changeset parents contain different file revisions.
1135 in question or both changeset parents contain different file revisions.
1135 """
1136 """
1136
1137
1137 renamed = self._filelog.renamed(self._filenode)
1138 renamed = self._filelog.renamed(self._filenode)
1138 if not renamed:
1139 if not renamed:
1139 return renamed
1140 return renamed
1140
1141
1141 if self.rev() == self.linkrev():
1142 if self.rev() == self.linkrev():
1142 return renamed
1143 return renamed
1143
1144
1144 name = self.path()
1145 name = self.path()
1145 fnode = self._filenode
1146 fnode = self._filenode
1146 for p in self._changectx.parents():
1147 for p in self._changectx.parents():
1147 try:
1148 try:
1148 if fnode == p.filenode(name):
1149 if fnode == p.filenode(name):
1149 return None
1150 return None
1150 except error.LookupError:
1151 except error.LookupError:
1151 pass
1152 pass
1152 return renamed
1153 return renamed
1153
1154
1154 def children(self):
1155 def children(self):
1155 # hard for renames
1156 # hard for renames
1156 c = self._filelog.children(self._filenode)
1157 c = self._filelog.children(self._filenode)
1157 return [filectx(self._repo, self._path, fileid=x,
1158 return [filectx(self._repo, self._path, fileid=x,
1158 filelog=self._filelog) for x in c]
1159 filelog=self._filelog) for x in c]
1159
1160
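# Editor's sketch (not part of the original module): illustrates the filectx
# conveniences defined above. `repo` is an assumed, already-opened localrepo
# and 'foo.py' is a placeholder for a tracked path; changeid may be a
# revision, node, or tag per filectx.__init__.
def _filectx_example(repo):
    fctx = filectx(repo, 'foo.py', changeid='tip')
    repo.ui.write("%d bytes in %s at %s\n"
                  % (fctx.size(), fctx.path(), fctx))
    renamed = fctx.renamed()
    if renamed:
        # renamed() yields (source path, source filenode) when the copy
        # should be reported for this changeset.
        repo.ui.write("copied from %s\n" % renamed[0])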
1160 def _changesrange(fctx1, fctx2, linerange2, diffopts):
1161 def _changesrange(fctx1, fctx2, linerange2, diffopts):
1161 """Return `(diffinrange, linerange1)` where `diffinrange` is True
1162 """Return `(diffinrange, linerange1)` where `diffinrange` is True
1162 if diff from fctx2 to fctx1 has changes in linerange2 and
1163 if diff from fctx2 to fctx1 has changes in linerange2 and
1163 `linerange1` is the new line range for fctx1.
1164 `linerange1` is the new line range for fctx1.
1164 """
1165 """
1165 blocks = mdiff.allblocks(fctx1.data(), fctx2.data(), diffopts)
1166 blocks = mdiff.allblocks(fctx1.data(), fctx2.data(), diffopts)
1166 filteredblocks, linerange1 = mdiff.blocksinrange(blocks, linerange2)
1167 filteredblocks, linerange1 = mdiff.blocksinrange(blocks, linerange2)
1167 diffinrange = any(stype == '!' for _, stype in filteredblocks)
1168 diffinrange = any(stype == '!' for _, stype in filteredblocks)
1168 return diffinrange, linerange1
1169 return diffinrange, linerange1
1169
1170
1170 def blockancestors(fctx, fromline, toline, followfirst=False):
1171 def blockancestors(fctx, fromline, toline, followfirst=False):
1171 """Yield ancestors of `fctx` with respect to the block of lines within
1172 """Yield ancestors of `fctx` with respect to the block of lines within
1172 the `fromline`-`toline` range.
1173 the `fromline`-`toline` range.
1173 """
1174 """
1174 diffopts = patch.diffopts(fctx._repo.ui)
1175 diffopts = patch.diffopts(fctx._repo.ui)
1175 visit = {(fctx.linkrev(), fctx.filenode()): (fctx, (fromline, toline))}
1176 visit = {(fctx.linkrev(), fctx.filenode()): (fctx, (fromline, toline))}
1176 while visit:
1177 while visit:
1177 c, linerange2 = visit.pop(max(visit))
1178 c, linerange2 = visit.pop(max(visit))
1178 pl = c.parents()
1179 pl = c.parents()
1179 if followfirst:
1180 if followfirst:
1180 pl = pl[:1]
1181 pl = pl[:1]
1181 if not pl:
1182 if not pl:
1182 # The block originates from the initial revision.
1183 # The block originates from the initial revision.
1183 yield c, linerange2
1184 yield c, linerange2
1184 continue
1185 continue
1185 inrange = False
1186 inrange = False
1186 for p in pl:
1187 for p in pl:
1187 inrangep, linerange1 = _changesrange(p, c, linerange2, diffopts)
1188 inrangep, linerange1 = _changesrange(p, c, linerange2, diffopts)
1188 inrange = inrange or inrangep
1189 inrange = inrange or inrangep
1189 if linerange1[0] == linerange1[1]:
1190 if linerange1[0] == linerange1[1]:
1190 # Parent's linerange is empty, meaning that the block got
1191 # Parent's linerange is empty, meaning that the block got
1191 # introduced in this revision; no need to go further in this
1192 # introduced in this revision; no need to go further in this
1192 # branch.
1193 # branch.
1193 continue
1194 continue
1194 visit[p.linkrev(), p.filenode()] = p, linerange1
1195 visit[p.linkrev(), p.filenode()] = p, linerange1
1195 if inrange:
1196 if inrange:
1196 yield c, linerange2
1197 yield c, linerange2
1197
1198
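# Editor's sketch (not part of the original module): drives blockancestors()
# above to trace a block of lines back through history. `repo` and `path` are
# assumed placeholders; `fromline`/`toline` follow the same convention that
# mdiff.blocksinrange() expects, since they are passed through unchanged.
def _blockancestors_example(repo, path, fromline, toline):
    fctx = repo['tip'][path]
    for actx, (first, last) in blockancestors(fctx, fromline, toline):
        # Each ancestor filectx is yielded together with the line range the
        # block occupies in that revision.
        repo.ui.write("%s: lines %d-%d\n" % (actx, first, last))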
1198 class committablectx(basectx):
1199 class committablectx(basectx):
1199 """A committablectx object provides common functionality for a context that
1200 """A committablectx object provides common functionality for a context that
1200 wants the ability to commit, e.g. workingctx or memctx."""
1201 wants the ability to commit, e.g. workingctx or memctx."""
1201 def __init__(self, repo, text="", user=None, date=None, extra=None,
1202 def __init__(self, repo, text="", user=None, date=None, extra=None,
1202 changes=None):
1203 changes=None):
1203 self._repo = repo
1204 self._repo = repo
1204 self._rev = None
1205 self._rev = None
1205 self._node = None
1206 self._node = None
1206 self._text = text
1207 self._text = text
1207 if date:
1208 if date:
1208 self._date = util.parsedate(date)
1209 self._date = util.parsedate(date)
1209 if user:
1210 if user:
1210 self._user = user
1211 self._user = user
1211 if changes:
1212 if changes:
1212 self._status = changes
1213 self._status = changes
1213
1214
1214 self._extra = {}
1215 self._extra = {}
1215 if extra:
1216 if extra:
1216 self._extra = extra.copy()
1217 self._extra = extra.copy()
1217 if 'branch' not in self._extra:
1218 if 'branch' not in self._extra:
1218 try:
1219 try:
1219 branch = encoding.fromlocal(self._repo.dirstate.branch())
1220 branch = encoding.fromlocal(self._repo.dirstate.branch())
1220 except UnicodeDecodeError:
1221 except UnicodeDecodeError:
1221 raise error.Abort(_('branch name not in UTF-8!'))
1222 raise error.Abort(_('branch name not in UTF-8!'))
1222 self._extra['branch'] = branch
1223 self._extra['branch'] = branch
1223 if self._extra['branch'] == '':
1224 if self._extra['branch'] == '':
1224 self._extra['branch'] = 'default'
1225 self._extra['branch'] = 'default'
1225
1226
1226 def __str__(self):
1227 def __str__(self):
1227 return str(self._parents[0]) + "+"
1228 return str(self._parents[0]) + "+"
1228
1229
1229 def __nonzero__(self):
1230 def __nonzero__(self):
1230 return True
1231 return True
1231
1232
1232 def _buildflagfunc(self):
1233 def _buildflagfunc(self):
1233 # Create a fallback function for getting file flags when the
1234 # Create a fallback function for getting file flags when the
1234 # filesystem doesn't support them
1235 # filesystem doesn't support them
1235
1236
1236 copiesget = self._repo.dirstate.copies().get
1237 copiesget = self._repo.dirstate.copies().get
1237 parents = self.parents()
1238 parents = self.parents()
1238 if len(parents) < 2:
1239 if len(parents) < 2:
1239 # when we have one parent, it's easy: copy from parent
1240 # when we have one parent, it's easy: copy from parent
1240 man = parents[0].manifest()
1241 man = parents[0].manifest()
1241 def func(f):
1242 def func(f):
1242 f = copiesget(f, f)
1243 f = copiesget(f, f)
1243 return man.flags(f)
1244 return man.flags(f)
1244 else:
1245 else:
1245 # merges are tricky: we try to reconstruct the unstored
1246 # merges are tricky: we try to reconstruct the unstored
1246 # result from the merge (issue1802)
1247 # result from the merge (issue1802)
1247 p1, p2 = parents
1248 p1, p2 = parents
1248 pa = p1.ancestor(p2)
1249 pa = p1.ancestor(p2)
1249 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1250 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1250
1251
1251 def func(f):
1252 def func(f):
1252 f = copiesget(f, f) # may be wrong for merges with copies
1253 f = copiesget(f, f) # may be wrong for merges with copies
1253 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1254 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1254 if fl1 == fl2:
1255 if fl1 == fl2:
1255 return fl1
1256 return fl1
1256 if fl1 == fla:
1257 if fl1 == fla:
1257 return fl2
1258 return fl2
1258 if fl2 == fla:
1259 if fl2 == fla:
1259 return fl1
1260 return fl1
1260 return '' # punt for conflicts
1261 return '' # punt for conflicts
1261
1262
1262 return func
1263 return func
1263
1264
1264 @propertycache
1265 @propertycache
1265 def _flagfunc(self):
1266 def _flagfunc(self):
1266 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1267 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1267
1268
1268 @propertycache
1269 @propertycache
1269 def _status(self):
1270 def _status(self):
1270 return self._repo.status()
1271 return self._repo.status()
1271
1272
1272 @propertycache
1273 @propertycache
1273 def _user(self):
1274 def _user(self):
1274 return self._repo.ui.username()
1275 return self._repo.ui.username()
1275
1276
1276 @propertycache
1277 @propertycache
1277 def _date(self):
1278 def _date(self):
1278 return util.makedate()
1279 return util.makedate()
1279
1280
1280 def subrev(self, subpath):
1281 def subrev(self, subpath):
1281 return None
1282 return None
1282
1283
1283 def manifestnode(self):
1284 def manifestnode(self):
1284 return None
1285 return None
1285 def user(self):
1286 def user(self):
1286 return self._user or self._repo.ui.username()
1287 return self._user or self._repo.ui.username()
1287 def date(self):
1288 def date(self):
1288 return self._date
1289 return self._date
1289 def description(self):
1290 def description(self):
1290 return self._text
1291 return self._text
1291 def files(self):
1292 def files(self):
1292 return sorted(self._status.modified + self._status.added +
1293 return sorted(self._status.modified + self._status.added +
1293 self._status.removed)
1294 self._status.removed)
1294
1295
1295 def modified(self):
1296 def modified(self):
1296 return self._status.modified
1297 return self._status.modified
1297 def added(self):
1298 def added(self):
1298 return self._status.added
1299 return self._status.added
1299 def removed(self):
1300 def removed(self):
1300 return self._status.removed
1301 return self._status.removed
1301 def deleted(self):
1302 def deleted(self):
1302 return self._status.deleted
1303 return self._status.deleted
1303 def branch(self):
1304 def branch(self):
1304 return encoding.tolocal(self._extra['branch'])
1305 return encoding.tolocal(self._extra['branch'])
1305 def closesbranch(self):
1306 def closesbranch(self):
1306 return 'close' in self._extra
1307 return 'close' in self._extra
1307 def extra(self):
1308 def extra(self):
1308 return self._extra
1309 return self._extra
1309
1310
1310 def tags(self):
1311 def tags(self):
1311 return []
1312 return []
1312
1313
1313 def bookmarks(self):
1314 def bookmarks(self):
1314 b = []
1315 b = []
1315 for p in self.parents():
1316 for p in self.parents():
1316 b.extend(p.bookmarks())
1317 b.extend(p.bookmarks())
1317 return b
1318 return b
1318
1319
1319 def phase(self):
1320 def phase(self):
1320 phase = phases.draft # default phase to draft
1321 phase = phases.draft # default phase to draft
1321 for p in self.parents():
1322 for p in self.parents():
1322 phase = max(phase, p.phase())
1323 phase = max(phase, p.phase())
1323 return phase
1324 return phase
1324
1325
1325 def hidden(self):
1326 def hidden(self):
1326 return False
1327 return False
1327
1328
1328 def children(self):
1329 def children(self):
1329 return []
1330 return []
1330
1331
1331 def flags(self, path):
1332 def flags(self, path):
1332 if '_manifest' in self.__dict__:
1333 if '_manifest' in self.__dict__:
1333 try:
1334 try:
1334 return self._manifest.flags(path)
1335 return self._manifest.flags(path)
1335 except KeyError:
1336 except KeyError:
1336 return ''
1337 return ''
1337
1338
1338 try:
1339 try:
1339 return self._flagfunc(path)
1340 return self._flagfunc(path)
1340 except OSError:
1341 except OSError:
1341 return ''
1342 return ''
1342
1343
1343 def ancestor(self, c2):
1344 def ancestor(self, c2):
1344 """return the "best" ancestor context of self and c2"""
1345 """return the "best" ancestor context of self and c2"""
1345 return self._parents[0].ancestor(c2) # punt on two parents for now
1346 return self._parents[0].ancestor(c2) # punt on two parents for now
1346
1347
1347 def walk(self, match):
1348 def walk(self, match):
1348 '''Generates matching file names.'''
1349 '''Generates matching file names.'''
1349 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1350 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1350 True, False))
1351 True, False))
1351
1352
1352 def matches(self, match):
1353 def matches(self, match):
1353 return sorted(self._repo.dirstate.matches(match))
1354 return sorted(self._repo.dirstate.matches(match))
1354
1355
1355 def ancestors(self):
1356 def ancestors(self):
1356 for p in self._parents:
1357 for p in self._parents:
1357 yield p
1358 yield p
1358 for a in self._repo.changelog.ancestors(
1359 for a in self._repo.changelog.ancestors(
1359 [p.rev() for p in self._parents]):
1360 [p.rev() for p in self._parents]):
1360 yield changectx(self._repo, a)
1361 yield changectx(self._repo, a)
1361
1362
1362 def markcommitted(self, node):
1363 def markcommitted(self, node):
1363 """Perform post-commit cleanup necessary after committing this ctx
1364 """Perform post-commit cleanup necessary after committing this ctx
1364
1365
1365 Specifically, this updates the backing stores that this working
1366 Specifically, this updates the backing stores that this working
1366 context wraps to reflect the fact that the changes reflected by this
1367 context wraps to reflect the fact that the changes reflected by this
1367 workingctx have been committed. For example, it marks
1368 workingctx have been committed. For example, it marks
1368 modified and added files as normal in the dirstate.
1369 modified and added files as normal in the dirstate.
1369
1370
1370 """
1371 """
1371
1372
1372 self._repo.dirstate.beginparentchange()
1373 self._repo.dirstate.beginparentchange()
1373 for f in self.modified() + self.added():
1374 for f in self.modified() + self.added():
1374 self._repo.dirstate.normal(f)
1375 self._repo.dirstate.normal(f)
1375 for f in self.removed():
1376 for f in self.removed():
1376 self._repo.dirstate.drop(f)
1377 self._repo.dirstate.drop(f)
1377 self._repo.dirstate.setparents(node)
1378 self._repo.dirstate.setparents(node)
1378 self._repo.dirstate.endparentchange()
1379 self._repo.dirstate.endparentchange()
1379
1380
1380 # write changes out explicitly, because nesting wlock at
1381 # write changes out explicitly, because nesting wlock at
1381 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1382 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1382 # from immediately doing so for subsequent changing files
1383 # from immediately doing so for subsequent changing files
1383 self._repo.dirstate.write(self._repo.currenttransaction())
1384 self._repo.dirstate.write(self._repo.currenttransaction())
1384
1385
1385 class workingctx(committablectx):
1386 class workingctx(committablectx):
1386 """A workingctx object makes access to data related to
1387 """A workingctx object makes access to data related to
1387 the current working directory convenient.
1388 the current working directory convenient.
1388 date - any valid date string or (unixtime, offset), or None.
1389 date - any valid date string or (unixtime, offset), or None.
1389 user - username string, or None.
1390 user - username string, or None.
1390 extra - a dictionary of extra values, or None.
1391 extra - a dictionary of extra values, or None.
1391 changes - a list of file lists as returned by localrepo.status()
1392 changes - a list of file lists as returned by localrepo.status()
1392 or None to use the repository status.
1393 or None to use the repository status.
1393 """
1394 """
1394 def __init__(self, repo, text="", user=None, date=None, extra=None,
1395 def __init__(self, repo, text="", user=None, date=None, extra=None,
1395 changes=None):
1396 changes=None):
1396 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1397 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1397
1398
1398 def __iter__(self):
1399 def __iter__(self):
1399 d = self._repo.dirstate
1400 d = self._repo.dirstate
1400 for f in d:
1401 for f in d:
1401 if d[f] != 'r':
1402 if d[f] != 'r':
1402 yield f
1403 yield f
1403
1404
1404 def __contains__(self, key):
1405 def __contains__(self, key):
1405 return self._repo.dirstate[key] not in "?r"
1406 return self._repo.dirstate[key] not in "?r"
1406
1407
1407 def hex(self):
1408 def hex(self):
1408 return hex(wdirid)
1409 return hex(wdirid)
1409
1410
1410 @propertycache
1411 @propertycache
1411 def _parents(self):
1412 def _parents(self):
1412 p = self._repo.dirstate.parents()
1413 p = self._repo.dirstate.parents()
1413 if p[1] == nullid:
1414 if p[1] == nullid:
1414 p = p[:-1]
1415 p = p[:-1]
1415 return [changectx(self._repo, x) for x in p]
1416 return [changectx(self._repo, x) for x in p]
1416
1417
1417 def filectx(self, path, filelog=None):
1418 def filectx(self, path, filelog=None):
1418 """get a file context from the working directory"""
1419 """get a file context from the working directory"""
1419 return workingfilectx(self._repo, path, workingctx=self,
1420 return workingfilectx(self._repo, path, workingctx=self,
1420 filelog=filelog)
1421 filelog=filelog)
1421
1422
1422 def dirty(self, missing=False, merge=True, branch=True):
1423 def dirty(self, missing=False, merge=True, branch=True):
1423 "check whether a working directory is modified"
1424 "check whether a working directory is modified"
1424 # check subrepos first
1425 # check subrepos first
1425 for s in sorted(self.substate):
1426 for s in sorted(self.substate):
1426 if self.sub(s).dirty():
1427 if self.sub(s).dirty():
1427 return True
1428 return True
1428 # check current working dir
1429 # check current working dir
1429 return ((merge and self.p2()) or
1430 return ((merge and self.p2()) or
1430 (branch and self.branch() != self.p1().branch()) or
1431 (branch and self.branch() != self.p1().branch()) or
1431 self.modified() or self.added() or self.removed() or
1432 self.modified() or self.added() or self.removed() or
1432 (missing and self.deleted()))
1433 (missing and self.deleted()))
1433
1434
1434 def add(self, list, prefix=""):
1435 def add(self, list, prefix=""):
1435 join = lambda f: os.path.join(prefix, f)
1436 join = lambda f: os.path.join(prefix, f)
1436 with self._repo.wlock():
1437 with self._repo.wlock():
1437 ui, ds = self._repo.ui, self._repo.dirstate
1438 ui, ds = self._repo.ui, self._repo.dirstate
1438 rejected = []
1439 rejected = []
1439 lstat = self._repo.wvfs.lstat
1440 lstat = self._repo.wvfs.lstat
1440 for f in list:
1441 for f in list:
1441 scmutil.checkportable(ui, join(f))
1442 scmutil.checkportable(ui, join(f))
1442 try:
1443 try:
1443 st = lstat(f)
1444 st = lstat(f)
1444 except OSError:
1445 except OSError:
1445 ui.warn(_("%s does not exist!\n") % join(f))
1446 ui.warn(_("%s does not exist!\n") % join(f))
1446 rejected.append(f)
1447 rejected.append(f)
1447 continue
1448 continue
1448 if st.st_size > 10000000:
1449 if st.st_size > 10000000:
1449 ui.warn(_("%s: up to %d MB of RAM may be required "
1450 ui.warn(_("%s: up to %d MB of RAM may be required "
1450 "to manage this file\n"
1451 "to manage this file\n"
1451 "(use 'hg revert %s' to cancel the "
1452 "(use 'hg revert %s' to cancel the "
1452 "pending addition)\n")
1453 "pending addition)\n")
1453 % (f, 3 * st.st_size // 1000000, join(f)))
1454 % (f, 3 * st.st_size // 1000000, join(f)))
1454 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1455 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1455 ui.warn(_("%s not added: only files and symlinks "
1456 ui.warn(_("%s not added: only files and symlinks "
1456 "supported currently\n") % join(f))
1457 "supported currently\n") % join(f))
1457 rejected.append(f)
1458 rejected.append(f)
1458 elif ds[f] in 'amn':
1459 elif ds[f] in 'amn':
1459 ui.warn(_("%s already tracked!\n") % join(f))
1460 ui.warn(_("%s already tracked!\n") % join(f))
1460 elif ds[f] == 'r':
1461 elif ds[f] == 'r':
1461 ds.normallookup(f)
1462 ds.normallookup(f)
1462 else:
1463 else:
1463 ds.add(f)
1464 ds.add(f)
1464 return rejected
1465 return rejected
1465
1466
1466 def forget(self, files, prefix=""):
1467 def forget(self, files, prefix=""):
1467 join = lambda f: os.path.join(prefix, f)
1468 join = lambda f: os.path.join(prefix, f)
1468 with self._repo.wlock():
1469 with self._repo.wlock():
1469 rejected = []
1470 rejected = []
1470 for f in files:
1471 for f in files:
1471 if f not in self._repo.dirstate:
1472 if f not in self._repo.dirstate:
1472 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1473 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1473 rejected.append(f)
1474 rejected.append(f)
1474 elif self._repo.dirstate[f] != 'a':
1475 elif self._repo.dirstate[f] != 'a':
1475 self._repo.dirstate.remove(f)
1476 self._repo.dirstate.remove(f)
1476 else:
1477 else:
1477 self._repo.dirstate.drop(f)
1478 self._repo.dirstate.drop(f)
1478 return rejected
1479 return rejected
1479
1480
1480 def undelete(self, list):
1481 def undelete(self, list):
1481 pctxs = self.parents()
1482 pctxs = self.parents()
1482 with self._repo.wlock():
1483 with self._repo.wlock():
1483 for f in list:
1484 for f in list:
1484 if self._repo.dirstate[f] != 'r':
1485 if self._repo.dirstate[f] != 'r':
1485 self._repo.ui.warn(_("%s not removed!\n") % f)
1486 self._repo.ui.warn(_("%s not removed!\n") % f)
1486 else:
1487 else:
1487 fctx = pctxs[0][f] if f in pctxs[0] else pctxs[1][f]
1488 fctx = pctxs[0][f] if f in pctxs[0] else pctxs[1][f]
1488 t = fctx.data()
1489 t = fctx.data()
1489 self._repo.wwrite(f, t, fctx.flags())
1490 self._repo.wwrite(f, t, fctx.flags())
1490 self._repo.dirstate.normal(f)
1491 self._repo.dirstate.normal(f)
1491
1492
1492 def copy(self, source, dest):
1493 def copy(self, source, dest):
1493 try:
1494 try:
1494 st = self._repo.wvfs.lstat(dest)
1495 st = self._repo.wvfs.lstat(dest)
1495 except OSError as err:
1496 except OSError as err:
1496 if err.errno != errno.ENOENT:
1497 if err.errno != errno.ENOENT:
1497 raise
1498 raise
1498 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1499 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1499 return
1500 return
1500 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1501 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1501 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1502 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1502 "symbolic link\n") % dest)
1503 "symbolic link\n") % dest)
1503 else:
1504 else:
1504 with self._repo.wlock():
1505 with self._repo.wlock():
1505 if self._repo.dirstate[dest] in '?':
1506 if self._repo.dirstate[dest] in '?':
1506 self._repo.dirstate.add(dest)
1507 self._repo.dirstate.add(dest)
1507 elif self._repo.dirstate[dest] in 'r':
1508 elif self._repo.dirstate[dest] in 'r':
1508 self._repo.dirstate.normallookup(dest)
1509 self._repo.dirstate.normallookup(dest)
1509 self._repo.dirstate.copy(source, dest)
1510 self._repo.dirstate.copy(source, dest)
1510
1511
1511 def match(self, pats=[], include=None, exclude=None, default='glob',
1512 def match(self, pats=[], include=None, exclude=None, default='glob',
1512 listsubrepos=False, badfn=None):
1513 listsubrepos=False, badfn=None):
1513 r = self._repo
1514 r = self._repo
1514
1515
1515 # Only a case insensitive filesystem needs magic to translate user input
1516 # Only a case insensitive filesystem needs magic to translate user input
1516 # to actual case in the filesystem.
1517 # to actual case in the filesystem.
1517 if not util.fscasesensitive(r.root):
1518 if not util.fscasesensitive(r.root):
1518 return matchmod.icasefsmatcher(r.root, r.getcwd(), pats, include,
1519 return matchmod.icasefsmatcher(r.root, r.getcwd(), pats, include,
1519 exclude, default, r.auditor, self,
1520 exclude, default, r.auditor, self,
1520 listsubrepos=listsubrepos,
1521 listsubrepos=listsubrepos,
1521 badfn=badfn)
1522 badfn=badfn)
1522 return matchmod.match(r.root, r.getcwd(), pats,
1523 return matchmod.match(r.root, r.getcwd(), pats,
1523 include, exclude, default,
1524 include, exclude, default,
1524 auditor=r.auditor, ctx=self,
1525 auditor=r.auditor, ctx=self,
1525 listsubrepos=listsubrepos, badfn=badfn)
1526 listsubrepos=listsubrepos, badfn=badfn)
1526
1527
1527 def _filtersuspectsymlink(self, files):
1528 def _filtersuspectsymlink(self, files):
1528 if not files or self._repo.dirstate._checklink:
1529 if not files or self._repo.dirstate._checklink:
1529 return files
1530 return files
1530
1531
1531 # Symlink placeholders may get non-symlink-like contents
1532 # Symlink placeholders may get non-symlink-like contents
1532 # via user error or dereferencing by NFS or Samba servers,
1533 # via user error or dereferencing by NFS or Samba servers,
1533 # so we filter out any placeholders that don't look like a
1534 # so we filter out any placeholders that don't look like a
1534 # symlink
1535 # symlink
1535 sane = []
1536 sane = []
1536 for f in files:
1537 for f in files:
1537 if self.flags(f) == 'l':
1538 if self.flags(f) == 'l':
1538 d = self[f].data()
1539 d = self[f].data()
1539 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1540 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1540 self._repo.ui.debug('ignoring suspect symlink placeholder'
1541 self._repo.ui.debug('ignoring suspect symlink placeholder'
1541 ' "%s"\n' % f)
1542 ' "%s"\n' % f)
1542 continue
1543 continue
1543 sane.append(f)
1544 sane.append(f)
1544 return sane
1545 return sane
1545
1546
1546 def _checklookup(self, files):
1547 def _checklookup(self, files):
1547 # check for any possibly clean files
1548 # check for any possibly clean files
1548 if not files:
1549 if not files:
1549 return [], []
1550 return [], []
1550
1551
1551 modified = []
1552 modified = []
1552 fixup = []
1553 fixup = []
1553 pctx = self._parents[0]
1554 pctx = self._parents[0]
1554 # do a full compare of any files that might have changed
1555 # do a full compare of any files that might have changed
1555 for f in sorted(files):
1556 for f in sorted(files):
1556 if (f not in pctx or self.flags(f) != pctx.flags(f)
1557 if (f not in pctx or self.flags(f) != pctx.flags(f)
1557 or pctx[f].cmp(self[f])):
1558 or pctx[f].cmp(self[f])):
1558 modified.append(f)
1559 modified.append(f)
1559 else:
1560 else:
1560 fixup.append(f)
1561 fixup.append(f)
1561
1562
1562 # update dirstate for files that are actually clean
1563 # update dirstate for files that are actually clean
1563 if fixup:
1564 if fixup:
1564 try:
1565 try:
1565 # updating the dirstate is optional
1566 # updating the dirstate is optional
1566 # so we don't wait on the lock
1567 # so we don't wait on the lock
1567 # wlock can invalidate the dirstate, so cache normal _after_
1568 # wlock can invalidate the dirstate, so cache normal _after_
1568 # taking the lock
1569 # taking the lock
1569 with self._repo.wlock(False):
1570 with self._repo.wlock(False):
1570 normal = self._repo.dirstate.normal
1571 normal = self._repo.dirstate.normal
1571 for f in fixup:
1572 for f in fixup:
1572 normal(f)
1573 normal(f)
1573 # write changes out explicitly, because nesting
1574 # write changes out explicitly, because nesting
1574 # wlock at runtime may prevent 'wlock.release()'
1575 # wlock at runtime may prevent 'wlock.release()'
1575 # after this block from doing so for subsequent
1576 # after this block from doing so for subsequent
1576 # changing files
1577 # changing files
1577 self._repo.dirstate.write(self._repo.currenttransaction())
1578 self._repo.dirstate.write(self._repo.currenttransaction())
1578 except error.LockError:
1579 except error.LockError:
1579 pass
1580 pass
1580 return modified, fixup
1581 return modified, fixup
1581
1582
1582 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1583 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1583 unknown=False):
1584 unknown=False):
1584 '''Gets the status from the dirstate -- internal use only.'''
1585 '''Gets the status from the dirstate -- internal use only.'''
1585 listignored, listclean, listunknown = ignored, clean, unknown
1586 listignored, listclean, listunknown = ignored, clean, unknown
1586 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1587 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1587 subrepos = []
1588 subrepos = []
1588 if '.hgsub' in self:
1589 if '.hgsub' in self:
1589 subrepos = sorted(self.substate)
1590 subrepos = sorted(self.substate)
1590 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1591 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1591 listclean, listunknown)
1592 listclean, listunknown)
1592
1593
1593 # check for any possibly clean files
1594 # check for any possibly clean files
1594 if cmp:
1595 if cmp:
1595 modified2, fixup = self._checklookup(cmp)
1596 modified2, fixup = self._checklookup(cmp)
1596 s.modified.extend(modified2)
1597 s.modified.extend(modified2)
1597
1598
1598 # update dirstate for files that are actually clean
1599 # update dirstate for files that are actually clean
1599 if fixup and listclean:
1600 if fixup and listclean:
1600 s.clean.extend(fixup)
1601 s.clean.extend(fixup)
1601
1602
1602 if match.always():
1603 if match.always():
1603 # cache for performance
1604 # cache for performance
1604 if s.unknown or s.ignored or s.clean:
1605 if s.unknown or s.ignored or s.clean:
1605 # "_status" is cached with list*=False in the normal route
1606 # "_status" is cached with list*=False in the normal route
1606 self._status = scmutil.status(s.modified, s.added, s.removed,
1607 self._status = scmutil.status(s.modified, s.added, s.removed,
1607 s.deleted, [], [], [])
1608 s.deleted, [], [], [])
1608 else:
1609 else:
1609 self._status = s
1610 self._status = s
1610
1611
1611 return s
1612 return s
1612
1613
1613 @propertycache
1614 @propertycache
1614 def _manifest(self):
1615 def _manifest(self):
1615 """generate a manifest corresponding to the values in self._status
1616 """generate a manifest corresponding to the values in self._status
1616
1617
1617 This reuses the file nodeids from the parent, but we use special node
1618 This reuses the file nodeids from the parent, but we use special node
1618 identifiers for added and modified files. This is used by manifest
1619 identifiers for added and modified files. This is used by manifest
1619 merge to see that files are different and by update logic to avoid
1620 merge to see that files are different and by update logic to avoid
1620 deleting newly added files.
1621 deleting newly added files.
1621 """
1622 """
1622 return self._buildstatusmanifest(self._status)
1623 return self._buildstatusmanifest(self._status)
1623
1624
1624 def _buildstatusmanifest(self, status):
1625 def _buildstatusmanifest(self, status):
1625 """Builds a manifest that includes the given status results."""
1626 """Builds a manifest that includes the given status results."""
1626 parents = self.parents()
1627 parents = self.parents()
1627
1628
1628 man = parents[0].manifest().copy()
1629 man = parents[0].manifest().copy()
1629
1630
1630 ff = self._flagfunc
1631 ff = self._flagfunc
1631 for i, l in ((addednodeid, status.added),
1632 for i, l in ((addednodeid, status.added),
1632 (modifiednodeid, status.modified)):
1633 (modifiednodeid, status.modified)):
1633 for f in l:
1634 for f in l:
1634 man[f] = i
1635 man[f] = i
1635 try:
1636 try:
1636 man.setflag(f, ff(f))
1637 man.setflag(f, ff(f))
1637 except OSError:
1638 except OSError:
1638 pass
1639 pass
1639
1640
1640 for f in status.deleted + status.removed:
1641 for f in status.deleted + status.removed:
1641 if f in man:
1642 if f in man:
1642 del man[f]
1643 del man[f]
1643
1644
1644 return man
1645 return man
1645
1646
1646 def _buildstatus(self, other, s, match, listignored, listclean,
1647 def _buildstatus(self, other, s, match, listignored, listclean,
1647 listunknown):
1648 listunknown):
1648 """build a status with respect to another context
1649 """build a status with respect to another context
1649
1650
1650 This includes logic for maintaining the fast path of status when
1651 This includes logic for maintaining the fast path of status when
1651 comparing the working directory against its parent, which is to skip
1652 comparing the working directory against its parent, which is to skip
1652 building a new manifest if self (working directory) is not comparing
1653 building a new manifest if self (working directory) is not comparing
1653 against its parent (repo['.']).
1654 against its parent (repo['.']).
1654 """
1655 """
1655 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1656 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1656 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1657 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1657 # might have accidentally ended up with the entire contents of the file
1658 # might have accidentally ended up with the entire contents of the file
1658 # they are supposed to be linking to.
1659 # they are supposed to be linking to.
1659 s.modified[:] = self._filtersuspectsymlink(s.modified)
1660 s.modified[:] = self._filtersuspectsymlink(s.modified)
1660 if other != self._repo['.']:
1661 if other != self._repo['.']:
1661 s = super(workingctx, self)._buildstatus(other, s, match,
1662 s = super(workingctx, self)._buildstatus(other, s, match,
1662 listignored, listclean,
1663 listignored, listclean,
1663 listunknown)
1664 listunknown)
1664 return s
1665 return s
1665
1666
1666 def _matchstatus(self, other, match):
1667 def _matchstatus(self, other, match):
1667 """override the match method with a filter for directory patterns
1668 """override the match method with a filter for directory patterns
1668
1669
1669 We use inheritance to customize the match.bad method only in cases of
1670 We use inheritance to customize the match.bad method only in cases of
1670 workingctx since it belongs only to the working directory when
1671 workingctx since it belongs only to the working directory when
1671 comparing against the parent changeset.
1672 comparing against the parent changeset.
1672
1673
1673 If we aren't comparing against the working directory's parent, then we
1674 If we aren't comparing against the working directory's parent, then we
1674 just use the default match object sent to us.
1675 just use the default match object sent to us.
1675 """
1676 """
1676 superself = super(workingctx, self)
1677 superself = super(workingctx, self)
1677 match = superself._matchstatus(other, match)
1678 match = superself._matchstatus(other, match)
1678 if other != self._repo['.']:
1679 if other != self._repo['.']:
1679 def bad(f, msg):
1680 def bad(f, msg):
1680 # 'f' may be a directory pattern from 'match.files()',
1681 # 'f' may be a directory pattern from 'match.files()',
1681 # so 'f not in ctx1' is not enough
1682 # so 'f not in ctx1' is not enough
1682 if f not in other and not other.hasdir(f):
1683 if f not in other and not other.hasdir(f):
1683 self._repo.ui.warn('%s: %s\n' %
1684 self._repo.ui.warn('%s: %s\n' %
1684 (self._repo.dirstate.pathto(f), msg))
1685 (self._repo.dirstate.pathto(f), msg))
1685 match.bad = bad
1686 match.bad = bad
1686 return match
1687 return match
1687
1688
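# Editor's sketch (not part of the original module): exercises the workingctx
# API above. `repo` is an assumed, already-opened localrepo.
def _workingctx_example(repo):
    wctx = workingctx(repo)
    if wctx.dirty(missing=True):
        # dirty() accounts for pending merges, branch changes and
        # modified/added/removed files (plus deleted ones with missing=True).
        repo.ui.write("uncommitted changes:\n")
        for f in wctx.modified() + wctx.added() + wctx.removed():
            repo.ui.write("  %s\n" % f)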
1688 class committablefilectx(basefilectx):
1689 class committablefilectx(basefilectx):
1689 """A committablefilectx provides common functionality for a file context
1690 """A committablefilectx provides common functionality for a file context
1690 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1691 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1691 def __init__(self, repo, path, filelog=None, ctx=None):
1692 def __init__(self, repo, path, filelog=None, ctx=None):
1692 self._repo = repo
1693 self._repo = repo
1693 self._path = path
1694 self._path = path
1694 self._changeid = None
1695 self._changeid = None
1695 self._filerev = self._filenode = None
1696 self._filerev = self._filenode = None
1696
1697
1697 if filelog is not None:
1698 if filelog is not None:
1698 self._filelog = filelog
1699 self._filelog = filelog
1699 if ctx:
1700 if ctx:
1700 self._changectx = ctx
1701 self._changectx = ctx
1701
1702
1702 def __nonzero__(self):
1703 def __nonzero__(self):
1703 return True
1704 return True
1704
1705
1705 def linkrev(self):
1706 def linkrev(self):
1706 # linked to self._changectx no matter if file is modified or not
1707 # linked to self._changectx no matter if file is modified or not
1707 return self.rev()
1708 return self.rev()
1708
1709
1709 def parents(self):
1710 def parents(self):
1710 '''return parent filectxs, following copies if necessary'''
1711 '''return parent filectxs, following copies if necessary'''
1711 def filenode(ctx, path):
1712 def filenode(ctx, path):
1712 return ctx._manifest.get(path, nullid)
1713 return ctx._manifest.get(path, nullid)
1713
1714
1714 path = self._path
1715 path = self._path
1715 fl = self._filelog
1716 fl = self._filelog
1716 pcl = self._changectx._parents
1717 pcl = self._changectx._parents
1717 renamed = self.renamed()
1718 renamed = self.renamed()
1718
1719
1719 if renamed:
1720 if renamed:
1720 pl = [renamed + (None,)]
1721 pl = [renamed + (None,)]
1721 else:
1722 else:
1722 pl = [(path, filenode(pcl[0], path), fl)]
1723 pl = [(path, filenode(pcl[0], path), fl)]
1723
1724
1724 for pc in pcl[1:]:
1725 for pc in pcl[1:]:
1725 pl.append((path, filenode(pc, path), fl))
1726 pl.append((path, filenode(pc, path), fl))
1726
1727
1727 return [self._parentfilectx(p, fileid=n, filelog=l)
1728 return [self._parentfilectx(p, fileid=n, filelog=l)
1728 for p, n, l in pl if n != nullid]
1729 for p, n, l in pl if n != nullid]
1729
1730
1730 def children(self):
1731 def children(self):
1731 return []
1732 return []
1732
1733
1733 class workingfilectx(committablefilectx):
1734 class workingfilectx(committablefilectx):
1734 """A workingfilectx object makes access to data related to a particular
1735 """A workingfilectx object makes access to data related to a particular
1735 file in the working directory convenient."""
1736 file in the working directory convenient."""
1736 def __init__(self, repo, path, filelog=None, workingctx=None):
1737 def __init__(self, repo, path, filelog=None, workingctx=None):
1737 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1738 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1738
1739
1739 @propertycache
1740 @propertycache
1740 def _changectx(self):
1741 def _changectx(self):
1741 return workingctx(self._repo)
1742 return workingctx(self._repo)
1742
1743
1743 def data(self):
1744 def data(self):
1744 return self._repo.wread(self._path)
1745 return self._repo.wread(self._path)
1745 def renamed(self):
1746 def renamed(self):
1746 rp = self._repo.dirstate.copied(self._path)
1747 rp = self._repo.dirstate.copied(self._path)
1747 if not rp:
1748 if not rp:
1748 return None
1749 return None
1749 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1750 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1750
1751
1751 def size(self):
1752 def size(self):
1752 return self._repo.wvfs.lstat(self._path).st_size
1753 return self._repo.wvfs.lstat(self._path).st_size
1753 def date(self):
1754 def date(self):
1754 t, tz = self._changectx.date()
1755 t, tz = self._changectx.date()
1755 try:
1756 try:
1756 return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
1757 return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
1757 except OSError as err:
1758 except OSError as err:
1758 if err.errno != errno.ENOENT:
1759 if err.errno != errno.ENOENT:
1759 raise
1760 raise
1760 return (t, tz)
1761 return (t, tz)
1761
1762
1762 def cmp(self, fctx):
1763 def cmp(self, fctx):
1763 """compare with other file context
1764 """compare with other file context
1764
1765
1765 returns True if different than fctx.
1766 returns True if different than fctx.
1766 """
1767 """
1767 # fctx should be a filectx (not a workingfilectx)
1768 # fctx should be a filectx (not a workingfilectx)
1768 # invert comparison to reuse the same code path
1769 # invert comparison to reuse the same code path
1769 return fctx.cmp(self)
1770 return fctx.cmp(self)
1770
1771
1771 def remove(self, ignoremissing=False):
1772 def remove(self, ignoremissing=False):
1772 """wraps unlink for a repo's working directory"""
1773 """wraps unlink for a repo's working directory"""
1773 self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing)
1774 self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing)
1774
1775
1775 def write(self, data, flags):
1776 def write(self, data, flags):
1776 """wraps repo.wwrite"""
1777 """wraps repo.wwrite"""
1777 self._repo.wwrite(self._path, data, flags)
1778 self._repo.wwrite(self._path, data, flags)
1778
1779
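# Editor's sketch (not part of the original module): compares a file in the
# working directory against its pristine copy using the workingfilectx
# wrappers above. `repo` is an assumed localrepo and 'foo.py' a placeholder
# path that is expected to exist both in the working directory and in '.'.
def _workingfilectx_example(repo):
    wfctx = workingctx(repo).filectx('foo.py')
    # cmp() delegates to the pristine filectx, so True means "differs".
    if wfctx.cmp(repo['.']['foo.py']):
        repo.ui.write("%s has local modifications (%d bytes)\n"
                      % (wfctx.path(), wfctx.size()))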
class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """
    def __init__(self, repo, changes,
                 text="", user=None, date=None, extra=None):
        super(workingctx, self).__init__(repo, text, user, date, extra,
                                         changes)

    def _dirstatestatus(self, match=None, ignored=False, clean=False,
                        unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        if clean:
            clean = [f for f in self._manifest if f not in self._changedset]
        else:
            clean = []
        return scmutil.status([f for f in self._status.modified if match(f)],
                              [f for f in self._status.added if match(f)],
                              [f for f in self._status.removed if match(f)],
                              [], [], [], clean)

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        changed = set(self._status.modified)
        changed.update(self._status.added)
        changed.update(self._status.removed)
        return changed

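# Illustrative sketch (not part of the original file): how a caller holding an
# explicit status can use workingcommitctx so that only the files being
# committed are visible through the context, roughly what localrepo.commit
# does internally. The repository object and file names are hypothetical.

def _workingcommitctx_sketch(repo):
    # pretend only 'a.txt' (modified) and 'b.txt' (added) are being committed
    changes = scmutil.status(['a.txt'], ['b.txt'], [], [], [], [], [])
    ctx = workingcommitctx(repo, changes,
                           text='example commit message', user='alice')
    # other working directory changes are hidden by this context; matched
    # but untouched files report as clean
    return ctx._dirstatestatus(clean=True)
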
def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    cache = {}

    def getfilectx(repo, memctx, path):
        if path not in cache:
            cache[path] = func(repo, memctx, path)
        return cache[path]

    return getfilectx

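# Illustrative sketch (not part of the original file): wrapping an expensive
# filectxfn with makecachingfilectxfn so each path is computed only once per
# commit. The callback and its data are hypothetical; memctx (defined just
# below) is what would actually invoke the wrapped function.

def _cachingfilectxfn_sketch():
    def expensivefilectxfn(repo, memctx, path):
        # imagine this regenerates or converts file data at some cost
        data = 'contents of %s\n' % path
        return memfilectx(repo, path, data, memctx=memctx)

    cached = makecachingfilectxfn(expensivefilectxfn)
    # same (repo, memctx, path) signature; repeated calls for one path
    # return the memoized memfilectx instead of recomputing it
    return cached
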
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time, while the
    related file data is made available through a callback
    mechanism.  'repo' is the current localrepo, 'parents' is a
    sequence of two parent revision identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists the
    names of files touched by the revision (normalized and relative to
    the repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    the requested file, relative to the repository root. It is fired by
    the commit function for every file in 'files', but the call order
    is undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn returns None (Mercurial
    <= 3.1 expected an IOError to be raised instead; see
    _returnnoneformissingfiles below). Moved files are represented by
    marking the source file removed and the new file added with copy
    information (see memfilectx).

    user receives the committer name and defaults to the current
    repository username, date is the commit date in any format
    supported by util.parsedate() and defaults to the current date,
    extra is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [changectx(self._repo, p) for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        self.substate = {}

        # if store is not callable, wrap it in a function
        if not callable(filectxfn):
            def getfilectx(repo, memctx, path):
                fctx = filectxfn[path]
                # this is weird but apparently we only keep track of one parent
                # (why not only store that instead of a tuple?)
                copied = fctx.renamed()
                if copied:
                    copied = copied[0]
                return memfilectx(repo, path, fctx.data(),
                                  islink=fctx.islink(), isexec=fctx.isexec(),
                                  copied=copied, memctx=memctx)
            self._filectxfn = getfilectx
        else:
            # memoizing increases performance for e.g. vcs convert scenarios.
            self._filectxfn = makecachingfilectxfn(filectxfn)

        if extra:
            self._extra = extra.copy()
        else:
            self._extra = {}

        if self._extra.get('branch', '') == '':
            self._extra['branch'] = 'default'

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            p1node = nullid
            p2node = nullid
            p = pctx[f].parents() # if file isn't in pctx, check p2?
            if len(p) > 0:
                p1node = p[0].filenode()
                if len(p) > 1:
                    p2node = p[1].filenode()
            man[f] = revlog.hash(self[f].data(), p1node, p2node)

        for f in self._status.added:
            man[f] = revlog.hash(self[f].data(), nullid, nullid)

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])

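# Illustrative sketch (not part of the original file): a minimal in-memory
# commit built with memctx and a callable filectxfn. The repository object,
# file names and contents are hypothetical; a real caller would typically
# hold the repo's wlock/lock around commitctx().

def _memctx_sketch(repo):
    def filectxfn(repo, memctx, path):
        if path == 'removed.txt':
            return None  # mark the file as removed in the new revision
        # memfilectx is defined just below
        return memfilectx(repo, path, 'new contents\n', memctx=memctx)

    parents = (repo['.'].node(), None)  # None stands in for a missing parent
    ctx = memctx(repo, parents, 'example: in-memory commit',
                 ['touched.txt', 'removed.txt'], filectxfn,
                 user='alice', date='0 0')
    return repo.commitctx(ctx)  # equivalently: ctx.commit()
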
class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, path, data, islink=False,
                 isexec=False, copied=None, memctx=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, memctx)
        self._data = data
        self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
        self._copied = None
        if copied:
            self._copied = (copied, nullid)

    def data(self):
        return self._data
    def size(self):
        return len(self.data())
    def flags(self):
        return self._flags
    def renamed(self):
        return self._copied

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags):
        """wraps repo.wwrite"""
        self._data = data

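# Illustrative sketch (not part of the original file): memctx also accepts a
# mapping from path to an existing file context instead of a callable; the
# non-callable branch in memctx.__init__ then turns each entry into a
# memfilectx via the dict lookup shown earlier. The repository and revision
# used here are hypothetical.

def _memctx_from_mapping_sketch(repo):
    src = repo['.']
    # reuse the file contexts of the files touched by ``src`` that still exist
    store = dict((path, src[path]) for path in src.files() if path in src)
    ctx = memctx(repo, (src.p1().node(), None),
                 'example: rebuilt from existing file contexts',
                 sorted(store), store, user=src.user(), date=src.date())
    return ctx.commit()
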
class metadataonlyctx(committablectx):
    """Like memctx, but reusing the manifest of a different commit.
    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time. 'repo' is the
    current localrepo, 'originalctx' is the revision whose manifest we're
    reusing, 'parents' is a sequence of two parent revision identifiers (pass
    None for every missing parent), and 'text' is the commit message.

    user receives the committer name and defaults to the current repository
    username, date is the commit date in any format supported by
    util.parsedate() and defaults to the current date, extra is a dictionary
    of metadata or is left empty.
    """
    def __new__(cls, repo, originalctx, *args, **kwargs):
        return super(metadataonlyctx, cls).__new__(cls, repo)

    def __init__(self, repo, originalctx, parents, text, user=None, date=None,
                 extra=None, editor=False):
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        self._manifestnode = originalctx.manifestnode()
        parents = [(p or nullid) for p in parents]
        p1, p2 = self._parents = [changectx(self._repo, p) for p in parents]

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        mp1, mp2 = self.manifestctx().parents
        if p1 != nullid and p1.manifestctx().node() != mp1:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p1 doesn\'t match the new ctx p1')
        if p2 != nullid and p2.manifestctx().node() != mp2:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p2 doesn\'t match the new ctx p2')

        self._files = originalctx.files()
        self.substate = {}

        if extra:
            self._extra = extra.copy()
        else:
            self._extra = {}

        if self._extra.get('branch', '') == '':
            self._extra['branch'] = 'default'

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        return self._manifestnode

    @propertycache
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the ``origctx``
        and parents manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "metadataonlyctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])

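# Illustrative sketch (not part of the original file): using metadataonlyctx
# to rewrite only the metadata of an existing changeset (here, its message)
# while reusing its manifest. The repository and message are hypothetical; a
# real caller would also hold locks and obsolete or strip the old node.

def _metadataonlyctx_sketch(repo):
    old = repo['.']
    new = metadataonlyctx(repo, old,
                          parents=(old.p1().node(), old.p2().node()),
                          text='reworded commit message',
                          user=old.user(), date=old.date(),
                          extra=old.extra())
    return new.commit()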