context: add manifestctx property on changectx...
Durham Goode
r30344:362f6f65 default
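This changeset caches the manifest context object on changectx as _manifestctx and exposes it through a new manifestctx() accessor; _manifest and _manifestdelta are now derived from that one cached object instead of looking up repo.manifestlog[...] separately. A minimal usage sketch in Python, assuming a localrepo instance named repo (illustrative only, not part of the patch):

    ctx = repo['tip']
    # Existing API: parse the full manifest for this changeset.
    mf = ctx.manifest()        # equivalent to what ctx.manifestctx().read() returns
    # New accessor: keep the lighter manifestctx around and decide later
    # whether to read the whole manifest or only a delta.
    mctx = ctx.manifestctx()
    full = mctx.read()         # what changectx._manifest now evaluates to
    delta = mctx.readdelta()   # what changectx._manifestdelta now evaluates to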
@@ -1,1984 +1,1989 @@
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import os
11 import os
12 import re
12 import re
13 import stat
13 import stat
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 bin,
17 bin,
18 hex,
18 hex,
19 nullid,
19 nullid,
20 nullrev,
20 nullrev,
21 short,
21 short,
22 wdirid,
22 wdirid,
23 )
23 )
24 from . import (
24 from . import (
25 encoding,
25 encoding,
26 error,
26 error,
27 fileset,
27 fileset,
28 match as matchmod,
28 match as matchmod,
29 mdiff,
29 mdiff,
30 obsolete as obsmod,
30 obsolete as obsmod,
31 patch,
31 patch,
32 phases,
32 phases,
33 repoview,
33 repoview,
34 revlog,
34 revlog,
35 scmutil,
35 scmutil,
36 subrepo,
36 subrepo,
37 util,
37 util,
38 )
38 )
39
39
40 propertycache = util.propertycache
40 propertycache = util.propertycache
41
41
42 # Phony node value to stand in for new files in some uses of
42 # Phony node value to stand in for new files in some uses of
43 # manifests. Manifests support 21-byte hashes for nodes which are
43 # manifests. Manifests support 21-byte hashes for nodes which are
44 # dirty in the working copy.
44 # dirty in the working copy.
45 _newnode = '!' * 21
45 _newnode = '!' * 21
46
46
47 nonascii = re.compile(r'[^\x21-\x7f]').search
47 nonascii = re.compile(r'[^\x21-\x7f]').search
48
48
49 class basectx(object):
49 class basectx(object):
50 """A basectx object represents the common logic for its children:
50 """A basectx object represents the common logic for its children:
51 changectx: read-only context that is already present in the repo,
51 changectx: read-only context that is already present in the repo,
52 workingctx: a context that represents the working directory and can
52 workingctx: a context that represents the working directory and can
53 be committed,
53 be committed,
54 memctx: a context that represents changes in-memory and can also
54 memctx: a context that represents changes in-memory and can also
55 be committed."""
55 be committed."""
56 def __new__(cls, repo, changeid='', *args, **kwargs):
56 def __new__(cls, repo, changeid='', *args, **kwargs):
57 if isinstance(changeid, basectx):
57 if isinstance(changeid, basectx):
58 return changeid
58 return changeid
59
59
60 o = super(basectx, cls).__new__(cls)
60 o = super(basectx, cls).__new__(cls)
61
61
62 o._repo = repo
62 o._repo = repo
63 o._rev = nullrev
63 o._rev = nullrev
64 o._node = nullid
64 o._node = nullid
65
65
66 return o
66 return o
67
67
68 def __str__(self):
68 def __str__(self):
69 return short(self.node())
69 return short(self.node())
70
70
71 def __int__(self):
71 def __int__(self):
72 return self.rev()
72 return self.rev()
73
73
74 def __repr__(self):
74 def __repr__(self):
75 return "<%s %s>" % (type(self).__name__, str(self))
75 return "<%s %s>" % (type(self).__name__, str(self))
76
76
77 def __eq__(self, other):
77 def __eq__(self, other):
78 try:
78 try:
79 return type(self) == type(other) and self._rev == other._rev
79 return type(self) == type(other) and self._rev == other._rev
80 except AttributeError:
80 except AttributeError:
81 return False
81 return False
82
82
83 def __ne__(self, other):
83 def __ne__(self, other):
84 return not (self == other)
84 return not (self == other)
85
85
86 def __contains__(self, key):
86 def __contains__(self, key):
87 return key in self._manifest
87 return key in self._manifest
88
88
89 def __getitem__(self, key):
89 def __getitem__(self, key):
90 return self.filectx(key)
90 return self.filectx(key)
91
91
92 def __iter__(self):
92 def __iter__(self):
93 return iter(self._manifest)
93 return iter(self._manifest)
94
94
95 def _manifestmatches(self, match, s):
95 def _manifestmatches(self, match, s):
96 """generate a new manifest filtered by the match argument
96 """generate a new manifest filtered by the match argument
97
97
98 This method is for internal use only and mainly exists to provide an
98 This method is for internal use only and mainly exists to provide an
99 object oriented way for other contexts to customize the manifest
99 object oriented way for other contexts to customize the manifest
100 generation.
100 generation.
101 """
101 """
102 return self.manifest().matches(match)
102 return self.manifest().matches(match)
103
103
104 def _matchstatus(self, other, match):
104 def _matchstatus(self, other, match):
105 """return match.always if match is none
105 """return match.always if match is none
106
106
107 This internal method provides a way for child objects to override the
107 This internal method provides a way for child objects to override the
108 match operator.
108 match operator.
109 """
109 """
110 return match or matchmod.always(self._repo.root, self._repo.getcwd())
110 return match or matchmod.always(self._repo.root, self._repo.getcwd())
111
111
112 def _buildstatus(self, other, s, match, listignored, listclean,
112 def _buildstatus(self, other, s, match, listignored, listclean,
113 listunknown):
113 listunknown):
114 """build a status with respect to another context"""
114 """build a status with respect to another context"""
115 # Load earliest manifest first for caching reasons. More specifically,
115 # Load earliest manifest first for caching reasons. More specifically,
116 # if you have revisions 1000 and 1001, 1001 is probably stored as a
116 # if you have revisions 1000 and 1001, 1001 is probably stored as a
117 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
117 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
118 # 1000 and cache it so that when you read 1001, we just need to apply a
118 # 1000 and cache it so that when you read 1001, we just need to apply a
119 # delta to what's in the cache. So that's one full reconstruction + one
119 # delta to what's in the cache. So that's one full reconstruction + one
120 # delta application.
120 # delta application.
121 if self.rev() is not None and self.rev() < other.rev():
121 if self.rev() is not None and self.rev() < other.rev():
122 self.manifest()
122 self.manifest()
123 mf1 = other._manifestmatches(match, s)
123 mf1 = other._manifestmatches(match, s)
124 mf2 = self._manifestmatches(match, s)
124 mf2 = self._manifestmatches(match, s)
125
125
126 modified, added = [], []
126 modified, added = [], []
127 removed = []
127 removed = []
128 clean = []
128 clean = []
129 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
129 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
130 deletedset = set(deleted)
130 deletedset = set(deleted)
131 d = mf1.diff(mf2, clean=listclean)
131 d = mf1.diff(mf2, clean=listclean)
132 for fn, value in d.iteritems():
132 for fn, value in d.iteritems():
133 if fn in deletedset:
133 if fn in deletedset:
134 continue
134 continue
135 if value is None:
135 if value is None:
136 clean.append(fn)
136 clean.append(fn)
137 continue
137 continue
138 (node1, flag1), (node2, flag2) = value
138 (node1, flag1), (node2, flag2) = value
139 if node1 is None:
139 if node1 is None:
140 added.append(fn)
140 added.append(fn)
141 elif node2 is None:
141 elif node2 is None:
142 removed.append(fn)
142 removed.append(fn)
143 elif flag1 != flag2:
143 elif flag1 != flag2:
144 modified.append(fn)
144 modified.append(fn)
145 elif node2 != _newnode:
145 elif node2 != _newnode:
146 # When comparing files between two commits, we save time by
146 # When comparing files between two commits, we save time by
147 # not comparing the file contents when the nodeids differ.
147 # not comparing the file contents when the nodeids differ.
148 # Note that this means we incorrectly report a reverted change
148 # Note that this means we incorrectly report a reverted change
149 # to a file as a modification.
149 # to a file as a modification.
150 modified.append(fn)
150 modified.append(fn)
151 elif self[fn].cmp(other[fn]):
151 elif self[fn].cmp(other[fn]):
152 modified.append(fn)
152 modified.append(fn)
153 else:
153 else:
154 clean.append(fn)
154 clean.append(fn)
155
155
156 if removed:
156 if removed:
157 # need to filter files if they are already reported as removed
157 # need to filter files if they are already reported as removed
158 unknown = [fn for fn in unknown if fn not in mf1]
158 unknown = [fn for fn in unknown if fn not in mf1]
159 ignored = [fn for fn in ignored if fn not in mf1]
159 ignored = [fn for fn in ignored if fn not in mf1]
160 # if they're deleted, don't report them as removed
160 # if they're deleted, don't report them as removed
161 removed = [fn for fn in removed if fn not in deletedset]
161 removed = [fn for fn in removed if fn not in deletedset]
162
162
163 return scmutil.status(modified, added, removed, deleted, unknown,
163 return scmutil.status(modified, added, removed, deleted, unknown,
164 ignored, clean)
164 ignored, clean)
165
165
166 @propertycache
166 @propertycache
167 def substate(self):
167 def substate(self):
168 return subrepo.state(self, self._repo.ui)
168 return subrepo.state(self, self._repo.ui)
169
169
170 def subrev(self, subpath):
170 def subrev(self, subpath):
171 return self.substate[subpath][1]
171 return self.substate[subpath][1]
172
172
173 def rev(self):
173 def rev(self):
174 return self._rev
174 return self._rev
175 def node(self):
175 def node(self):
176 return self._node
176 return self._node
177 def hex(self):
177 def hex(self):
178 return hex(self.node())
178 return hex(self.node())
179 def manifest(self):
179 def manifest(self):
180 return self._manifest
180 return self._manifest
181 def manifestctx(self):
182 return self._manifestctx
181 def repo(self):
183 def repo(self):
182 return self._repo
184 return self._repo
183 def phasestr(self):
185 def phasestr(self):
184 return phases.phasenames[self.phase()]
186 return phases.phasenames[self.phase()]
185 def mutable(self):
187 def mutable(self):
186 return self.phase() > phases.public
188 return self.phase() > phases.public
187
189
188 def getfileset(self, expr):
190 def getfileset(self, expr):
189 return fileset.getfileset(self, expr)
191 return fileset.getfileset(self, expr)
190
192
191 def obsolete(self):
193 def obsolete(self):
192 """True if the changeset is obsolete"""
194 """True if the changeset is obsolete"""
193 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
195 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
194
196
195 def extinct(self):
197 def extinct(self):
196 """True if the changeset is extinct"""
198 """True if the changeset is extinct"""
197 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
199 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
198
200
199 def unstable(self):
201 def unstable(self):
200 """True if the changeset is not obsolete but it's ancestor are"""
202 """True if the changeset is not obsolete but it's ancestor are"""
201 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
203 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
202
204
203 def bumped(self):
205 def bumped(self):
204 """True if the changeset try to be a successor of a public changeset
206 """True if the changeset try to be a successor of a public changeset
205
207
206 Only non-public and non-obsolete changesets may be bumped.
208 Only non-public and non-obsolete changesets may be bumped.
207 """
209 """
208 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
210 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
209
211
210 def divergent(self):
212 def divergent(self):
211 """Is a successors of a changeset with multiple possible successors set
213 """Is a successors of a changeset with multiple possible successors set
212
214
213 Only non-public and non-obsolete changesets may be divergent.
215 Only non-public and non-obsolete changesets may be divergent.
214 """
216 """
215 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
217 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
216
218
217 def troubled(self):
219 def troubled(self):
218 """True if the changeset is either unstable, bumped or divergent"""
220 """True if the changeset is either unstable, bumped or divergent"""
219 return self.unstable() or self.bumped() or self.divergent()
221 return self.unstable() or self.bumped() or self.divergent()
220
222
221 def troubles(self):
223 def troubles(self):
222 """return the list of troubles affecting this changesets.
224 """return the list of troubles affecting this changesets.
223
225
224 Troubles are returned as strings. Possible values are:
226 Troubles are returned as strings. Possible values are:
225 - unstable,
227 - unstable,
226 - bumped,
228 - bumped,
227 - divergent.
229 - divergent.
228 """
230 """
229 troubles = []
231 troubles = []
230 if self.unstable():
232 if self.unstable():
231 troubles.append('unstable')
233 troubles.append('unstable')
232 if self.bumped():
234 if self.bumped():
233 troubles.append('bumped')
235 troubles.append('bumped')
234 if self.divergent():
236 if self.divergent():
235 troubles.append('divergent')
237 troubles.append('divergent')
236 return troubles
238 return troubles
237
239
238 def parents(self):
240 def parents(self):
239 """return contexts for each parent changeset"""
241 """return contexts for each parent changeset"""
240 return self._parents
242 return self._parents
241
243
242 def p1(self):
244 def p1(self):
243 return self._parents[0]
245 return self._parents[0]
244
246
245 def p2(self):
247 def p2(self):
246 parents = self._parents
248 parents = self._parents
247 if len(parents) == 2:
249 if len(parents) == 2:
248 return parents[1]
250 return parents[1]
249 return changectx(self._repo, nullrev)
251 return changectx(self._repo, nullrev)
250
252
251 def _fileinfo(self, path):
253 def _fileinfo(self, path):
252 if '_manifest' in self.__dict__:
254 if '_manifest' in self.__dict__:
253 try:
255 try:
254 return self._manifest[path], self._manifest.flags(path)
256 return self._manifest[path], self._manifest.flags(path)
255 except KeyError:
257 except KeyError:
256 raise error.ManifestLookupError(self._node, path,
258 raise error.ManifestLookupError(self._node, path,
257 _('not found in manifest'))
259 _('not found in manifest'))
258 if '_manifestdelta' in self.__dict__ or path in self.files():
260 if '_manifestdelta' in self.__dict__ or path in self.files():
259 if path in self._manifestdelta:
261 if path in self._manifestdelta:
260 return (self._manifestdelta[path],
262 return (self._manifestdelta[path],
261 self._manifestdelta.flags(path))
263 self._manifestdelta.flags(path))
262 mfl = self._repo.manifestlog
264 mfl = self._repo.manifestlog
263 try:
265 try:
264 node, flag = mfl[self._changeset.manifest].find(path)
266 node, flag = mfl[self._changeset.manifest].find(path)
265 except KeyError:
267 except KeyError:
266 raise error.ManifestLookupError(self._node, path,
268 raise error.ManifestLookupError(self._node, path,
267 _('not found in manifest'))
269 _('not found in manifest'))
268
270
269 return node, flag
271 return node, flag
270
272
271 def filenode(self, path):
273 def filenode(self, path):
272 return self._fileinfo(path)[0]
274 return self._fileinfo(path)[0]
273
275
274 def flags(self, path):
276 def flags(self, path):
275 try:
277 try:
276 return self._fileinfo(path)[1]
278 return self._fileinfo(path)[1]
277 except error.LookupError:
279 except error.LookupError:
278 return ''
280 return ''
279
281
280 def sub(self, path, allowcreate=True):
282 def sub(self, path, allowcreate=True):
281 '''return a subrepo for the stored revision of path, never wdir()'''
283 '''return a subrepo for the stored revision of path, never wdir()'''
282 return subrepo.subrepo(self, path, allowcreate=allowcreate)
284 return subrepo.subrepo(self, path, allowcreate=allowcreate)
283
285
284 def nullsub(self, path, pctx):
286 def nullsub(self, path, pctx):
285 return subrepo.nullsubrepo(self, path, pctx)
287 return subrepo.nullsubrepo(self, path, pctx)
286
288
287 def workingsub(self, path):
289 def workingsub(self, path):
288 '''return a subrepo for the stored revision, or wdir if this is a wdir
290 '''return a subrepo for the stored revision, or wdir if this is a wdir
289 context.
291 context.
290 '''
292 '''
291 return subrepo.subrepo(self, path, allowwdir=True)
293 return subrepo.subrepo(self, path, allowwdir=True)
292
294
293 def match(self, pats=[], include=None, exclude=None, default='glob',
295 def match(self, pats=[], include=None, exclude=None, default='glob',
294 listsubrepos=False, badfn=None):
296 listsubrepos=False, badfn=None):
295 r = self._repo
297 r = self._repo
296 return matchmod.match(r.root, r.getcwd(), pats,
298 return matchmod.match(r.root, r.getcwd(), pats,
297 include, exclude, default,
299 include, exclude, default,
298 auditor=r.nofsauditor, ctx=self,
300 auditor=r.nofsauditor, ctx=self,
299 listsubrepos=listsubrepos, badfn=badfn)
301 listsubrepos=listsubrepos, badfn=badfn)
300
302
301 def diff(self, ctx2=None, match=None, **opts):
303 def diff(self, ctx2=None, match=None, **opts):
302 """Returns a diff generator for the given contexts and matcher"""
304 """Returns a diff generator for the given contexts and matcher"""
303 if ctx2 is None:
305 if ctx2 is None:
304 ctx2 = self.p1()
306 ctx2 = self.p1()
305 if ctx2 is not None:
307 if ctx2 is not None:
306 ctx2 = self._repo[ctx2]
308 ctx2 = self._repo[ctx2]
307 diffopts = patch.diffopts(self._repo.ui, opts)
309 diffopts = patch.diffopts(self._repo.ui, opts)
308 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
310 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
309
311
310 def dirs(self):
312 def dirs(self):
311 return self._manifest.dirs()
313 return self._manifest.dirs()
312
314
313 def hasdir(self, dir):
315 def hasdir(self, dir):
314 return self._manifest.hasdir(dir)
316 return self._manifest.hasdir(dir)
315
317
316 def dirty(self, missing=False, merge=True, branch=True):
318 def dirty(self, missing=False, merge=True, branch=True):
317 return False
319 return False
318
320
319 def status(self, other=None, match=None, listignored=False,
321 def status(self, other=None, match=None, listignored=False,
320 listclean=False, listunknown=False, listsubrepos=False):
322 listclean=False, listunknown=False, listsubrepos=False):
321 """return status of files between two nodes or node and working
323 """return status of files between two nodes or node and working
322 directory.
324 directory.
323
325
324 If other is None, compare this node with working directory.
326 If other is None, compare this node with working directory.
325
327
326 returns (modified, added, removed, deleted, unknown, ignored, clean)
328 returns (modified, added, removed, deleted, unknown, ignored, clean)
327 """
329 """
328
330
329 ctx1 = self
331 ctx1 = self
330 ctx2 = self._repo[other]
332 ctx2 = self._repo[other]
331
333
332 # This next code block is, admittedly, fragile logic that tests for
334 # This next code block is, admittedly, fragile logic that tests for
333 # reversing the contexts and wouldn't need to exist if it weren't for
335 # reversing the contexts and wouldn't need to exist if it weren't for
334 # the fast (and common) code path of comparing the working directory
336 # the fast (and common) code path of comparing the working directory
335 # with its first parent.
337 # with its first parent.
336 #
338 #
337 # What we're aiming for here is the ability to call:
339 # What we're aiming for here is the ability to call:
338 #
340 #
339 # workingctx.status(parentctx)
341 # workingctx.status(parentctx)
340 #
342 #
341 # If we always built the manifest for each context and compared those,
343 # If we always built the manifest for each context and compared those,
342 # then we'd be done. But the special case of the above call means we
344 # then we'd be done. But the special case of the above call means we
343 # just copy the manifest of the parent.
345 # just copy the manifest of the parent.
344 reversed = False
346 reversed = False
345 if (not isinstance(ctx1, changectx)
347 if (not isinstance(ctx1, changectx)
346 and isinstance(ctx2, changectx)):
348 and isinstance(ctx2, changectx)):
347 reversed = True
349 reversed = True
348 ctx1, ctx2 = ctx2, ctx1
350 ctx1, ctx2 = ctx2, ctx1
349
351
350 match = ctx2._matchstatus(ctx1, match)
352 match = ctx2._matchstatus(ctx1, match)
351 r = scmutil.status([], [], [], [], [], [], [])
353 r = scmutil.status([], [], [], [], [], [], [])
352 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
354 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
353 listunknown)
355 listunknown)
354
356
355 if reversed:
357 if reversed:
356 # Reverse added and removed. Clear deleted, unknown and ignored as
358 # Reverse added and removed. Clear deleted, unknown and ignored as
357 # these make no sense to reverse.
359 # these make no sense to reverse.
358 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
360 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
359 r.clean)
361 r.clean)
360
362
361 if listsubrepos:
363 if listsubrepos:
362 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
364 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
363 try:
365 try:
364 rev2 = ctx2.subrev(subpath)
366 rev2 = ctx2.subrev(subpath)
365 except KeyError:
367 except KeyError:
366 # A subrepo that existed in node1 was deleted between
368 # A subrepo that existed in node1 was deleted between
367 # node1 and node2 (inclusive). Thus, ctx2's substate
369 # node1 and node2 (inclusive). Thus, ctx2's substate
368 # won't contain that subpath. The best we can do is ignore it.
370 # won't contain that subpath. The best we can do is ignore it.
369 rev2 = None
371 rev2 = None
370 submatch = matchmod.subdirmatcher(subpath, match)
372 submatch = matchmod.subdirmatcher(subpath, match)
371 s = sub.status(rev2, match=submatch, ignored=listignored,
373 s = sub.status(rev2, match=submatch, ignored=listignored,
372 clean=listclean, unknown=listunknown,
374 clean=listclean, unknown=listunknown,
373 listsubrepos=True)
375 listsubrepos=True)
374 for rfiles, sfiles in zip(r, s):
376 for rfiles, sfiles in zip(r, s):
375 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
377 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
376
378
377 for l in r:
379 for l in r:
378 l.sort()
380 l.sort()
379
381
380 return r
382 return r
381
383
382
384
383 def makememctx(repo, parents, text, user, date, branch, files, store,
385 def makememctx(repo, parents, text, user, date, branch, files, store,
384 editor=None, extra=None):
386 editor=None, extra=None):
385 def getfilectx(repo, memctx, path):
387 def getfilectx(repo, memctx, path):
386 data, mode, copied = store.getfile(path)
388 data, mode, copied = store.getfile(path)
387 if data is None:
389 if data is None:
388 return None
390 return None
389 islink, isexec = mode
391 islink, isexec = mode
390 return memfilectx(repo, path, data, islink=islink, isexec=isexec,
392 return memfilectx(repo, path, data, islink=islink, isexec=isexec,
391 copied=copied, memctx=memctx)
393 copied=copied, memctx=memctx)
392 if extra is None:
394 if extra is None:
393 extra = {}
395 extra = {}
394 if branch:
396 if branch:
395 extra['branch'] = encoding.fromlocal(branch)
397 extra['branch'] = encoding.fromlocal(branch)
396 ctx = memctx(repo, parents, text, files, getfilectx, user,
398 ctx = memctx(repo, parents, text, files, getfilectx, user,
397 date, extra, editor)
399 date, extra, editor)
398 return ctx
400 return ctx
399
401
400 class changectx(basectx):
402 class changectx(basectx):
401 """A changecontext object makes access to data related to a particular
403 """A changecontext object makes access to data related to a particular
402 changeset convenient. It represents a read-only context already present in
404 changeset convenient. It represents a read-only context already present in
403 the repo."""
405 the repo."""
404 def __init__(self, repo, changeid=''):
406 def __init__(self, repo, changeid=''):
405 """changeid is a revision number, node, or tag"""
407 """changeid is a revision number, node, or tag"""
406
408
407 # since basectx.__new__ already took care of copying the object, we
409 # since basectx.__new__ already took care of copying the object, we
408 # don't need to do anything in __init__, so we just exit here
410 # don't need to do anything in __init__, so we just exit here
409 if isinstance(changeid, basectx):
411 if isinstance(changeid, basectx):
410 return
412 return
411
413
412 if changeid == '':
414 if changeid == '':
413 changeid = '.'
415 changeid = '.'
414 self._repo = repo
416 self._repo = repo
415
417
416 try:
418 try:
417 if isinstance(changeid, int):
419 if isinstance(changeid, int):
418 self._node = repo.changelog.node(changeid)
420 self._node = repo.changelog.node(changeid)
419 self._rev = changeid
421 self._rev = changeid
420 return
422 return
421 if isinstance(changeid, long):
423 if isinstance(changeid, long):
422 changeid = str(changeid)
424 changeid = str(changeid)
423 if changeid == 'null':
425 if changeid == 'null':
424 self._node = nullid
426 self._node = nullid
425 self._rev = nullrev
427 self._rev = nullrev
426 return
428 return
427 if changeid == 'tip':
429 if changeid == 'tip':
428 self._node = repo.changelog.tip()
430 self._node = repo.changelog.tip()
429 self._rev = repo.changelog.rev(self._node)
431 self._rev = repo.changelog.rev(self._node)
430 return
432 return
431 if changeid == '.' or changeid == repo.dirstate.p1():
433 if changeid == '.' or changeid == repo.dirstate.p1():
432 # this is a hack to delay/avoid loading obsmarkers
434 # this is a hack to delay/avoid loading obsmarkers
433 # when we know that '.' won't be hidden
435 # when we know that '.' won't be hidden
434 self._node = repo.dirstate.p1()
436 self._node = repo.dirstate.p1()
435 self._rev = repo.unfiltered().changelog.rev(self._node)
437 self._rev = repo.unfiltered().changelog.rev(self._node)
436 return
438 return
437 if len(changeid) == 20:
439 if len(changeid) == 20:
438 try:
440 try:
439 self._node = changeid
441 self._node = changeid
440 self._rev = repo.changelog.rev(changeid)
442 self._rev = repo.changelog.rev(changeid)
441 return
443 return
442 except error.FilteredRepoLookupError:
444 except error.FilteredRepoLookupError:
443 raise
445 raise
444 except LookupError:
446 except LookupError:
445 pass
447 pass
446
448
447 try:
449 try:
448 r = int(changeid)
450 r = int(changeid)
449 if str(r) != changeid:
451 if str(r) != changeid:
450 raise ValueError
452 raise ValueError
451 l = len(repo.changelog)
453 l = len(repo.changelog)
452 if r < 0:
454 if r < 0:
453 r += l
455 r += l
454 if r < 0 or r >= l:
456 if r < 0 or r >= l:
455 raise ValueError
457 raise ValueError
456 self._rev = r
458 self._rev = r
457 self._node = repo.changelog.node(r)
459 self._node = repo.changelog.node(r)
458 return
460 return
459 except error.FilteredIndexError:
461 except error.FilteredIndexError:
460 raise
462 raise
461 except (ValueError, OverflowError, IndexError):
463 except (ValueError, OverflowError, IndexError):
462 pass
464 pass
463
465
464 if len(changeid) == 40:
466 if len(changeid) == 40:
465 try:
467 try:
466 self._node = bin(changeid)
468 self._node = bin(changeid)
467 self._rev = repo.changelog.rev(self._node)
469 self._rev = repo.changelog.rev(self._node)
468 return
470 return
469 except error.FilteredLookupError:
471 except error.FilteredLookupError:
470 raise
472 raise
471 except (TypeError, LookupError):
473 except (TypeError, LookupError):
472 pass
474 pass
473
475
474 # lookup bookmarks through the name interface
476 # lookup bookmarks through the name interface
475 try:
477 try:
476 self._node = repo.names.singlenode(repo, changeid)
478 self._node = repo.names.singlenode(repo, changeid)
477 self._rev = repo.changelog.rev(self._node)
479 self._rev = repo.changelog.rev(self._node)
478 return
480 return
479 except KeyError:
481 except KeyError:
480 pass
482 pass
481 except error.FilteredRepoLookupError:
483 except error.FilteredRepoLookupError:
482 raise
484 raise
483 except error.RepoLookupError:
485 except error.RepoLookupError:
484 pass
486 pass
485
487
486 self._node = repo.unfiltered().changelog._partialmatch(changeid)
488 self._node = repo.unfiltered().changelog._partialmatch(changeid)
487 if self._node is not None:
489 if self._node is not None:
488 self._rev = repo.changelog.rev(self._node)
490 self._rev = repo.changelog.rev(self._node)
489 return
491 return
490
492
491 # lookup failed
493 # lookup failed
492 # check if it might have come from damaged dirstate
494 # check if it might have come from damaged dirstate
493 #
495 #
494 # XXX we could avoid the unfiltered if we had a recognizable
496 # XXX we could avoid the unfiltered if we had a recognizable
495 # exception for filtered changeset access
497 # exception for filtered changeset access
496 if changeid in repo.unfiltered().dirstate.parents():
498 if changeid in repo.unfiltered().dirstate.parents():
497 msg = _("working directory has unknown parent '%s'!")
499 msg = _("working directory has unknown parent '%s'!")
498 raise error.Abort(msg % short(changeid))
500 raise error.Abort(msg % short(changeid))
499 try:
501 try:
500 if len(changeid) == 20 and nonascii(changeid):
502 if len(changeid) == 20 and nonascii(changeid):
501 changeid = hex(changeid)
503 changeid = hex(changeid)
502 except TypeError:
504 except TypeError:
503 pass
505 pass
504 except (error.FilteredIndexError, error.FilteredLookupError,
506 except (error.FilteredIndexError, error.FilteredLookupError,
505 error.FilteredRepoLookupError):
507 error.FilteredRepoLookupError):
506 if repo.filtername.startswith('visible'):
508 if repo.filtername.startswith('visible'):
507 msg = _("hidden revision '%s'") % changeid
509 msg = _("hidden revision '%s'") % changeid
508 hint = _('use --hidden to access hidden revisions')
510 hint = _('use --hidden to access hidden revisions')
509 raise error.FilteredRepoLookupError(msg, hint=hint)
511 raise error.FilteredRepoLookupError(msg, hint=hint)
510 msg = _("filtered revision '%s' (not in '%s' subset)")
512 msg = _("filtered revision '%s' (not in '%s' subset)")
511 msg %= (changeid, repo.filtername)
513 msg %= (changeid, repo.filtername)
512 raise error.FilteredRepoLookupError(msg)
514 raise error.FilteredRepoLookupError(msg)
513 except IndexError:
515 except IndexError:
514 pass
516 pass
515 raise error.RepoLookupError(
517 raise error.RepoLookupError(
516 _("unknown revision '%s'") % changeid)
518 _("unknown revision '%s'") % changeid)
517
519
518 def __hash__(self):
520 def __hash__(self):
519 try:
521 try:
520 return hash(self._rev)
522 return hash(self._rev)
521 except AttributeError:
523 except AttributeError:
522 return id(self)
524 return id(self)
523
525
524 def __nonzero__(self):
526 def __nonzero__(self):
525 return self._rev != nullrev
527 return self._rev != nullrev
526
528
527 @propertycache
529 @propertycache
528 def _changeset(self):
530 def _changeset(self):
529 return self._repo.changelog.changelogrevision(self.rev())
531 return self._repo.changelog.changelogrevision(self.rev())
530
532
531 @propertycache
533 @propertycache
532 def _manifest(self):
534 def _manifest(self):
533 return self._repo.manifestlog[self._changeset.manifest].read()
535 return self._manifestctx.read()
536
537 @propertycache
538 def _manifestctx(self):
539 return self._repo.manifestlog[self._changeset.manifest]
534
540
535 @propertycache
541 @propertycache
536 def _manifestdelta(self):
542 def _manifestdelta(self):
537 mfnode = self._changeset.manifest
543 return self._manifestctx.readdelta()
538 return self._repo.manifestlog[mfnode].readdelta()
539
544
540 @propertycache
545 @propertycache
541 def _parents(self):
546 def _parents(self):
542 repo = self._repo
547 repo = self._repo
543 p1, p2 = repo.changelog.parentrevs(self._rev)
548 p1, p2 = repo.changelog.parentrevs(self._rev)
544 if p2 == nullrev:
549 if p2 == nullrev:
545 return [changectx(repo, p1)]
550 return [changectx(repo, p1)]
546 return [changectx(repo, p1), changectx(repo, p2)]
551 return [changectx(repo, p1), changectx(repo, p2)]
547
552
548 def changeset(self):
553 def changeset(self):
549 c = self._changeset
554 c = self._changeset
550 return (
555 return (
551 c.manifest,
556 c.manifest,
552 c.user,
557 c.user,
553 c.date,
558 c.date,
554 c.files,
559 c.files,
555 c.description,
560 c.description,
556 c.extra,
561 c.extra,
557 )
562 )
558 def manifestnode(self):
563 def manifestnode(self):
559 return self._changeset.manifest
564 return self._changeset.manifest
560
565
561 def user(self):
566 def user(self):
562 return self._changeset.user
567 return self._changeset.user
563 def date(self):
568 def date(self):
564 return self._changeset.date
569 return self._changeset.date
565 def files(self):
570 def files(self):
566 return self._changeset.files
571 return self._changeset.files
567 def description(self):
572 def description(self):
568 return self._changeset.description
573 return self._changeset.description
569 def branch(self):
574 def branch(self):
570 return encoding.tolocal(self._changeset.extra.get("branch"))
575 return encoding.tolocal(self._changeset.extra.get("branch"))
571 def closesbranch(self):
576 def closesbranch(self):
572 return 'close' in self._changeset.extra
577 return 'close' in self._changeset.extra
573 def extra(self):
578 def extra(self):
574 return self._changeset.extra
579 return self._changeset.extra
575 def tags(self):
580 def tags(self):
576 return self._repo.nodetags(self._node)
581 return self._repo.nodetags(self._node)
577 def bookmarks(self):
582 def bookmarks(self):
578 return self._repo.nodebookmarks(self._node)
583 return self._repo.nodebookmarks(self._node)
579 def phase(self):
584 def phase(self):
580 return self._repo._phasecache.phase(self._repo, self._rev)
585 return self._repo._phasecache.phase(self._repo, self._rev)
581 def hidden(self):
586 def hidden(self):
582 return self._rev in repoview.filterrevs(self._repo, 'visible')
587 return self._rev in repoview.filterrevs(self._repo, 'visible')
583
588
584 def children(self):
589 def children(self):
585 """return contexts for each child changeset"""
590 """return contexts for each child changeset"""
586 c = self._repo.changelog.children(self._node)
591 c = self._repo.changelog.children(self._node)
587 return [changectx(self._repo, x) for x in c]
592 return [changectx(self._repo, x) for x in c]
588
593
589 def ancestors(self):
594 def ancestors(self):
590 for a in self._repo.changelog.ancestors([self._rev]):
595 for a in self._repo.changelog.ancestors([self._rev]):
591 yield changectx(self._repo, a)
596 yield changectx(self._repo, a)
592
597
593 def descendants(self):
598 def descendants(self):
594 for d in self._repo.changelog.descendants([self._rev]):
599 for d in self._repo.changelog.descendants([self._rev]):
595 yield changectx(self._repo, d)
600 yield changectx(self._repo, d)
596
601
597 def filectx(self, path, fileid=None, filelog=None):
602 def filectx(self, path, fileid=None, filelog=None):
598 """get a file context from this changeset"""
603 """get a file context from this changeset"""
599 if fileid is None:
604 if fileid is None:
600 fileid = self.filenode(path)
605 fileid = self.filenode(path)
601 return filectx(self._repo, path, fileid=fileid,
606 return filectx(self._repo, path, fileid=fileid,
602 changectx=self, filelog=filelog)
607 changectx=self, filelog=filelog)
603
608
604 def ancestor(self, c2, warn=False):
609 def ancestor(self, c2, warn=False):
605 """return the "best" ancestor context of self and c2
610 """return the "best" ancestor context of self and c2
606
611
607 If there are multiple candidates, it will show a message and check
612 If there are multiple candidates, it will show a message and check
608 merge.preferancestor configuration before falling back to the
613 merge.preferancestor configuration before falling back to the
609 revlog ancestor."""
614 revlog ancestor."""
610 # deal with workingctxs
615 # deal with workingctxs
611 n2 = c2._node
616 n2 = c2._node
612 if n2 is None:
617 if n2 is None:
613 n2 = c2._parents[0]._node
618 n2 = c2._parents[0]._node
614 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
619 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
615 if not cahs:
620 if not cahs:
616 anc = nullid
621 anc = nullid
617 elif len(cahs) == 1:
622 elif len(cahs) == 1:
618 anc = cahs[0]
623 anc = cahs[0]
619 else:
624 else:
620 # experimental config: merge.preferancestor
625 # experimental config: merge.preferancestor
621 for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
626 for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
622 try:
627 try:
623 ctx = changectx(self._repo, r)
628 ctx = changectx(self._repo, r)
624 except error.RepoLookupError:
629 except error.RepoLookupError:
625 continue
630 continue
626 anc = ctx.node()
631 anc = ctx.node()
627 if anc in cahs:
632 if anc in cahs:
628 break
633 break
629 else:
634 else:
630 anc = self._repo.changelog.ancestor(self._node, n2)
635 anc = self._repo.changelog.ancestor(self._node, n2)
631 if warn:
636 if warn:
632 self._repo.ui.status(
637 self._repo.ui.status(
633 (_("note: using %s as ancestor of %s and %s\n") %
638 (_("note: using %s as ancestor of %s and %s\n") %
634 (short(anc), short(self._node), short(n2))) +
639 (short(anc), short(self._node), short(n2))) +
635 ''.join(_(" alternatively, use --config "
640 ''.join(_(" alternatively, use --config "
636 "merge.preferancestor=%s\n") %
641 "merge.preferancestor=%s\n") %
637 short(n) for n in sorted(cahs) if n != anc))
642 short(n) for n in sorted(cahs) if n != anc))
638 return changectx(self._repo, anc)
643 return changectx(self._repo, anc)
639
644
640 def descendant(self, other):
645 def descendant(self, other):
641 """True if other is descendant of this changeset"""
646 """True if other is descendant of this changeset"""
642 return self._repo.changelog.descendant(self._rev, other._rev)
647 return self._repo.changelog.descendant(self._rev, other._rev)
643
648
644 def walk(self, match):
649 def walk(self, match):
645 '''Generates matching file names.'''
650 '''Generates matching file names.'''
646
651
647 # Wrap match.bad method to have message with nodeid
652 # Wrap match.bad method to have message with nodeid
648 def bad(fn, msg):
653 def bad(fn, msg):
649 # The manifest doesn't know about subrepos, so don't complain about
654 # The manifest doesn't know about subrepos, so don't complain about
650 # paths into valid subrepos.
655 # paths into valid subrepos.
651 if any(fn == s or fn.startswith(s + '/')
656 if any(fn == s or fn.startswith(s + '/')
652 for s in self.substate):
657 for s in self.substate):
653 return
658 return
654 match.bad(fn, _('no such file in rev %s') % self)
659 match.bad(fn, _('no such file in rev %s') % self)
655
660
656 m = matchmod.badmatch(match, bad)
661 m = matchmod.badmatch(match, bad)
657 return self._manifest.walk(m)
662 return self._manifest.walk(m)
658
663
659 def matches(self, match):
664 def matches(self, match):
660 return self.walk(match)
665 return self.walk(match)
661
666
662 class basefilectx(object):
667 class basefilectx(object):
663 """A filecontext object represents the common logic for its children:
668 """A filecontext object represents the common logic for its children:
664 filectx: read-only access to a filerevision that is already present
669 filectx: read-only access to a filerevision that is already present
665 in the repo,
670 in the repo,
666 workingfilectx: a filecontext that represents files from the working
671 workingfilectx: a filecontext that represents files from the working
667 directory,
672 directory,
668 memfilectx: a filecontext that represents files in-memory."""
673 memfilectx: a filecontext that represents files in-memory."""
669 def __new__(cls, repo, path, *args, **kwargs):
674 def __new__(cls, repo, path, *args, **kwargs):
670 return super(basefilectx, cls).__new__(cls)
675 return super(basefilectx, cls).__new__(cls)
671
676
672 @propertycache
677 @propertycache
673 def _filelog(self):
678 def _filelog(self):
674 return self._repo.file(self._path)
679 return self._repo.file(self._path)
675
680
676 @propertycache
681 @propertycache
677 def _changeid(self):
682 def _changeid(self):
678 if '_changeid' in self.__dict__:
683 if '_changeid' in self.__dict__:
679 return self._changeid
684 return self._changeid
680 elif '_changectx' in self.__dict__:
685 elif '_changectx' in self.__dict__:
681 return self._changectx.rev()
686 return self._changectx.rev()
682 elif '_descendantrev' in self.__dict__:
687 elif '_descendantrev' in self.__dict__:
683 # this file context was created from a revision with a known
688 # this file context was created from a revision with a known
684 # descendant, we can (lazily) correct for linkrev aliases
689 # descendant, we can (lazily) correct for linkrev aliases
685 return self._adjustlinkrev(self._descendantrev)
690 return self._adjustlinkrev(self._descendantrev)
686 else:
691 else:
687 return self._filelog.linkrev(self._filerev)
692 return self._filelog.linkrev(self._filerev)
688
693
689 @propertycache
694 @propertycache
690 def _filenode(self):
695 def _filenode(self):
691 if '_fileid' in self.__dict__:
696 if '_fileid' in self.__dict__:
692 return self._filelog.lookup(self._fileid)
697 return self._filelog.lookup(self._fileid)
693 else:
698 else:
694 return self._changectx.filenode(self._path)
699 return self._changectx.filenode(self._path)
695
700
696 @propertycache
701 @propertycache
697 def _filerev(self):
702 def _filerev(self):
698 return self._filelog.rev(self._filenode)
703 return self._filelog.rev(self._filenode)
699
704
700 @propertycache
705 @propertycache
701 def _repopath(self):
706 def _repopath(self):
702 return self._path
707 return self._path
703
708
704 def __nonzero__(self):
709 def __nonzero__(self):
705 try:
710 try:
706 self._filenode
711 self._filenode
707 return True
712 return True
708 except error.LookupError:
713 except error.LookupError:
709 # file is missing
714 # file is missing
710 return False
715 return False
711
716
712 def __str__(self):
717 def __str__(self):
713 try:
718 try:
714 return "%s@%s" % (self.path(), self._changectx)
719 return "%s@%s" % (self.path(), self._changectx)
715 except error.LookupError:
720 except error.LookupError:
716 return "%s@???" % self.path()
721 return "%s@???" % self.path()
717
722
718 def __repr__(self):
723 def __repr__(self):
719 return "<%s %s>" % (type(self).__name__, str(self))
724 return "<%s %s>" % (type(self).__name__, str(self))
720
725
721 def __hash__(self):
726 def __hash__(self):
722 try:
727 try:
723 return hash((self._path, self._filenode))
728 return hash((self._path, self._filenode))
724 except AttributeError:
729 except AttributeError:
725 return id(self)
730 return id(self)
726
731
727 def __eq__(self, other):
732 def __eq__(self, other):
728 try:
733 try:
729 return (type(self) == type(other) and self._path == other._path
734 return (type(self) == type(other) and self._path == other._path
730 and self._filenode == other._filenode)
735 and self._filenode == other._filenode)
731 except AttributeError:
736 except AttributeError:
732 return False
737 return False
733
738
734 def __ne__(self, other):
739 def __ne__(self, other):
735 return not (self == other)
740 return not (self == other)
736
741
737 def filerev(self):
742 def filerev(self):
738 return self._filerev
743 return self._filerev
739 def filenode(self):
744 def filenode(self):
740 return self._filenode
745 return self._filenode
741 def flags(self):
746 def flags(self):
742 return self._changectx.flags(self._path)
747 return self._changectx.flags(self._path)
743 def filelog(self):
748 def filelog(self):
744 return self._filelog
749 return self._filelog
745 def rev(self):
750 def rev(self):
746 return self._changeid
751 return self._changeid
747 def linkrev(self):
752 def linkrev(self):
748 return self._filelog.linkrev(self._filerev)
753 return self._filelog.linkrev(self._filerev)
749 def node(self):
754 def node(self):
750 return self._changectx.node()
755 return self._changectx.node()
751 def hex(self):
756 def hex(self):
752 return self._changectx.hex()
757 return self._changectx.hex()
753 def user(self):
758 def user(self):
754 return self._changectx.user()
759 return self._changectx.user()
755 def date(self):
760 def date(self):
756 return self._changectx.date()
761 return self._changectx.date()
757 def files(self):
762 def files(self):
758 return self._changectx.files()
763 return self._changectx.files()
759 def description(self):
764 def description(self):
760 return self._changectx.description()
765 return self._changectx.description()
761 def branch(self):
766 def branch(self):
762 return self._changectx.branch()
767 return self._changectx.branch()
763 def extra(self):
768 def extra(self):
764 return self._changectx.extra()
769 return self._changectx.extra()
765 def phase(self):
770 def phase(self):
766 return self._changectx.phase()
771 return self._changectx.phase()
767 def phasestr(self):
772 def phasestr(self):
768 return self._changectx.phasestr()
773 return self._changectx.phasestr()
769 def manifest(self):
774 def manifest(self):
770 return self._changectx.manifest()
775 return self._changectx.manifest()
771 def changectx(self):
776 def changectx(self):
772 return self._changectx
777 return self._changectx
773 def repo(self):
778 def repo(self):
774 return self._repo
779 return self._repo
775
780
776 def path(self):
781 def path(self):
777 return self._path
782 return self._path
778
783
779 def isbinary(self):
784 def isbinary(self):
780 try:
785 try:
781 return util.binary(self.data())
786 return util.binary(self.data())
782 except IOError:
787 except IOError:
783 return False
788 return False
784 def isexec(self):
789 def isexec(self):
785 return 'x' in self.flags()
790 return 'x' in self.flags()
786 def islink(self):
791 def islink(self):
787 return 'l' in self.flags()
792 return 'l' in self.flags()
788
793
789 def isabsent(self):
794 def isabsent(self):
790 """whether this filectx represents a file not in self._changectx
795 """whether this filectx represents a file not in self._changectx
791
796
792 This is mainly for merge code to detect change/delete conflicts. This is
797 This is mainly for merge code to detect change/delete conflicts. This is
793 expected to be True for all subclasses of basectx."""
798 expected to be True for all subclasses of basectx."""
794 return False
799 return False
795
800
796 _customcmp = False
801 _customcmp = False
797 def cmp(self, fctx):
802 def cmp(self, fctx):
798 """compare with other file context
803 """compare with other file context
799
804
800 returns True if different than fctx.
805 returns True if different than fctx.
801 """
806 """
802 if fctx._customcmp:
807 if fctx._customcmp:
803 return fctx.cmp(self)
808 return fctx.cmp(self)
804
809
805 if (fctx._filenode is None
810 if (fctx._filenode is None
806 and (self._repo._encodefilterpats
811 and (self._repo._encodefilterpats
807 # if file data starts with '\1\n', empty metadata block is
812 # if file data starts with '\1\n', empty metadata block is
808 # prepended, which adds 4 bytes to filelog.size().
813 # prepended, which adds 4 bytes to filelog.size().
809 or self.size() - 4 == fctx.size())
814 or self.size() - 4 == fctx.size())
810 or self.size() == fctx.size()):
815 or self.size() == fctx.size()):
811 return self._filelog.cmp(self._filenode, fctx.data())
816 return self._filelog.cmp(self._filenode, fctx.data())
812
817
813 return True
818 return True
814
819
815 def _adjustlinkrev(self, srcrev, inclusive=False):
820 def _adjustlinkrev(self, srcrev, inclusive=False):
816 """return the first ancestor of <srcrev> introducing <fnode>
821 """return the first ancestor of <srcrev> introducing <fnode>
817
822
818 If the linkrev of the file revision does not point to an ancestor of
823 If the linkrev of the file revision does not point to an ancestor of
819 srcrev, we'll walk down the ancestors until we find one introducing
824 srcrev, we'll walk down the ancestors until we find one introducing
820 this file revision.
825 this file revision.
821
826
822 :srcrev: the changeset revision we search ancestors from
827 :srcrev: the changeset revision we search ancestors from
823 :inclusive: if true, the src revision will also be checked
828 :inclusive: if true, the src revision will also be checked
824 """
829 """
825 repo = self._repo
830 repo = self._repo
826 cl = repo.unfiltered().changelog
831 cl = repo.unfiltered().changelog
827 mfl = repo.manifestlog
832 mfl = repo.manifestlog
828 # fetch the linkrev
833 # fetch the linkrev
829 lkr = self.linkrev()
834 lkr = self.linkrev()
830 # hack to reuse ancestor computation when searching for renames
835 # hack to reuse ancestor computation when searching for renames
831 memberanc = getattr(self, '_ancestrycontext', None)
836 memberanc = getattr(self, '_ancestrycontext', None)
832 iteranc = None
837 iteranc = None
833 if srcrev is None:
838 if srcrev is None:
834 # wctx case, used by workingfilectx during mergecopy
839 # wctx case, used by workingfilectx during mergecopy
835 revs = [p.rev() for p in self._repo[None].parents()]
840 revs = [p.rev() for p in self._repo[None].parents()]
836 inclusive = True # we skipped the real (revless) source
841 inclusive = True # we skipped the real (revless) source
837 else:
842 else:
838 revs = [srcrev]
843 revs = [srcrev]
839 if memberanc is None:
844 if memberanc is None:
840 memberanc = iteranc = cl.ancestors(revs, lkr,
845 memberanc = iteranc = cl.ancestors(revs, lkr,
841 inclusive=inclusive)
846 inclusive=inclusive)
842 # check if this linkrev is an ancestor of srcrev
847 # check if this linkrev is an ancestor of srcrev
843 if lkr not in memberanc:
848 if lkr not in memberanc:
844 if iteranc is None:
849 if iteranc is None:
845 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
850 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
846 fnode = self._filenode
851 fnode = self._filenode
847 path = self._path
852 path = self._path
848 for a in iteranc:
853 for a in iteranc:
849 ac = cl.read(a) # get changeset data (we avoid object creation)
854 ac = cl.read(a) # get changeset data (we avoid object creation)
850 if path in ac[3]: # checking the 'files' field.
855 if path in ac[3]: # checking the 'files' field.
851 # The file has been touched, check if the content is
856 # The file has been touched, check if the content is
852 # similar to the one we search for.
857 # similar to the one we search for.
853 if fnode == mfl[ac[0]].readfast().get(path):
858 if fnode == mfl[ac[0]].readfast().get(path):
854 return a
859 return a
855 # In theory, we should never get out of that loop without a result.
860 # In theory, we should never get out of that loop without a result.
856 # But if manifest uses a buggy file revision (not children of the
861 # But if manifest uses a buggy file revision (not children of the
857 # one it replaces) we could. Such a buggy situation will likely
862 # one it replaces) we could. Such a buggy situation will likely
858 # result in a crash somewhere else at some point.
863 # result in a crash somewhere else at some point.
859 return lkr
864 return lkr
860
865
861 def introrev(self):
866 def introrev(self):
862 """return the rev of the changeset which introduced this file revision
867 """return the rev of the changeset which introduced this file revision
863
868
864 This method is different from linkrev because it takes into account the
869 This method is different from linkrev because it takes into account the
865 changeset the filectx was created from. It ensures the returned
870 changeset the filectx was created from. It ensures the returned
866 revision is one of its ancestors. This prevents bugs from
871 revision is one of its ancestors. This prevents bugs from
867 'linkrev-shadowing' when a file revision is used by multiple
872 'linkrev-shadowing' when a file revision is used by multiple
868 changesets.
873 changesets.
869 """
874 """
870 lkr = self.linkrev()
875 lkr = self.linkrev()
871 attrs = vars(self)
876 attrs = vars(self)
872 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
877 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
873 if noctx or self.rev() == lkr:
878 if noctx or self.rev() == lkr:
874 return self.linkrev()
879 return self.linkrev()
875 return self._adjustlinkrev(self.rev(), inclusive=True)
880 return self._adjustlinkrev(self.rev(), inclusive=True)
876
881
877 def _parentfilectx(self, path, fileid, filelog):
882 def _parentfilectx(self, path, fileid, filelog):
878 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
883 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
879 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
884 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
880 if '_changeid' in vars(self) or '_changectx' in vars(self):
885 if '_changeid' in vars(self) or '_changectx' in vars(self):
881 # If self is associated with a changeset (probably explicitly
886 # If self is associated with a changeset (probably explicitly
882 # fed), ensure the created filectx is associated with a
887 # fed), ensure the created filectx is associated with a
883 # changeset that is an ancestor of self.changectx.
888 # changeset that is an ancestor of self.changectx.
884 # This lets us later use _adjustlinkrev to get a correct link.
889 # This lets us later use _adjustlinkrev to get a correct link.
885 fctx._descendantrev = self.rev()
890 fctx._descendantrev = self.rev()
886 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
891 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
887 elif '_descendantrev' in vars(self):
892 elif '_descendantrev' in vars(self):
888 # Otherwise propagate _descendantrev if we have one associated.
893 # Otherwise propagate _descendantrev if we have one associated.
889 fctx._descendantrev = self._descendantrev
894 fctx._descendantrev = self._descendantrev
890 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
895 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
891 return fctx
896 return fctx
892
897
893 def parents(self):
898 def parents(self):
894 _path = self._path
899 _path = self._path
895 fl = self._filelog
900 fl = self._filelog
896 parents = self._filelog.parents(self._filenode)
901 parents = self._filelog.parents(self._filenode)
897 pl = [(_path, node, fl) for node in parents if node != nullid]
902 pl = [(_path, node, fl) for node in parents if node != nullid]
898
903
899 r = fl.renamed(self._filenode)
904 r = fl.renamed(self._filenode)
900 if r:
905 if r:
901 # - In the simple rename case, both parents are nullid, pl is empty.
906 # - In the simple rename case, both parents are nullid, pl is empty.
902 # - In case of merge, only one of the parents is nullid and should
907 # - In case of merge, only one of the parents is nullid and should
903 # be replaced with the rename information. This parent is -always-
908 # be replaced with the rename information. This parent is -always-
904 # the first one.
909 # the first one.
905 #
910 #
906 # As nullid parents have always been filtered out in the previous list
911 # As nullid parents have always been filtered out in the previous list
907 # comprehension, inserting at 0 will always result in "replacing the
912 # comprehension, inserting at 0 will always result in "replacing the
908 # first nullid parent with rename information".
913 # first nullid parent with rename information".
909 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
914 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
910
915
911 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
916 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
912
917
913 def p1(self):
918 def p1(self):
914 return self.parents()[0]
919 return self.parents()[0]
915
920
916 def p2(self):
921 def p2(self):
917 p = self.parents()
922 p = self.parents()
918 if len(p) == 2:
923 if len(p) == 2:
919 return p[1]
924 return p[1]
920 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
925 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
921
926
922 def annotate(self, follow=False, linenumber=False, diffopts=None):
927 def annotate(self, follow=False, linenumber=False, diffopts=None):
923 '''returns a list of tuples of ((ctx, number), line) for each line
928 '''returns a list of tuples of ((ctx, number), line) for each line
924 in the file, where ctx is the filectx of the node where
929 in the file, where ctx is the filectx of the node where
925 that line was last changed; if the linenumber parameter is true, number is
930 that line was last changed; if the linenumber parameter is true, number is
926 the line number at its first appearance in the managed file; otherwise,
931 the line number at its first appearance in the managed file; otherwise,
927 number has a fixed value of False.
932 number has a fixed value of False.
928 '''
933 '''
929
934
930 def lines(text):
935 def lines(text):
931 if text.endswith("\n"):
936 if text.endswith("\n"):
932 return text.count("\n")
937 return text.count("\n")
933 return text.count("\n") + int(bool(text))
938 return text.count("\n") + int(bool(text))
934
939
935 if linenumber:
940 if linenumber:
936 def decorate(text, rev):
941 def decorate(text, rev):
937 return ([(rev, i) for i in xrange(1, lines(text) + 1)], text)
942 return ([(rev, i) for i in xrange(1, lines(text) + 1)], text)
938 else:
943 else:
939 def decorate(text, rev):
944 def decorate(text, rev):
940 return ([(rev, False)] * lines(text), text)
945 return ([(rev, False)] * lines(text), text)
941
946
942 def pair(parent, child):
947 def pair(parent, child):
943 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts)
948 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts)
944 for (a1, a2, b1, b2), t in blocks:
949 for (a1, a2, b1, b2), t in blocks:
945 # Changed blocks ('!') or blocks made only of blank lines ('~')
950 # Changed blocks ('!') or blocks made only of blank lines ('~')
946 # belong to the child.
951 # belong to the child.
947 if t == '=':
952 if t == '=':
948 child[0][b1:b2] = parent[0][a1:a2]
953 child[0][b1:b2] = parent[0][a1:a2]
949 return child
954 return child
950
955
951 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
956 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
952
957
953 def parents(f):
958 def parents(f):
954 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
959 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
955 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
960 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
956 # from the topmost introrev (= srcrev) down to p.linkrev() if it
961 # from the topmost introrev (= srcrev) down to p.linkrev() if it
957 # isn't an ancestor of the srcrev.
962 # isn't an ancestor of the srcrev.
958 f._changeid
963 f._changeid
959 pl = f.parents()
964 pl = f.parents()
960
965
961 # Don't return renamed parents if we aren't following.
966 # Don't return renamed parents if we aren't following.
962 if not follow:
967 if not follow:
963 pl = [p for p in pl if p.path() == f.path()]
968 pl = [p for p in pl if p.path() == f.path()]
964
969
965 # renamed filectx won't have a filelog yet, so set it
970 # renamed filectx won't have a filelog yet, so set it
966 # from the cache to save time
971 # from the cache to save time
967 for p in pl:
972 for p in pl:
968 if '_filelog' not in p.__dict__:
973 if '_filelog' not in p.__dict__:
969 p._filelog = getlog(p.path())
974 p._filelog = getlog(p.path())
970
975
971 return pl
976 return pl
972
977
973 # use linkrev to find the first changeset where self appeared
978 # use linkrev to find the first changeset where self appeared
974 base = self
979 base = self
975 introrev = self.introrev()
980 introrev = self.introrev()
976 if self.rev() != introrev:
981 if self.rev() != introrev:
977 base = self.filectx(self.filenode(), changeid=introrev)
982 base = self.filectx(self.filenode(), changeid=introrev)
978 if getattr(base, '_ancestrycontext', None) is None:
983 if getattr(base, '_ancestrycontext', None) is None:
979 cl = self._repo.changelog
984 cl = self._repo.changelog
980 if introrev is None:
985 if introrev is None:
981 # wctx is not inclusive, but works because _ancestrycontext
986 # wctx is not inclusive, but works because _ancestrycontext
982 # is used to test filelog revisions
987 # is used to test filelog revisions
983 ac = cl.ancestors([p.rev() for p in base.parents()],
988 ac = cl.ancestors([p.rev() for p in base.parents()],
984 inclusive=True)
989 inclusive=True)
985 else:
990 else:
986 ac = cl.ancestors([introrev], inclusive=True)
991 ac = cl.ancestors([introrev], inclusive=True)
987 base._ancestrycontext = ac
992 base._ancestrycontext = ac
988
993
989 # This algorithm would prefer to be recursive, but Python is a
994 # This algorithm would prefer to be recursive, but Python is a
990 # bit recursion-hostile. Instead we do an iterative
995 # bit recursion-hostile. Instead we do an iterative
991 # depth-first search.
996 # depth-first search.
992
997
993 # 1st DFS pre-calculates pcache and needed
998 # 1st DFS pre-calculates pcache and needed
994 visit = [base]
999 visit = [base]
995 pcache = {}
1000 pcache = {}
996 needed = {base: 1}
1001 needed = {base: 1}
997 while visit:
1002 while visit:
998 f = visit.pop()
1003 f = visit.pop()
999 if f in pcache:
1004 if f in pcache:
1000 continue
1005 continue
1001 pl = parents(f)
1006 pl = parents(f)
1002 pcache[f] = pl
1007 pcache[f] = pl
1003 for p in pl:
1008 for p in pl:
1004 needed[p] = needed.get(p, 0) + 1
1009 needed[p] = needed.get(p, 0) + 1
1005 if p not in pcache:
1010 if p not in pcache:
1006 visit.append(p)
1011 visit.append(p)
1007
1012
1008 # 2nd DFS does the actual annotate
1013 # 2nd DFS does the actual annotate
1009 visit[:] = [base]
1014 visit[:] = [base]
1010 hist = {}
1015 hist = {}
1011 while visit:
1016 while visit:
1012 f = visit[-1]
1017 f = visit[-1]
1013 if f in hist:
1018 if f in hist:
1014 visit.pop()
1019 visit.pop()
1015 continue
1020 continue
1016
1021
1017 ready = True
1022 ready = True
1018 pl = pcache[f]
1023 pl = pcache[f]
1019 for p in pl:
1024 for p in pl:
1020 if p not in hist:
1025 if p not in hist:
1021 ready = False
1026 ready = False
1022 visit.append(p)
1027 visit.append(p)
1023 if ready:
1028 if ready:
1024 visit.pop()
1029 visit.pop()
1025 curr = decorate(f.data(), f)
1030 curr = decorate(f.data(), f)
1026 for p in pl:
1031 for p in pl:
1027 curr = pair(hist[p], curr)
1032 curr = pair(hist[p], curr)
1028 if needed[p] == 1:
1033 if needed[p] == 1:
1029 del hist[p]
1034 del hist[p]
1030 del needed[p]
1035 del needed[p]
1031 else:
1036 else:
1032 needed[p] -= 1
1037 needed[p] -= 1
1033
1038
1034 hist[f] = curr
1039 hist[f] = curr
1035 del pcache[f]
1040 del pcache[f]
1036
1041
1037 return zip(hist[base][0], hist[base][1].splitlines(True))
1042 return zip(hist[base][0], hist[base][1].splitlines(True))
1038
1043
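# Editor's note: a minimal usage sketch for annotate() above (not part of
# context.py). The repository handle and file path are hypothetical; any
# filectx obtained from a changectx works the same way.
#
#     fctx = repo['tip']['README']
#     for (src, lineno), line in fctx.annotate(follow=True, linenumber=True):
#         # src is the filectx that last touched the line; lineno is the line
#         # number at its first appearance, or False when linenumber=False.
#         pass
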
1039 def ancestors(self, followfirst=False):
1044 def ancestors(self, followfirst=False):
1040 visit = {}
1045 visit = {}
1041 c = self
1046 c = self
1042 if followfirst:
1047 if followfirst:
1043 cut = 1
1048 cut = 1
1044 else:
1049 else:
1045 cut = None
1050 cut = None
1046
1051
1047 while True:
1052 while True:
1048 for parent in c.parents()[:cut]:
1053 for parent in c.parents()[:cut]:
1049 visit[(parent.linkrev(), parent.filenode())] = parent
1054 visit[(parent.linkrev(), parent.filenode())] = parent
1050 if not visit:
1055 if not visit:
1051 break
1056 break
1052 c = visit.pop(max(visit))
1057 c = visit.pop(max(visit))
1053 yield c
1058 yield c
1054
1059
1055 class filectx(basefilectx):
1060 class filectx(basefilectx):
1056 """A filecontext object makes access to data related to a particular
1061 """A filecontext object makes access to data related to a particular
1057 filerevision convenient."""
1062 filerevision convenient."""
1058 def __init__(self, repo, path, changeid=None, fileid=None,
1063 def __init__(self, repo, path, changeid=None, fileid=None,
1059 filelog=None, changectx=None):
1064 filelog=None, changectx=None):
1060 """changeid can be a changeset revision, node, or tag.
1065 """changeid can be a changeset revision, node, or tag.
1061 fileid can be a file revision or node."""
1066 fileid can be a file revision or node."""
1062 self._repo = repo
1067 self._repo = repo
1063 self._path = path
1068 self._path = path
1064
1069
1065 assert (changeid is not None
1070 assert (changeid is not None
1066 or fileid is not None
1071 or fileid is not None
1067 or changectx is not None), \
1072 or changectx is not None), \
1068 ("bad args: changeid=%r, fileid=%r, changectx=%r"
1073 ("bad args: changeid=%r, fileid=%r, changectx=%r"
1069 % (changeid, fileid, changectx))
1074 % (changeid, fileid, changectx))
1070
1075
1071 if filelog is not None:
1076 if filelog is not None:
1072 self._filelog = filelog
1077 self._filelog = filelog
1073
1078
1074 if changeid is not None:
1079 if changeid is not None:
1075 self._changeid = changeid
1080 self._changeid = changeid
1076 if changectx is not None:
1081 if changectx is not None:
1077 self._changectx = changectx
1082 self._changectx = changectx
1078 if fileid is not None:
1083 if fileid is not None:
1079 self._fileid = fileid
1084 self._fileid = fileid
1080
1085
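# Editor's note: a hedged sketch of how a filectx is usually obtained (the
# repository handle, path and revision below are hypothetical):
#
#     fctx = repo['tip']['src/module.py']                    # via a changectx
#     fctx = filectx(repo, 'src/module.py', changeid='tip')  # direct
#
# At least one of changeid, fileid or changectx must be supplied, as the
# assertion in __init__ above enforces.
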
1081 @propertycache
1086 @propertycache
1082 def _changectx(self):
1087 def _changectx(self):
1083 try:
1088 try:
1084 return changectx(self._repo, self._changeid)
1089 return changectx(self._repo, self._changeid)
1085 except error.FilteredRepoLookupError:
1090 except error.FilteredRepoLookupError:
1086 # Linkrev may point to any revision in the repository. When the
1091 # Linkrev may point to any revision in the repository. When the
1087 # repository is filtered this may lead to `filectx` trying to build
1092 # repository is filtered this may lead to `filectx` trying to build
1088 # `changectx` for a filtered revision. In such a case we fall back to
1093 # `changectx` for a filtered revision. In such a case we fall back to
1089 # creating `changectx` on the unfiltered version of the repository.
1094 # creating `changectx` on the unfiltered version of the repository.
1090 # This fallback should not be an issue because `changectx` objects built
1095 # This fallback should not be an issue because `changectx` objects built
1091 # from `filectx` are not used in complex operations that care about
1096 # from `filectx` are not used in complex operations that care about
1092 # filtering.
1097 # filtering.
1093 #
1098 #
1094 # This fallback is a cheap and dirty fix that prevents several
1099 # This fallback is a cheap and dirty fix that prevents several
1095 # crashes. It does not ensure the behavior is correct. However the
1100 # crashes. It does not ensure the behavior is correct. However the
1096 # behavior was not correct before filtering either and "incorrect
1101 # behavior was not correct before filtering either and "incorrect
1097 # behavior" is seen as better than a "crash"
1102 # behavior" is seen as better than a "crash"
1098 #
1103 #
1099 # Linkrevs have several serious problems with filtering that are
1104 # Linkrevs have several serious problems with filtering that are
1100 # complicated to solve. Proper handling of the issue here should be
1105 # complicated to solve. Proper handling of the issue here should be
1101 # considered once fixes for the linkrev issues are on the table.
1106 # considered once fixes for the linkrev issues are on the table.
1102 return changectx(self._repo.unfiltered(), self._changeid)
1107 return changectx(self._repo.unfiltered(), self._changeid)
1103
1108
1104 def filectx(self, fileid, changeid=None):
1109 def filectx(self, fileid, changeid=None):
1105 '''opens an arbitrary revision of the file without
1110 '''opens an arbitrary revision of the file without
1106 opening a new filelog'''
1111 opening a new filelog'''
1107 return filectx(self._repo, self._path, fileid=fileid,
1112 return filectx(self._repo, self._path, fileid=fileid,
1108 filelog=self._filelog, changeid=changeid)
1113 filelog=self._filelog, changeid=changeid)
1109
1114
1110 def data(self):
1115 def data(self):
1111 try:
1116 try:
1112 return self._filelog.read(self._filenode)
1117 return self._filelog.read(self._filenode)
1113 except error.CensoredNodeError:
1118 except error.CensoredNodeError:
1114 if self._repo.ui.config("censor", "policy", "abort") == "ignore":
1119 if self._repo.ui.config("censor", "policy", "abort") == "ignore":
1115 return ""
1120 return ""
1116 raise error.Abort(_("censored node: %s") % short(self._filenode),
1121 raise error.Abort(_("censored node: %s") % short(self._filenode),
1117 hint=_("set censor.policy to ignore errors"))
1122 hint=_("set censor.policy to ignore errors"))
1118
1123
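# Editor's note: the censor.policy value read in data() above comes from the
# user's configuration. A minimal hgrc snippet (assumed layout):
#
#     [censor]
#     policy = ignore
#
# With "ignore", data() returns an empty string for a censored node instead
# of aborting.
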
1119 def size(self):
1124 def size(self):
1120 return self._filelog.size(self._filerev)
1125 return self._filelog.size(self._filerev)
1121
1126
1122 def renamed(self):
1127 def renamed(self):
1123 """check if file was actually renamed in this changeset revision
1128 """check if file was actually renamed in this changeset revision
1124
1129
1125 If a rename is logged in the file revision, we report the copy for the
1130 If a rename is logged in the file revision, we report the copy for the
1126 changeset only if the file revision's linkrev points back to the changeset
1131 changeset only if the file revision's linkrev points back to the changeset
1127 in question or both changeset parents contain different file revisions.
1132 in question or both changeset parents contain different file revisions.
1128 """
1133 """
1129
1134
1130 renamed = self._filelog.renamed(self._filenode)
1135 renamed = self._filelog.renamed(self._filenode)
1131 if not renamed:
1136 if not renamed:
1132 return renamed
1137 return renamed
1133
1138
1134 if self.rev() == self.linkrev():
1139 if self.rev() == self.linkrev():
1135 return renamed
1140 return renamed
1136
1141
1137 name = self.path()
1142 name = self.path()
1138 fnode = self._filenode
1143 fnode = self._filenode
1139 for p in self._changectx.parents():
1144 for p in self._changectx.parents():
1140 try:
1145 try:
1141 if fnode == p.filenode(name):
1146 if fnode == p.filenode(name):
1142 return None
1147 return None
1143 except error.LookupError:
1148 except error.LookupError:
1144 pass
1149 pass
1145 return renamed
1150 return renamed
1146
1151
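# Editor's note: a sketch of how callers typically consume renamed() above
# (hypothetical caller code). The return value is either a false value or a
# (source path, source filenode) tuple taken from the filelog copy metadata:
#
#     r = fctx.renamed()
#     if r:
#         srcpath, srcfilenode = r
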
1147 def children(self):
1152 def children(self):
1148 # hard for renames
1153 # hard for renames
1149 c = self._filelog.children(self._filenode)
1154 c = self._filelog.children(self._filenode)
1150 return [filectx(self._repo, self._path, fileid=x,
1155 return [filectx(self._repo, self._path, fileid=x,
1151 filelog=self._filelog) for x in c]
1156 filelog=self._filelog) for x in c]
1152
1157
1153 class committablectx(basectx):
1158 class committablectx(basectx):
1154 """A committablectx object provides common functionality for a context that
1159 """A committablectx object provides common functionality for a context that
1155 wants the ability to commit, e.g. workingctx or memctx."""
1160 wants the ability to commit, e.g. workingctx or memctx."""
1156 def __init__(self, repo, text="", user=None, date=None, extra=None,
1161 def __init__(self, repo, text="", user=None, date=None, extra=None,
1157 changes=None):
1162 changes=None):
1158 self._repo = repo
1163 self._repo = repo
1159 self._rev = None
1164 self._rev = None
1160 self._node = None
1165 self._node = None
1161 self._text = text
1166 self._text = text
1162 if date:
1167 if date:
1163 self._date = util.parsedate(date)
1168 self._date = util.parsedate(date)
1164 if user:
1169 if user:
1165 self._user = user
1170 self._user = user
1166 if changes:
1171 if changes:
1167 self._status = changes
1172 self._status = changes
1168
1173
1169 self._extra = {}
1174 self._extra = {}
1170 if extra:
1175 if extra:
1171 self._extra = extra.copy()
1176 self._extra = extra.copy()
1172 if 'branch' not in self._extra:
1177 if 'branch' not in self._extra:
1173 try:
1178 try:
1174 branch = encoding.fromlocal(self._repo.dirstate.branch())
1179 branch = encoding.fromlocal(self._repo.dirstate.branch())
1175 except UnicodeDecodeError:
1180 except UnicodeDecodeError:
1176 raise error.Abort(_('branch name not in UTF-8!'))
1181 raise error.Abort(_('branch name not in UTF-8!'))
1177 self._extra['branch'] = branch
1182 self._extra['branch'] = branch
1178 if self._extra['branch'] == '':
1183 if self._extra['branch'] == '':
1179 self._extra['branch'] = 'default'
1184 self._extra['branch'] = 'default'
1180
1185
1181 def __str__(self):
1186 def __str__(self):
1182 return str(self._parents[0]) + "+"
1187 return str(self._parents[0]) + "+"
1183
1188
1184 def __nonzero__(self):
1189 def __nonzero__(self):
1185 return True
1190 return True
1186
1191
1187 def _buildflagfunc(self):
1192 def _buildflagfunc(self):
1188 # Create a fallback function for getting file flags when the
1193 # Create a fallback function for getting file flags when the
1189 # filesystem doesn't support them
1194 # filesystem doesn't support them
1190
1195
1191 copiesget = self._repo.dirstate.copies().get
1196 copiesget = self._repo.dirstate.copies().get
1192 parents = self.parents()
1197 parents = self.parents()
1193 if len(parents) < 2:
1198 if len(parents) < 2:
1194 # when we have one parent, it's easy: copy from parent
1199 # when we have one parent, it's easy: copy from parent
1195 man = parents[0].manifest()
1200 man = parents[0].manifest()
1196 def func(f):
1201 def func(f):
1197 f = copiesget(f, f)
1202 f = copiesget(f, f)
1198 return man.flags(f)
1203 return man.flags(f)
1199 else:
1204 else:
1200 # merges are tricky: we try to reconstruct the unstored
1205 # merges are tricky: we try to reconstruct the unstored
1201 # result from the merge (issue1802)
1206 # result from the merge (issue1802)
1202 p1, p2 = parents
1207 p1, p2 = parents
1203 pa = p1.ancestor(p2)
1208 pa = p1.ancestor(p2)
1204 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1209 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1205
1210
1206 def func(f):
1211 def func(f):
1207 f = copiesget(f, f) # may be wrong for merges with copies
1212 f = copiesget(f, f) # may be wrong for merges with copies
1208 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1213 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1209 if fl1 == fl2:
1214 if fl1 == fl2:
1210 return fl1
1215 return fl1
1211 if fl1 == fla:
1216 if fl1 == fla:
1212 return fl2
1217 return fl2
1213 if fl2 == fla:
1218 if fl2 == fla:
1214 return fl1
1219 return fl1
1215 return '' # punt for conflicts
1220 return '' # punt for conflicts
1216
1221
1217 return func
1222 return func
1218
1223
1219 @propertycache
1224 @propertycache
1220 def _flagfunc(self):
1225 def _flagfunc(self):
1221 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1226 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1222
1227
1223 @propertycache
1228 @propertycache
1224 def _manifest(self):
1229 def _manifest(self):
1225 """generate a manifest corresponding to the values in self._status
1230 """generate a manifest corresponding to the values in self._status
1226
1231
1227 This reuses the file nodeids from the parent, but we append an extra letter
1232 This reuses the file nodeids from the parent, but we append an extra letter
1228 when modified. Modified files get an extra 'm' while added files get
1233 when modified. Modified files get an extra 'm' while added files get
1229 an extra 'a'. This is used by the manifest merge to see that files
1234 an extra 'a'. This is used by the manifest merge to see that files
1230 are different and by update logic to avoid deleting newly added files.
1235 are different and by update logic to avoid deleting newly added files.
1231 """
1236 """
1232 parents = self.parents()
1237 parents = self.parents()
1233
1238
1234 man1 = parents[0].manifest()
1239 man1 = parents[0].manifest()
1235 man = man1.copy()
1240 man = man1.copy()
1236 if len(parents) > 1:
1241 if len(parents) > 1:
1237 man2 = self.p2().manifest()
1242 man2 = self.p2().manifest()
1238 def getman(f):
1243 def getman(f):
1239 if f in man1:
1244 if f in man1:
1240 return man1
1245 return man1
1241 return man2
1246 return man2
1242 else:
1247 else:
1243 getman = lambda f: man1
1248 getman = lambda f: man1
1244
1249
1245 copied = self._repo.dirstate.copies()
1250 copied = self._repo.dirstate.copies()
1246 ff = self._flagfunc
1251 ff = self._flagfunc
1247 for i, l in (("a", self._status.added), ("m", self._status.modified)):
1252 for i, l in (("a", self._status.added), ("m", self._status.modified)):
1248 for f in l:
1253 for f in l:
1249 orig = copied.get(f, f)
1254 orig = copied.get(f, f)
1250 man[f] = getman(orig).get(orig, nullid) + i
1255 man[f] = getman(orig).get(orig, nullid) + i
1251 try:
1256 try:
1252 man.setflag(f, ff(f))
1257 man.setflag(f, ff(f))
1253 except OSError:
1258 except OSError:
1254 pass
1259 pass
1255
1260
1256 for f in self._status.deleted + self._status.removed:
1261 for f in self._status.deleted + self._status.removed:
1257 if f in man:
1262 if f in man:
1258 del man[f]
1263 del man[f]
1259
1264
1260 return man
1265 return man
1261
1266
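# Editor's note: an illustration (hypothetical file names) of the suffixed
# nodeids built by _manifest above. A modified file keeps its parent nodeid
# plus 'm', an added file gets nullid plus 'a', giving 21-byte entries:
#
#     man['modified.txt'] == man1['modified.txt'] + 'm'
#     man['added.txt']    == nullid + 'a'
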
1262 @propertycache
1267 @propertycache
1263 def _status(self):
1268 def _status(self):
1264 return self._repo.status()
1269 return self._repo.status()
1265
1270
1266 @propertycache
1271 @propertycache
1267 def _user(self):
1272 def _user(self):
1268 return self._repo.ui.username()
1273 return self._repo.ui.username()
1269
1274
1270 @propertycache
1275 @propertycache
1271 def _date(self):
1276 def _date(self):
1272 return util.makedate()
1277 return util.makedate()
1273
1278
1274 def subrev(self, subpath):
1279 def subrev(self, subpath):
1275 return None
1280 return None
1276
1281
1277 def manifestnode(self):
1282 def manifestnode(self):
1278 return None
1283 return None
1279 def user(self):
1284 def user(self):
1280 return self._user or self._repo.ui.username()
1285 return self._user or self._repo.ui.username()
1281 def date(self):
1286 def date(self):
1282 return self._date
1287 return self._date
1283 def description(self):
1288 def description(self):
1284 return self._text
1289 return self._text
1285 def files(self):
1290 def files(self):
1286 return sorted(self._status.modified + self._status.added +
1291 return sorted(self._status.modified + self._status.added +
1287 self._status.removed)
1292 self._status.removed)
1288
1293
1289 def modified(self):
1294 def modified(self):
1290 return self._status.modified
1295 return self._status.modified
1291 def added(self):
1296 def added(self):
1292 return self._status.added
1297 return self._status.added
1293 def removed(self):
1298 def removed(self):
1294 return self._status.removed
1299 return self._status.removed
1295 def deleted(self):
1300 def deleted(self):
1296 return self._status.deleted
1301 return self._status.deleted
1297 def branch(self):
1302 def branch(self):
1298 return encoding.tolocal(self._extra['branch'])
1303 return encoding.tolocal(self._extra['branch'])
1299 def closesbranch(self):
1304 def closesbranch(self):
1300 return 'close' in self._extra
1305 return 'close' in self._extra
1301 def extra(self):
1306 def extra(self):
1302 return self._extra
1307 return self._extra
1303
1308
1304 def tags(self):
1309 def tags(self):
1305 return []
1310 return []
1306
1311
1307 def bookmarks(self):
1312 def bookmarks(self):
1308 b = []
1313 b = []
1309 for p in self.parents():
1314 for p in self.parents():
1310 b.extend(p.bookmarks())
1315 b.extend(p.bookmarks())
1311 return b
1316 return b
1312
1317
1313 def phase(self):
1318 def phase(self):
1314 phase = phases.draft # default phase to draft
1319 phase = phases.draft # default phase to draft
1315 for p in self.parents():
1320 for p in self.parents():
1316 phase = max(phase, p.phase())
1321 phase = max(phase, p.phase())
1317 return phase
1322 return phase
1318
1323
1319 def hidden(self):
1324 def hidden(self):
1320 return False
1325 return False
1321
1326
1322 def children(self):
1327 def children(self):
1323 return []
1328 return []
1324
1329
1325 def flags(self, path):
1330 def flags(self, path):
1326 if '_manifest' in self.__dict__:
1331 if '_manifest' in self.__dict__:
1327 try:
1332 try:
1328 return self._manifest.flags(path)
1333 return self._manifest.flags(path)
1329 except KeyError:
1334 except KeyError:
1330 return ''
1335 return ''
1331
1336
1332 try:
1337 try:
1333 return self._flagfunc(path)
1338 return self._flagfunc(path)
1334 except OSError:
1339 except OSError:
1335 return ''
1340 return ''
1336
1341
1337 def ancestor(self, c2):
1342 def ancestor(self, c2):
1338 """return the "best" ancestor context of self and c2"""
1343 """return the "best" ancestor context of self and c2"""
1339 return self._parents[0].ancestor(c2) # punt on two parents for now
1344 return self._parents[0].ancestor(c2) # punt on two parents for now
1340
1345
1341 def walk(self, match):
1346 def walk(self, match):
1342 '''Generates matching file names.'''
1347 '''Generates matching file names.'''
1343 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1348 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1344 True, False))
1349 True, False))
1345
1350
1346 def matches(self, match):
1351 def matches(self, match):
1347 return sorted(self._repo.dirstate.matches(match))
1352 return sorted(self._repo.dirstate.matches(match))
1348
1353
1349 def ancestors(self):
1354 def ancestors(self):
1350 for p in self._parents:
1355 for p in self._parents:
1351 yield p
1356 yield p
1352 for a in self._repo.changelog.ancestors(
1357 for a in self._repo.changelog.ancestors(
1353 [p.rev() for p in self._parents]):
1358 [p.rev() for p in self._parents]):
1354 yield changectx(self._repo, a)
1359 yield changectx(self._repo, a)
1355
1360
1356 def markcommitted(self, node):
1361 def markcommitted(self, node):
1357 """Perform post-commit cleanup necessary after committing this ctx
1362 """Perform post-commit cleanup necessary after committing this ctx
1358
1363
1359 Specifically, this updates the backing stores that this working context
1364 Specifically, this updates the backing stores that this working context
1360 wraps to reflect the fact that the changes reflected by this
1365 wraps to reflect the fact that the changes reflected by this
1361 workingctx have been committed. For example, it marks
1366 workingctx have been committed. For example, it marks
1362 modified and added files as normal in the dirstate.
1367 modified and added files as normal in the dirstate.
1363
1368
1364 """
1369 """
1365
1370
1366 self._repo.dirstate.beginparentchange()
1371 self._repo.dirstate.beginparentchange()
1367 for f in self.modified() + self.added():
1372 for f in self.modified() + self.added():
1368 self._repo.dirstate.normal(f)
1373 self._repo.dirstate.normal(f)
1369 for f in self.removed():
1374 for f in self.removed():
1370 self._repo.dirstate.drop(f)
1375 self._repo.dirstate.drop(f)
1371 self._repo.dirstate.setparents(node)
1376 self._repo.dirstate.setparents(node)
1372 self._repo.dirstate.endparentchange()
1377 self._repo.dirstate.endparentchange()
1373
1378
1374 # write changes out explicitly, because nesting wlock at
1379 # write changes out explicitly, because nesting wlock at
1375 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1380 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1376 # from immediately doing so for subsequent changing files
1381 # from immediately doing so for subsequent changing files
1377 self._repo.dirstate.write(self._repo.currenttransaction())
1382 self._repo.dirstate.write(self._repo.currenttransaction())
1378
1383
1379 class workingctx(committablectx):
1384 class workingctx(committablectx):
1380 """A workingctx object makes access to data related to
1385 """A workingctx object makes access to data related to
1381 the current working directory convenient.
1386 the current working directory convenient.
1382 date - any valid date string or (unixtime, offset), or None.
1387 date - any valid date string or (unixtime, offset), or None.
1383 user - username string, or None.
1388 user - username string, or None.
1384 extra - a dictionary of extra values, or None.
1389 extra - a dictionary of extra values, or None.
1385 changes - a list of file lists as returned by localrepo.status()
1390 changes - a list of file lists as returned by localrepo.status()
1386 or None to use the repository status.
1391 or None to use the repository status.
1387 """
1392 """
1388 def __init__(self, repo, text="", user=None, date=None, extra=None,
1393 def __init__(self, repo, text="", user=None, date=None, extra=None,
1389 changes=None):
1394 changes=None):
1390 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1395 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1391
1396
1392 def __iter__(self):
1397 def __iter__(self):
1393 d = self._repo.dirstate
1398 d = self._repo.dirstate
1394 for f in d:
1399 for f in d:
1395 if d[f] != 'r':
1400 if d[f] != 'r':
1396 yield f
1401 yield f
1397
1402
1398 def __contains__(self, key):
1403 def __contains__(self, key):
1399 return self._repo.dirstate[key] not in "?r"
1404 return self._repo.dirstate[key] not in "?r"
1400
1405
1401 def hex(self):
1406 def hex(self):
1402 return hex(wdirid)
1407 return hex(wdirid)
1403
1408
1404 @propertycache
1409 @propertycache
1405 def _parents(self):
1410 def _parents(self):
1406 p = self._repo.dirstate.parents()
1411 p = self._repo.dirstate.parents()
1407 if p[1] == nullid:
1412 if p[1] == nullid:
1408 p = p[:-1]
1413 p = p[:-1]
1409 return [changectx(self._repo, x) for x in p]
1414 return [changectx(self._repo, x) for x in p]
1410
1415
1411 def filectx(self, path, filelog=None):
1416 def filectx(self, path, filelog=None):
1412 """get a file context from the working directory"""
1417 """get a file context from the working directory"""
1413 return workingfilectx(self._repo, path, workingctx=self,
1418 return workingfilectx(self._repo, path, workingctx=self,
1414 filelog=filelog)
1419 filelog=filelog)
1415
1420
1416 def dirty(self, missing=False, merge=True, branch=True):
1421 def dirty(self, missing=False, merge=True, branch=True):
1417 "check whether a working directory is modified"
1422 "check whether a working directory is modified"
1418 # check subrepos first
1423 # check subrepos first
1419 for s in sorted(self.substate):
1424 for s in sorted(self.substate):
1420 if self.sub(s).dirty():
1425 if self.sub(s).dirty():
1421 return True
1426 return True
1422 # check current working dir
1427 # check current working dir
1423 return ((merge and self.p2()) or
1428 return ((merge and self.p2()) or
1424 (branch and self.branch() != self.p1().branch()) or
1429 (branch and self.branch() != self.p1().branch()) or
1425 self.modified() or self.added() or self.removed() or
1430 self.modified() or self.added() or self.removed() or
1426 (missing and self.deleted()))
1431 (missing and self.deleted()))
1427
1432
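# Editor's note: a sketch of the typical guard built on dirty() above; the
# caller code and message are hypothetical, not part of this module:
#
#     if repo[None].dirty(missing=True):
#         raise error.Abort(_('uncommitted changes'))
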
1428 def add(self, list, prefix=""):
1433 def add(self, list, prefix=""):
1429 join = lambda f: os.path.join(prefix, f)
1434 join = lambda f: os.path.join(prefix, f)
1430 with self._repo.wlock():
1435 with self._repo.wlock():
1431 ui, ds = self._repo.ui, self._repo.dirstate
1436 ui, ds = self._repo.ui, self._repo.dirstate
1432 rejected = []
1437 rejected = []
1433 lstat = self._repo.wvfs.lstat
1438 lstat = self._repo.wvfs.lstat
1434 for f in list:
1439 for f in list:
1435 scmutil.checkportable(ui, join(f))
1440 scmutil.checkportable(ui, join(f))
1436 try:
1441 try:
1437 st = lstat(f)
1442 st = lstat(f)
1438 except OSError:
1443 except OSError:
1439 ui.warn(_("%s does not exist!\n") % join(f))
1444 ui.warn(_("%s does not exist!\n") % join(f))
1440 rejected.append(f)
1445 rejected.append(f)
1441 continue
1446 continue
1442 if st.st_size > 10000000:
1447 if st.st_size > 10000000:
1443 ui.warn(_("%s: up to %d MB of RAM may be required "
1448 ui.warn(_("%s: up to %d MB of RAM may be required "
1444 "to manage this file\n"
1449 "to manage this file\n"
1445 "(use 'hg revert %s' to cancel the "
1450 "(use 'hg revert %s' to cancel the "
1446 "pending addition)\n")
1451 "pending addition)\n")
1447 % (f, 3 * st.st_size // 1000000, join(f)))
1452 % (f, 3 * st.st_size // 1000000, join(f)))
1448 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1453 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1449 ui.warn(_("%s not added: only files and symlinks "
1454 ui.warn(_("%s not added: only files and symlinks "
1450 "supported currently\n") % join(f))
1455 "supported currently\n") % join(f))
1451 rejected.append(f)
1456 rejected.append(f)
1452 elif ds[f] in 'amn':
1457 elif ds[f] in 'amn':
1453 ui.warn(_("%s already tracked!\n") % join(f))
1458 ui.warn(_("%s already tracked!\n") % join(f))
1454 elif ds[f] == 'r':
1459 elif ds[f] == 'r':
1455 ds.normallookup(f)
1460 ds.normallookup(f)
1456 else:
1461 else:
1457 ds.add(f)
1462 ds.add(f)
1458 return rejected
1463 return rejected
1459
1464
1460 def forget(self, files, prefix=""):
1465 def forget(self, files, prefix=""):
1461 join = lambda f: os.path.join(prefix, f)
1466 join = lambda f: os.path.join(prefix, f)
1462 with self._repo.wlock():
1467 with self._repo.wlock():
1463 rejected = []
1468 rejected = []
1464 for f in files:
1469 for f in files:
1465 if f not in self._repo.dirstate:
1470 if f not in self._repo.dirstate:
1466 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1471 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1467 rejected.append(f)
1472 rejected.append(f)
1468 elif self._repo.dirstate[f] != 'a':
1473 elif self._repo.dirstate[f] != 'a':
1469 self._repo.dirstate.remove(f)
1474 self._repo.dirstate.remove(f)
1470 else:
1475 else:
1471 self._repo.dirstate.drop(f)
1476 self._repo.dirstate.drop(f)
1472 return rejected
1477 return rejected
1473
1478
1474 def undelete(self, list):
1479 def undelete(self, list):
1475 pctxs = self.parents()
1480 pctxs = self.parents()
1476 with self._repo.wlock():
1481 with self._repo.wlock():
1477 for f in list:
1482 for f in list:
1478 if self._repo.dirstate[f] != 'r':
1483 if self._repo.dirstate[f] != 'r':
1479 self._repo.ui.warn(_("%s not removed!\n") % f)
1484 self._repo.ui.warn(_("%s not removed!\n") % f)
1480 else:
1485 else:
1481 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1486 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1482 t = fctx.data()
1487 t = fctx.data()
1483 self._repo.wwrite(f, t, fctx.flags())
1488 self._repo.wwrite(f, t, fctx.flags())
1484 self._repo.dirstate.normal(f)
1489 self._repo.dirstate.normal(f)
1485
1490
1486 def copy(self, source, dest):
1491 def copy(self, source, dest):
1487 try:
1492 try:
1488 st = self._repo.wvfs.lstat(dest)
1493 st = self._repo.wvfs.lstat(dest)
1489 except OSError as err:
1494 except OSError as err:
1490 if err.errno != errno.ENOENT:
1495 if err.errno != errno.ENOENT:
1491 raise
1496 raise
1492 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1497 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1493 return
1498 return
1494 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1499 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1495 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1500 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1496 "symbolic link\n") % dest)
1501 "symbolic link\n") % dest)
1497 else:
1502 else:
1498 with self._repo.wlock():
1503 with self._repo.wlock():
1499 if self._repo.dirstate[dest] in '?':
1504 if self._repo.dirstate[dest] in '?':
1500 self._repo.dirstate.add(dest)
1505 self._repo.dirstate.add(dest)
1501 elif self._repo.dirstate[dest] in 'r':
1506 elif self._repo.dirstate[dest] in 'r':
1502 self._repo.dirstate.normallookup(dest)
1507 self._repo.dirstate.normallookup(dest)
1503 self._repo.dirstate.copy(source, dest)
1508 self._repo.dirstate.copy(source, dest)
1504
1509
1505 def match(self, pats=[], include=None, exclude=None, default='glob',
1510 def match(self, pats=[], include=None, exclude=None, default='glob',
1506 listsubrepos=False, badfn=None):
1511 listsubrepos=False, badfn=None):
1507 r = self._repo
1512 r = self._repo
1508
1513
1509 # Only a case insensitive filesystem needs magic to translate user input
1514 # Only a case insensitive filesystem needs magic to translate user input
1510 # to actual case in the filesystem.
1515 # to actual case in the filesystem.
1511 if not util.fscasesensitive(r.root):
1516 if not util.fscasesensitive(r.root):
1512 return matchmod.icasefsmatcher(r.root, r.getcwd(), pats, include,
1517 return matchmod.icasefsmatcher(r.root, r.getcwd(), pats, include,
1513 exclude, default, r.auditor, self,
1518 exclude, default, r.auditor, self,
1514 listsubrepos=listsubrepos,
1519 listsubrepos=listsubrepos,
1515 badfn=badfn)
1520 badfn=badfn)
1516 return matchmod.match(r.root, r.getcwd(), pats,
1521 return matchmod.match(r.root, r.getcwd(), pats,
1517 include, exclude, default,
1522 include, exclude, default,
1518 auditor=r.auditor, ctx=self,
1523 auditor=r.auditor, ctx=self,
1519 listsubrepos=listsubrepos, badfn=badfn)
1524 listsubrepos=listsubrepos, badfn=badfn)
1520
1525
1521 def _filtersuspectsymlink(self, files):
1526 def _filtersuspectsymlink(self, files):
1522 if not files or self._repo.dirstate._checklink:
1527 if not files or self._repo.dirstate._checklink:
1523 return files
1528 return files
1524
1529
1525 # Symlink placeholders may get non-symlink-like contents
1530 # Symlink placeholders may get non-symlink-like contents
1526 # via user error or dereferencing by NFS or Samba servers,
1531 # via user error or dereferencing by NFS or Samba servers,
1527 # so we filter out any placeholders that don't look like a
1532 # so we filter out any placeholders that don't look like a
1528 # symlink
1533 # symlink
1529 sane = []
1534 sane = []
1530 for f in files:
1535 for f in files:
1531 if self.flags(f) == 'l':
1536 if self.flags(f) == 'l':
1532 d = self[f].data()
1537 d = self[f].data()
1533 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1538 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1534 self._repo.ui.debug('ignoring suspect symlink placeholder'
1539 self._repo.ui.debug('ignoring suspect symlink placeholder'
1535 ' "%s"\n' % f)
1540 ' "%s"\n' % f)
1536 continue
1541 continue
1537 sane.append(f)
1542 sane.append(f)
1538 return sane
1543 return sane
1539
1544
1540 def _checklookup(self, files):
1545 def _checklookup(self, files):
1541 # check for any possibly clean files
1546 # check for any possibly clean files
1542 if not files:
1547 if not files:
1543 return [], []
1548 return [], []
1544
1549
1545 modified = []
1550 modified = []
1546 fixup = []
1551 fixup = []
1547 pctx = self._parents[0]
1552 pctx = self._parents[0]
1548 # do a full compare of any files that might have changed
1553 # do a full compare of any files that might have changed
1549 for f in sorted(files):
1554 for f in sorted(files):
1550 if (f not in pctx or self.flags(f) != pctx.flags(f)
1555 if (f not in pctx or self.flags(f) != pctx.flags(f)
1551 or pctx[f].cmp(self[f])):
1556 or pctx[f].cmp(self[f])):
1552 modified.append(f)
1557 modified.append(f)
1553 else:
1558 else:
1554 fixup.append(f)
1559 fixup.append(f)
1555
1560
1556 # update dirstate for files that are actually clean
1561 # update dirstate for files that are actually clean
1557 if fixup:
1562 if fixup:
1558 try:
1563 try:
1559 # updating the dirstate is optional
1564 # updating the dirstate is optional
1560 # so we don't wait on the lock
1565 # so we don't wait on the lock
1561 # wlock can invalidate the dirstate, so cache normal _after_
1566 # wlock can invalidate the dirstate, so cache normal _after_
1562 # taking the lock
1567 # taking the lock
1563 with self._repo.wlock(False):
1568 with self._repo.wlock(False):
1564 normal = self._repo.dirstate.normal
1569 normal = self._repo.dirstate.normal
1565 for f in fixup:
1570 for f in fixup:
1566 normal(f)
1571 normal(f)
1567 # write changes out explicitly, because nesting
1572 # write changes out explicitly, because nesting
1568 # wlock at runtime may prevent 'wlock.release()'
1573 # wlock at runtime may prevent 'wlock.release()'
1569 # after this block from doing so for subsequent
1574 # after this block from doing so for subsequent
1570 # changing files
1575 # changing files
1571 self._repo.dirstate.write(self._repo.currenttransaction())
1576 self._repo.dirstate.write(self._repo.currenttransaction())
1572 except error.LockError:
1577 except error.LockError:
1573 pass
1578 pass
1574 return modified, fixup
1579 return modified, fixup
1575
1580
1576 def _manifestmatches(self, match, s):
1581 def _manifestmatches(self, match, s):
1577 """Slow path for workingctx
1582 """Slow path for workingctx
1578
1583
1579 The fast path is when we compare the working directory to its parent,
1584 The fast path is when we compare the working directory to its parent,
1580 which means this function is comparing with a non-parent; therefore we
1585 which means this function is comparing with a non-parent; therefore we
1581 need to build a manifest and return what matches.
1586 need to build a manifest and return what matches.
1582 """
1587 """
1583 mf = self._repo['.']._manifestmatches(match, s)
1588 mf = self._repo['.']._manifestmatches(match, s)
1584 for f in s.modified + s.added:
1589 for f in s.modified + s.added:
1585 mf[f] = _newnode
1590 mf[f] = _newnode
1586 mf.setflag(f, self.flags(f))
1591 mf.setflag(f, self.flags(f))
1587 for f in s.removed:
1592 for f in s.removed:
1588 if f in mf:
1593 if f in mf:
1589 del mf[f]
1594 del mf[f]
1590 return mf
1595 return mf
1591
1596
1592 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1597 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1593 unknown=False):
1598 unknown=False):
1594 '''Gets the status from the dirstate -- internal use only.'''
1599 '''Gets the status from the dirstate -- internal use only.'''
1595 listignored, listclean, listunknown = ignored, clean, unknown
1600 listignored, listclean, listunknown = ignored, clean, unknown
1596 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1601 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1597 subrepos = []
1602 subrepos = []
1598 if '.hgsub' in self:
1603 if '.hgsub' in self:
1599 subrepos = sorted(self.substate)
1604 subrepos = sorted(self.substate)
1600 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1605 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1601 listclean, listunknown)
1606 listclean, listunknown)
1602
1607
1603 # check for any possibly clean files
1608 # check for any possibly clean files
1604 if cmp:
1609 if cmp:
1605 modified2, fixup = self._checklookup(cmp)
1610 modified2, fixup = self._checklookup(cmp)
1606 s.modified.extend(modified2)
1611 s.modified.extend(modified2)
1607
1612
1608 # update dirstate for files that are actually clean
1613 # update dirstate for files that are actually clean
1609 if fixup and listclean:
1614 if fixup and listclean:
1610 s.clean.extend(fixup)
1615 s.clean.extend(fixup)
1611
1616
1612 if match.always():
1617 if match.always():
1613 # cache for performance
1618 # cache for performance
1614 if s.unknown or s.ignored or s.clean:
1619 if s.unknown or s.ignored or s.clean:
1615 # "_status" is cached with list*=False in the normal route
1620 # "_status" is cached with list*=False in the normal route
1616 self._status = scmutil.status(s.modified, s.added, s.removed,
1621 self._status = scmutil.status(s.modified, s.added, s.removed,
1617 s.deleted, [], [], [])
1622 s.deleted, [], [], [])
1618 else:
1623 else:
1619 self._status = s
1624 self._status = s
1620
1625
1621 return s
1626 return s
1622
1627
1623 def _buildstatus(self, other, s, match, listignored, listclean,
1628 def _buildstatus(self, other, s, match, listignored, listclean,
1624 listunknown):
1629 listunknown):
1625 """build a status with respect to another context
1630 """build a status with respect to another context
1626
1631
1627 This includes logic for maintaining the fast path of status when
1632 This includes logic for maintaining the fast path of status when
1628 comparing the working directory against its parent, which is to skip
1633 comparing the working directory against its parent, which is to skip
1629 building a new manifest if self (working directory) is not comparing
1634 building a new manifest if self (working directory) is not comparing
1630 against its parent (repo['.']).
1635 against its parent (repo['.']).
1631 """
1636 """
1632 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1637 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1633 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1638 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1634 # might have accidentally ended up with the entire contents of the file
1639 # might have accidentally ended up with the entire contents of the file
1635 # they are supposed to be linking to.
1640 # they are supposed to be linking to.
1636 s.modified[:] = self._filtersuspectsymlink(s.modified)
1641 s.modified[:] = self._filtersuspectsymlink(s.modified)
1637 if other != self._repo['.']:
1642 if other != self._repo['.']:
1638 s = super(workingctx, self)._buildstatus(other, s, match,
1643 s = super(workingctx, self)._buildstatus(other, s, match,
1639 listignored, listclean,
1644 listignored, listclean,
1640 listunknown)
1645 listunknown)
1641 return s
1646 return s
1642
1647
1643 def _matchstatus(self, other, match):
1648 def _matchstatus(self, other, match):
1644 """override the match method with a filter for directory patterns
1649 """override the match method with a filter for directory patterns
1645
1650
1646 We use inheritance to customize the match.bad method only in cases of
1651 We use inheritance to customize the match.bad method only in cases of
1647 workingctx since it belongs only to the working directory when
1652 workingctx since it belongs only to the working directory when
1648 comparing against the parent changeset.
1653 comparing against the parent changeset.
1649
1654
1650 If we aren't comparing against the working directory's parent, then we
1655 If we aren't comparing against the working directory's parent, then we
1651 just use the default match object sent to us.
1656 just use the default match object sent to us.
1652 """
1657 """
1653 superself = super(workingctx, self)
1658 superself = super(workingctx, self)
1654 match = superself._matchstatus(other, match)
1659 match = superself._matchstatus(other, match)
1655 if other != self._repo['.']:
1660 if other != self._repo['.']:
1656 def bad(f, msg):
1661 def bad(f, msg):
1657 # 'f' may be a directory pattern from 'match.files()',
1662 # 'f' may be a directory pattern from 'match.files()',
1658 # so 'f not in ctx1' is not enough
1663 # so 'f not in ctx1' is not enough
1659 if f not in other and not other.hasdir(f):
1664 if f not in other and not other.hasdir(f):
1660 self._repo.ui.warn('%s: %s\n' %
1665 self._repo.ui.warn('%s: %s\n' %
1661 (self._repo.dirstate.pathto(f), msg))
1666 (self._repo.dirstate.pathto(f), msg))
1662 match.bad = bad
1667 match.bad = bad
1663 return match
1668 return match
1664
1669
1665 class committablefilectx(basefilectx):
1670 class committablefilectx(basefilectx):
1666 """A committablefilectx provides common functionality for a file context
1671 """A committablefilectx provides common functionality for a file context
1667 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1672 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1668 def __init__(self, repo, path, filelog=None, ctx=None):
1673 def __init__(self, repo, path, filelog=None, ctx=None):
1669 self._repo = repo
1674 self._repo = repo
1670 self._path = path
1675 self._path = path
1671 self._changeid = None
1676 self._changeid = None
1672 self._filerev = self._filenode = None
1677 self._filerev = self._filenode = None
1673
1678
1674 if filelog is not None:
1679 if filelog is not None:
1675 self._filelog = filelog
1680 self._filelog = filelog
1676 if ctx:
1681 if ctx:
1677 self._changectx = ctx
1682 self._changectx = ctx
1678
1683
1679 def __nonzero__(self):
1684 def __nonzero__(self):
1680 return True
1685 return True
1681
1686
1682 def linkrev(self):
1687 def linkrev(self):
1683 # linked to self._changectx no matter if file is modified or not
1688 # linked to self._changectx no matter if file is modified or not
1684 return self.rev()
1689 return self.rev()
1685
1690
1686 def parents(self):
1691 def parents(self):
1687 '''return parent filectxs, following copies if necessary'''
1692 '''return parent filectxs, following copies if necessary'''
1688 def filenode(ctx, path):
1693 def filenode(ctx, path):
1689 return ctx._manifest.get(path, nullid)
1694 return ctx._manifest.get(path, nullid)
1690
1695
1691 path = self._path
1696 path = self._path
1692 fl = self._filelog
1697 fl = self._filelog
1693 pcl = self._changectx._parents
1698 pcl = self._changectx._parents
1694 renamed = self.renamed()
1699 renamed = self.renamed()
1695
1700
1696 if renamed:
1701 if renamed:
1697 pl = [renamed + (None,)]
1702 pl = [renamed + (None,)]
1698 else:
1703 else:
1699 pl = [(path, filenode(pcl[0], path), fl)]
1704 pl = [(path, filenode(pcl[0], path), fl)]
1700
1705
1701 for pc in pcl[1:]:
1706 for pc in pcl[1:]:
1702 pl.append((path, filenode(pc, path), fl))
1707 pl.append((path, filenode(pc, path), fl))
1703
1708
1704 return [self._parentfilectx(p, fileid=n, filelog=l)
1709 return [self._parentfilectx(p, fileid=n, filelog=l)
1705 for p, n, l in pl if n != nullid]
1710 for p, n, l in pl if n != nullid]
1706
1711
1707 def children(self):
1712 def children(self):
1708 return []
1713 return []
1709
1714
1710 class workingfilectx(committablefilectx):
1715 class workingfilectx(committablefilectx):
1711 """A workingfilectx object makes access to data related to a particular
1716 """A workingfilectx object makes access to data related to a particular
1712 file in the working directory convenient."""
1717 file in the working directory convenient."""
1713 def __init__(self, repo, path, filelog=None, workingctx=None):
1718 def __init__(self, repo, path, filelog=None, workingctx=None):
1714 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1719 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1715
1720
1716 @propertycache
1721 @propertycache
1717 def _changectx(self):
1722 def _changectx(self):
1718 return workingctx(self._repo)
1723 return workingctx(self._repo)
1719
1724
1720 def data(self):
1725 def data(self):
1721 return self._repo.wread(self._path)
1726 return self._repo.wread(self._path)
1722 def renamed(self):
1727 def renamed(self):
1723 rp = self._repo.dirstate.copied(self._path)
1728 rp = self._repo.dirstate.copied(self._path)
1724 if not rp:
1729 if not rp:
1725 return None
1730 return None
1726 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1731 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1727
1732
1728 def size(self):
1733 def size(self):
1729 return self._repo.wvfs.lstat(self._path).st_size
1734 return self._repo.wvfs.lstat(self._path).st_size
1730 def date(self):
1735 def date(self):
1731 t, tz = self._changectx.date()
1736 t, tz = self._changectx.date()
1732 try:
1737 try:
1733 return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
1738 return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
1734 except OSError as err:
1739 except OSError as err:
1735 if err.errno != errno.ENOENT:
1740 if err.errno != errno.ENOENT:
1736 raise
1741 raise
1737 return (t, tz)
1742 return (t, tz)
1738
1743
1739 def cmp(self, fctx):
1744 def cmp(self, fctx):
1740 """compare with other file context
1745 """compare with other file context
1741
1746
1742 returns True if different than fctx.
1747 returns True if different than fctx.
1743 """
1748 """
1744 # fctx should be a filectx (not a workingfilectx)
1749 # fctx should be a filectx (not a workingfilectx)
1745 # invert comparison to reuse the same code path
1750 # invert comparison to reuse the same code path
1746 return fctx.cmp(self)
1751 return fctx.cmp(self)
1747
1752
1748 def remove(self, ignoremissing=False):
1753 def remove(self, ignoremissing=False):
1749 """wraps unlink for a repo's working directory"""
1754 """wraps unlink for a repo's working directory"""
1750 util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)
1755 util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)
1751
1756
1752 def write(self, data, flags):
1757 def write(self, data, flags):
1753 """wraps repo.wwrite"""
1758 """wraps repo.wwrite"""
1754 self._repo.wwrite(self._path, data, flags)
1759 self._repo.wwrite(self._path, data, flags)
1755
1760
1756 class workingcommitctx(workingctx):
1761 class workingcommitctx(workingctx):
1757 """A workingcommitctx object makes access to data related to
1762 """A workingcommitctx object makes access to data related to
1758 the revision being committed convenient.
1763 the revision being committed convenient.
1759
1764
1760 This hides changes in the working directory, if they aren't
1765 This hides changes in the working directory, if they aren't
1761 committed in this context.
1766 committed in this context.
1762 """
1767 """
1763 def __init__(self, repo, changes,
1768 def __init__(self, repo, changes,
1764 text="", user=None, date=None, extra=None):
1769 text="", user=None, date=None, extra=None):
1765 super(workingctx, self).__init__(repo, text, user, date, extra,
1770 super(workingctx, self).__init__(repo, text, user, date, extra,
1766 changes)
1771 changes)
1767
1772
1768 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1773 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1769 unknown=False):
1774 unknown=False):
1770 """Return matched files only in ``self._status``
1775 """Return matched files only in ``self._status``
1771
1776
1772 Uncommitted files appear "clean" via this context, even if
1777 Uncommitted files appear "clean" via this context, even if
1773 they aren't actually so in the working directory.
1778 they aren't actually so in the working directory.
1774 """
1779 """
1775 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1780 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1776 if clean:
1781 if clean:
1777 clean = [f for f in self._manifest if f not in self._changedset]
1782 clean = [f for f in self._manifest if f not in self._changedset]
1778 else:
1783 else:
1779 clean = []
1784 clean = []
1780 return scmutil.status([f for f in self._status.modified if match(f)],
1785 return scmutil.status([f for f in self._status.modified if match(f)],
1781 [f for f in self._status.added if match(f)],
1786 [f for f in self._status.added if match(f)],
1782 [f for f in self._status.removed if match(f)],
1787 [f for f in self._status.removed if match(f)],
1783 [], [], [], clean)
1788 [], [], [], clean)
1784
1789
1785 @propertycache
1790 @propertycache
1786 def _changedset(self):
1791 def _changedset(self):
1787 """Return the set of files changed in this context
1792 """Return the set of files changed in this context
1788 """
1793 """
1789 changed = set(self._status.modified)
1794 changed = set(self._status.modified)
1790 changed.update(self._status.added)
1795 changed.update(self._status.added)
1791 changed.update(self._status.removed)
1796 changed.update(self._status.removed)
1792 return changed
1797 return changed
1793
1798
1794 def makecachingfilectxfn(func):
1799 def makecachingfilectxfn(func):
1795 """Create a filectxfn that caches based on the path.
1800 """Create a filectxfn that caches based on the path.
1796
1801
1797 We can't use util.cachefunc because it uses all arguments as the cache
1802 We can't use util.cachefunc because it uses all arguments as the cache
1798 key and this creates a cycle since the arguments include the repo and
1803 key and this creates a cycle since the arguments include the repo and
1799 memctx.
1804 memctx.
1800 """
1805 """
1801 cache = {}
1806 cache = {}
1802
1807
1803 def getfilectx(repo, memctx, path):
1808 def getfilectx(repo, memctx, path):
1804 if path not in cache:
1809 if path not in cache:
1805 cache[path] = func(repo, memctx, path)
1810 cache[path] = func(repo, memctx, path)
1806 return cache[path]
1811 return cache[path]
1807
1812
1808 return getfilectx
1813 return getfilectx
1809
1814
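# Editor's note: a usage sketch for makecachingfilectxfn() above, assuming a
# hypothetical callback; memfilectx is defined later in this module:
#
#     def getfilectx(repo, memctx, path):
#         return memfilectx(repo, path, data='contents of %s\n' % path)
#
#     filectxfn = makecachingfilectxfn(getfilectx)
#     # repeated lookups of the same path now reuse the cached memfilectx
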
1810 class memctx(committablectx):
1815 class memctx(committablectx):
1811 """Use memctx to perform in-memory commits via localrepo.commitctx().
1816 """Use memctx to perform in-memory commits via localrepo.commitctx().
1812
1817
1813 Revision information is supplied at initialization time, while related
1818 Revision information is supplied at initialization time, while related
1814 file data is made available through a callback
1819 file data is made available through a callback
1815 mechanism. 'repo' is the current localrepo, 'parents' is a
1820 mechanism. 'repo' is the current localrepo, 'parents' is a
1816 sequence of two parent revisions identifiers (pass None for every
1821 sequence of two parent revisions identifiers (pass None for every
1817 missing parent), 'text' is the commit message and 'files' lists
1822 missing parent), 'text' is the commit message and 'files' lists
1818 names of files touched by the revision (normalized and relative to
1823 names of files touched by the revision (normalized and relative to
1819 repository root).
1824 repository root).
1820
1825
1821 filectxfn(repo, memctx, path) is a callable receiving the
1826 filectxfn(repo, memctx, path) is a callable receiving the
1822 repository, the current memctx object and the normalized path of
1827 repository, the current memctx object and the normalized path of
1823 requested file, relative to repository root. It is fired by the
1828 requested file, relative to repository root. It is fired by the
1824 commit function for every file in 'files', but calls order is
1829 commit function for every file in 'files', but calls order is
1825 undefined. If the file is available in the revision being
1830 undefined. If the file is available in the revision being
1826 committed (updated or added), filectxfn returns a memfilectx
1831 committed (updated or added), filectxfn returns a memfilectx
1827 object. If the file was removed, filectxfn raises an
1832 object. If the file was removed, filectxfn raises an
1828 IOError. Moved files are represented by marking the source file
1833 IOError. Moved files are represented by marking the source file
1829 removed and the new file added with copy information (see
1834 removed and the new file added with copy information (see
1830 memfilectx).
1835 memfilectx).
1831
1836
1832 user receives the committer name and defaults to current
1837 user receives the committer name and defaults to current
1833 repository username, date is the commit date in any format
1838 repository username, date is the commit date in any format
1834 supported by util.parsedate() and defaults to current date, extra
1839 supported by util.parsedate() and defaults to current date, extra
1835 is a dictionary of metadata or is left empty.
1840 is a dictionary of metadata or is left empty.
1836 """
1841 """
1837
1842
1838 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
1843 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
1839 # Extensions that need to retain compatibility across Mercurial 3.1 can use
1844 # Extensions that need to retain compatibility across Mercurial 3.1 can use
1840 # this field to determine what to do in filectxfn.
1845 # this field to determine what to do in filectxfn.
1841 _returnnoneformissingfiles = True
1846 _returnnoneformissingfiles = True
1842
1847
1843 def __init__(self, repo, parents, text, files, filectxfn, user=None,
1848 def __init__(self, repo, parents, text, files, filectxfn, user=None,
1844 date=None, extra=None, editor=False):
1849 date=None, extra=None, editor=False):
1845 super(memctx, self).__init__(repo, text, user, date, extra)
1850 super(memctx, self).__init__(repo, text, user, date, extra)
1846 self._rev = None
1851 self._rev = None
1847 self._node = None
1852 self._node = None
1848 parents = [(p or nullid) for p in parents]
1853 parents = [(p or nullid) for p in parents]
1849 p1, p2 = parents
1854 p1, p2 = parents
1850 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
1855 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
1851 files = sorted(set(files))
1856 files = sorted(set(files))
1852 self._files = files
1857 self._files = files
1853 self.substate = {}
1858 self.substate = {}
1854
1859
1855 # if store is not callable, wrap it in a function
1860 # if store is not callable, wrap it in a function
1856 if not callable(filectxfn):
1861 if not callable(filectxfn):
1857 def getfilectx(repo, memctx, path):
1862 def getfilectx(repo, memctx, path):
1858 fctx = filectxfn[path]
1863 fctx = filectxfn[path]
1859 # this is weird but apparently we only keep track of one parent
1864 # this is weird but apparently we only keep track of one parent
1860 # (why not only store that instead of a tuple?)
1865 # (why not only store that instead of a tuple?)
1861 copied = fctx.renamed()
1866 copied = fctx.renamed()
1862 if copied:
1867 if copied:
1863 copied = copied[0]
1868 copied = copied[0]
1864 return memfilectx(repo, path, fctx.data(),
1869 return memfilectx(repo, path, fctx.data(),
1865 islink=fctx.islink(), isexec=fctx.isexec(),
1870 islink=fctx.islink(), isexec=fctx.isexec(),
1866 copied=copied, memctx=memctx)
1871 copied=copied, memctx=memctx)
1867 self._filectxfn = getfilectx
1872 self._filectxfn = getfilectx
1868 else:
1873 else:
1869 # memoizing increases performance for e.g. vcs convert scenarios.
1874 # memoizing increases performance for e.g. vcs convert scenarios.
1870 self._filectxfn = makecachingfilectxfn(filectxfn)
1875 self._filectxfn = makecachingfilectxfn(filectxfn)
1871
1876
1872 if extra:
1877 if extra:
1873 self._extra = extra.copy()
1878 self._extra = extra.copy()
1874 else:
1879 else:
1875 self._extra = {}
1880 self._extra = {}
1876
1881
1877 if self._extra.get('branch', '') == '':
1882 if self._extra.get('branch', '') == '':
1878 self._extra['branch'] = 'default'
1883 self._extra['branch'] = 'default'
1879
1884
1880 if editor:
1885 if editor:
1881 self._text = editor(self._repo, self, [])
1886 self._text = editor(self._repo, self, [])
1882 self._repo.savecommitmessage(self._text)
1887 self._repo.savecommitmessage(self._text)
1883
1888
1884 def filectx(self, path, filelog=None):
1889 def filectx(self, path, filelog=None):
1885 """get a file context from the working directory
1890 """get a file context from the working directory
1886
1891
1887 Returns None if file doesn't exist and should be removed."""
1892 Returns None if file doesn't exist and should be removed."""
1888 return self._filectxfn(self._repo, self, path)
1893 return self._filectxfn(self._repo, self, path)
1889
1894
1890 def commit(self):
1895 def commit(self):
1891 """commit context to the repo"""
1896 """commit context to the repo"""
1892 return self._repo.commitctx(self)
1897 return self._repo.commitctx(self)
1893
1898
1894 @propertycache
1899 @propertycache
1895 def _manifest(self):
1900 def _manifest(self):
1896 """generate a manifest based on the return values of filectxfn"""
1901 """generate a manifest based on the return values of filectxfn"""
1897
1902
1898 # keep this simple for now; just worry about p1
1903 # keep this simple for now; just worry about p1
1899 pctx = self._parents[0]
1904 pctx = self._parents[0]
1900 man = pctx.manifest().copy()
1905 man = pctx.manifest().copy()
1901
1906
1902 for f in self._status.modified:
1907 for f in self._status.modified:
1903 p1node = nullid
1908 p1node = nullid
1904 p2node = nullid
1909 p2node = nullid
1905 p = pctx[f].parents() # if file isn't in pctx, check p2?
1910 p = pctx[f].parents() # if file isn't in pctx, check p2?
1906 if len(p) > 0:
1911 if len(p) > 0:
1907 p1node = p[0].filenode()
1912 p1node = p[0].filenode()
1908 if len(p) > 1:
1913 if len(p) > 1:
1909 p2node = p[1].filenode()
1914 p2node = p[1].filenode()
1910 man[f] = revlog.hash(self[f].data(), p1node, p2node)
1915 man[f] = revlog.hash(self[f].data(), p1node, p2node)
1911
1916
1912 for f in self._status.added:
1917 for f in self._status.added:
1913 man[f] = revlog.hash(self[f].data(), nullid, nullid)
1918 man[f] = revlog.hash(self[f].data(), nullid, nullid)
1914
1919
1915 for f in self._status.removed:
1920 for f in self._status.removed:
1916 if f in man:
1921 if f in man:
1917 del man[f]
1922 del man[f]
1918
1923
1919 return man
1924 return man
1920
1925
1921 @propertycache
1926 @propertycache
1922 def _status(self):
1927 def _status(self):
1923 """Calculate exact status from ``files`` specified at construction
1928 """Calculate exact status from ``files`` specified at construction
1924 """
1929 """
1925 man1 = self.p1().manifest()
1930 man1 = self.p1().manifest()
1926 p2 = self._parents[1]
1931 p2 = self._parents[1]
1927 # "1 < len(self._parents)" can't be used for checking
1932 # "1 < len(self._parents)" can't be used for checking
1928 # existence of the 2nd parent, because "memctx._parents" is
1933 # existence of the 2nd parent, because "memctx._parents" is
1929 # explicitly initialized by the list, of which length is 2.
1934 # explicitly initialized by the list, of which length is 2.
1930 if p2.node() != nullid:
1935 if p2.node() != nullid:
1931 man2 = p2.manifest()
1936 man2 = p2.manifest()
1932 managing = lambda f: f in man1 or f in man2
1937 managing = lambda f: f in man1 or f in man2
1933 else:
1938 else:
1934 managing = lambda f: f in man1
1939 managing = lambda f: f in man1
1935
1940
1936 modified, added, removed = [], [], []
1941 modified, added, removed = [], [], []
1937 for f in self._files:
1942 for f in self._files:
1938 if not managing(f):
1943 if not managing(f):
1939 added.append(f)
1944 added.append(f)
1940 elif self[f]:
1945 elif self[f]:
1941 modified.append(f)
1946 modified.append(f)
1942 else:
1947 else:
1943 removed.append(f)
1948 removed.append(f)
1944
1949
1945 return scmutil.status(modified, added, removed, [], [], [], [])
1950 return scmutil.status(modified, added, removed, [], [], [], [])
1946
1951
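
# Illustrative sketch, not part of the original module: creating a memctx on
# top of the working directory parent and committing it without touching the
# working copy.  '_examplememcommit' and the file/user strings are
# hypothetical and exist only for this example.
def _examplememcommit(repo, text='example in-memory commit'):
    def getfilectx(repo, memctx, path):
        # every file listed below gets the same placeholder content
        return memfilectx(repo, path, 'generated content\n', memctx=memctx)
    p1 = repo['.'].node()
    ctx = memctx(repo, (p1, None), text, ['generated.txt'], getfilectx,
                 user='example user <user@example.com>')
    return ctx.commit()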

class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, path, data, islink=False,
                 isexec=False, copied=None, memctx=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if the current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, memctx)
        self._data = data
        self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
        self._copied = None
        if copied:
            self._copied = (copied, nullid)

    def data(self):
        return self._data
    def size(self):
        return len(self.data())
    def flags(self):
        return self._flags
    def renamed(self):
        return self._copied

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags):
        """wraps repo.wwrite"""
        self._data = data
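
# Illustrative sketch, not part of the original module: memfilectx instances
# as a filectxfn might return them, covering the flag and copy parameters
# documented above.  '_examplememfilectxs' is a hypothetical helper.
def _examplememfilectxs(repo, memctx):
    # an executable file added by the commit
    script = memfilectx(repo, 'bin/run.sh', '#!/bin/sh\n', isexec=True,
                        memctx=memctx)
    # a file recorded as copied from 'old/name.txt'; listing the source as
    # removed in the memctx turns the copy into a rename
    copied = memfilectx(repo, 'new/name.txt', 'payload\n',
                        copied='old/name.txt', memctx=memctx)
    return script, copied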