workingctx: use node.wdirid constant
Yuya Nishihara
r25738:04d26a3c default
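
The only hunk visible in this excerpt is the import on line 8, where wdirid is added to the names pulled in from node. As background, the sketch below is a minimal illustration (an assumption-labeled example, not the Mercurial source): node.wdirid is the phony all-0xff node id that stands in for the working directory, much as nullid stands in for the null revision, and the remainder of this changeset (not shown in this excerpt) presumably switches workingctx over to that shared constant instead of a locally hardcoded value.

# Minimal sketch (illustration only, not the actual Mercurial source) of what
# the newly imported constant represents.
from binascii import hexlify

nullid = b'\0' * 20    # placeholder node id for the null revision (like node.nullid)
wdirid = b'\xff' * 20  # placeholder node id for the working directory (like node.wdirid)

def workingdir_hex():
    # A working-directory context has no real changeset node; using a shared
    # wdirid constant instead of a hardcoded b'\xff' * 20 keeps the
    # placeholder defined in one place.
    return hexlify(wdirid).decode('ascii')

print(workingdir_hex())  # prints 40 'f' characters
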
@@ -1,1919 +1,1919 @@
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import nullid, nullrev, short, hex, bin
8 from node import nullid, nullrev, wdirid, short, hex, bin
9 from i18n import _
9 from i18n import _
10 import mdiff, error, util, scmutil, subrepo, patch, encoding, phases
10 import mdiff, error, util, scmutil, subrepo, patch, encoding, phases
11 import match as matchmod
11 import match as matchmod
12 import os, errno, stat
12 import os, errno, stat
13 import obsolete as obsmod
13 import obsolete as obsmod
14 import repoview
14 import repoview
15 import fileset
15 import fileset
16 import revlog
16 import revlog
17
17
18 propertycache = util.propertycache
18 propertycache = util.propertycache
19
19
20 # Phony node value to stand-in for new files in some uses of
20 # Phony node value to stand-in for new files in some uses of
21 # manifests. Manifests support 21-byte hashes for nodes which are
21 # manifests. Manifests support 21-byte hashes for nodes which are
22 # dirty in the working copy.
22 # dirty in the working copy.
23 _newnode = '!' * 21
23 _newnode = '!' * 21
24
24
25 class basectx(object):
25 class basectx(object):
26 """A basectx object represents the common logic for its children:
26 """A basectx object represents the common logic for its children:
27 changectx: read-only context that is already present in the repo,
27 changectx: read-only context that is already present in the repo,
28 workingctx: a context that represents the working directory and can
28 workingctx: a context that represents the working directory and can
29 be committed,
29 be committed,
30 memctx: a context that represents changes in-memory and can also
30 memctx: a context that represents changes in-memory and can also
31 be committed."""
31 be committed."""
32 def __new__(cls, repo, changeid='', *args, **kwargs):
32 def __new__(cls, repo, changeid='', *args, **kwargs):
33 if isinstance(changeid, basectx):
33 if isinstance(changeid, basectx):
34 return changeid
34 return changeid
35
35
36 o = super(basectx, cls).__new__(cls)
36 o = super(basectx, cls).__new__(cls)
37
37
38 o._repo = repo
38 o._repo = repo
39 o._rev = nullrev
39 o._rev = nullrev
40 o._node = nullid
40 o._node = nullid
41
41
42 return o
42 return o
43
43
44 def __str__(self):
44 def __str__(self):
45 return short(self.node())
45 return short(self.node())
46
46
47 def __int__(self):
47 def __int__(self):
48 return self.rev()
48 return self.rev()
49
49
50 def __repr__(self):
50 def __repr__(self):
51 return "<%s %s>" % (type(self).__name__, str(self))
51 return "<%s %s>" % (type(self).__name__, str(self))
52
52
53 def __eq__(self, other):
53 def __eq__(self, other):
54 try:
54 try:
55 return type(self) == type(other) and self._rev == other._rev
55 return type(self) == type(other) and self._rev == other._rev
56 except AttributeError:
56 except AttributeError:
57 return False
57 return False
58
58
59 def __ne__(self, other):
59 def __ne__(self, other):
60 return not (self == other)
60 return not (self == other)
61
61
62 def __contains__(self, key):
62 def __contains__(self, key):
63 return key in self._manifest
63 return key in self._manifest
64
64
65 def __getitem__(self, key):
65 def __getitem__(self, key):
66 return self.filectx(key)
66 return self.filectx(key)
67
67
68 def __iter__(self):
68 def __iter__(self):
69 return iter(self._manifest)
69 return iter(self._manifest)
70
70
71 def _manifestmatches(self, match, s):
71 def _manifestmatches(self, match, s):
72 """generate a new manifest filtered by the match argument
72 """generate a new manifest filtered by the match argument
73
73
74 This method is for internal use only and mainly exists to provide an
74 This method is for internal use only and mainly exists to provide an
75 object oriented way for other contexts to customize the manifest
75 object oriented way for other contexts to customize the manifest
76 generation.
76 generation.
77 """
77 """
78 return self.manifest().matches(match)
78 return self.manifest().matches(match)
79
79
80 def _matchstatus(self, other, match):
80 def _matchstatus(self, other, match):
81 """return match.always if match is none
81 """return match.always if match is none
82
82
83 This internal method provides a way for child objects to override the
83 This internal method provides a way for child objects to override the
84 match operator.
84 match operator.
85 """
85 """
86 return match or matchmod.always(self._repo.root, self._repo.getcwd())
86 return match or matchmod.always(self._repo.root, self._repo.getcwd())
87
87
88 def _buildstatus(self, other, s, match, listignored, listclean,
88 def _buildstatus(self, other, s, match, listignored, listclean,
89 listunknown):
89 listunknown):
90 """build a status with respect to another context"""
90 """build a status with respect to another context"""
91 # Load earliest manifest first for caching reasons. More specifically,
91 # Load earliest manifest first for caching reasons. More specifically,
92 # if you have revisions 1000 and 1001, 1001 is probably stored as a
92 # if you have revisions 1000 and 1001, 1001 is probably stored as a
93 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
93 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
94 # 1000 and cache it so that when you read 1001, we just need to apply a
94 # 1000 and cache it so that when you read 1001, we just need to apply a
95 # delta to what's in the cache. So that's one full reconstruction + one
95 # delta to what's in the cache. So that's one full reconstruction + one
96 # delta application.
96 # delta application.
97 if self.rev() is not None and self.rev() < other.rev():
97 if self.rev() is not None and self.rev() < other.rev():
98 self.manifest()
98 self.manifest()
99 mf1 = other._manifestmatches(match, s)
99 mf1 = other._manifestmatches(match, s)
100 mf2 = self._manifestmatches(match, s)
100 mf2 = self._manifestmatches(match, s)
101
101
102 modified, added = [], []
102 modified, added = [], []
103 removed = []
103 removed = []
104 clean = []
104 clean = []
105 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
105 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
106 deletedset = set(deleted)
106 deletedset = set(deleted)
107 d = mf1.diff(mf2, clean=listclean)
107 d = mf1.diff(mf2, clean=listclean)
108 for fn, value in d.iteritems():
108 for fn, value in d.iteritems():
109 if fn in deletedset:
109 if fn in deletedset:
110 continue
110 continue
111 if value is None:
111 if value is None:
112 clean.append(fn)
112 clean.append(fn)
113 continue
113 continue
114 (node1, flag1), (node2, flag2) = value
114 (node1, flag1), (node2, flag2) = value
115 if node1 is None:
115 if node1 is None:
116 added.append(fn)
116 added.append(fn)
117 elif node2 is None:
117 elif node2 is None:
118 removed.append(fn)
118 removed.append(fn)
119 elif node2 != _newnode:
119 elif node2 != _newnode:
120 # The file was not a new file in mf2, so an entry
120 # The file was not a new file in mf2, so an entry
121 # from diff is really a difference.
121 # from diff is really a difference.
122 modified.append(fn)
122 modified.append(fn)
123 elif self[fn].cmp(other[fn]):
123 elif self[fn].cmp(other[fn]):
124 # node2 was newnode, but the working file doesn't
124 # node2 was newnode, but the working file doesn't
125 # match the one in mf1.
125 # match the one in mf1.
126 modified.append(fn)
126 modified.append(fn)
127 else:
127 else:
128 clean.append(fn)
128 clean.append(fn)
129
129
130 if removed:
130 if removed:
131 # need to filter files if they are already reported as removed
131 # need to filter files if they are already reported as removed
132 unknown = [fn for fn in unknown if fn not in mf1]
132 unknown = [fn for fn in unknown if fn not in mf1]
133 ignored = [fn for fn in ignored if fn not in mf1]
133 ignored = [fn for fn in ignored if fn not in mf1]
134 # if they're deleted, don't report them as removed
134 # if they're deleted, don't report them as removed
135 removed = [fn for fn in removed if fn not in deletedset]
135 removed = [fn for fn in removed if fn not in deletedset]
136
136
137 return scmutil.status(modified, added, removed, deleted, unknown,
137 return scmutil.status(modified, added, removed, deleted, unknown,
138 ignored, clean)
138 ignored, clean)
139
139
140 @propertycache
140 @propertycache
141 def substate(self):
141 def substate(self):
142 return subrepo.state(self, self._repo.ui)
142 return subrepo.state(self, self._repo.ui)
143
143
144 def subrev(self, subpath):
144 def subrev(self, subpath):
145 return self.substate[subpath][1]
145 return self.substate[subpath][1]
146
146
147 def rev(self):
147 def rev(self):
148 return self._rev
148 return self._rev
149 def node(self):
149 def node(self):
150 return self._node
150 return self._node
151 def hex(self):
151 def hex(self):
152 return hex(self.node())
152 return hex(self.node())
153 def manifest(self):
153 def manifest(self):
154 return self._manifest
154 return self._manifest
155 def repo(self):
155 def repo(self):
156 return self._repo
156 return self._repo
157 def phasestr(self):
157 def phasestr(self):
158 return phases.phasenames[self.phase()]
158 return phases.phasenames[self.phase()]
159 def mutable(self):
159 def mutable(self):
160 return self.phase() > phases.public
160 return self.phase() > phases.public
161
161
162 def getfileset(self, expr):
162 def getfileset(self, expr):
163 return fileset.getfileset(self, expr)
163 return fileset.getfileset(self, expr)
164
164
165 def obsolete(self):
165 def obsolete(self):
166 """True if the changeset is obsolete"""
166 """True if the changeset is obsolete"""
167 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
167 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
168
168
169 def extinct(self):
169 def extinct(self):
170 """True if the changeset is extinct"""
170 """True if the changeset is extinct"""
171 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
171 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
172
172
173 def unstable(self):
173 def unstable(self):
174 """True if the changeset is not obsolete but it's ancestor are"""
174 """True if the changeset is not obsolete but it's ancestor are"""
175 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
175 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
176
176
177 def bumped(self):
177 def bumped(self):
178 """True if the changeset try to be a successor of a public changeset
178 """True if the changeset try to be a successor of a public changeset
179
179
180 Only non-public and non-obsolete changesets may be bumped.
180 Only non-public and non-obsolete changesets may be bumped.
181 """
181 """
182 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
182 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
183
183
184 def divergent(self):
184 def divergent(self):
185 """Is a successors of a changeset with multiple possible successors set
185 """Is a successors of a changeset with multiple possible successors set
186
186
187 Only non-public and non-obsolete changesets may be divergent.
187 Only non-public and non-obsolete changesets may be divergent.
188 """
188 """
189 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
189 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
190
190
191 def troubled(self):
191 def troubled(self):
192 """True if the changeset is either unstable, bumped or divergent"""
192 """True if the changeset is either unstable, bumped or divergent"""
193 return self.unstable() or self.bumped() or self.divergent()
193 return self.unstable() or self.bumped() or self.divergent()
194
194
195 def troubles(self):
195 def troubles(self):
196 """return the list of troubles affecting this changesets.
196 """return the list of troubles affecting this changesets.
197
197
198 Troubles are returned as strings. possible values are:
198 Troubles are returned as strings. possible values are:
199 - unstable,
199 - unstable,
200 - bumped,
200 - bumped,
201 - divergent.
201 - divergent.
202 """
202 """
203 troubles = []
203 troubles = []
204 if self.unstable():
204 if self.unstable():
205 troubles.append('unstable')
205 troubles.append('unstable')
206 if self.bumped():
206 if self.bumped():
207 troubles.append('bumped')
207 troubles.append('bumped')
208 if self.divergent():
208 if self.divergent():
209 troubles.append('divergent')
209 troubles.append('divergent')
210 return troubles
210 return troubles
211
211
212 def parents(self):
212 def parents(self):
213 """return contexts for each parent changeset"""
213 """return contexts for each parent changeset"""
214 return self._parents
214 return self._parents
215
215
216 def p1(self):
216 def p1(self):
217 return self._parents[0]
217 return self._parents[0]
218
218
219 def p2(self):
219 def p2(self):
220 if len(self._parents) == 2:
220 if len(self._parents) == 2:
221 return self._parents[1]
221 return self._parents[1]
222 return changectx(self._repo, -1)
222 return changectx(self._repo, -1)
223
223
224 def _fileinfo(self, path):
224 def _fileinfo(self, path):
225 if '_manifest' in self.__dict__:
225 if '_manifest' in self.__dict__:
226 try:
226 try:
227 return self._manifest[path], self._manifest.flags(path)
227 return self._manifest[path], self._manifest.flags(path)
228 except KeyError:
228 except KeyError:
229 raise error.ManifestLookupError(self._node, path,
229 raise error.ManifestLookupError(self._node, path,
230 _('not found in manifest'))
230 _('not found in manifest'))
231 if '_manifestdelta' in self.__dict__ or path in self.files():
231 if '_manifestdelta' in self.__dict__ or path in self.files():
232 if path in self._manifestdelta:
232 if path in self._manifestdelta:
233 return (self._manifestdelta[path],
233 return (self._manifestdelta[path],
234 self._manifestdelta.flags(path))
234 self._manifestdelta.flags(path))
235 node, flag = self._repo.manifest.find(self._changeset[0], path)
235 node, flag = self._repo.manifest.find(self._changeset[0], path)
236 if not node:
236 if not node:
237 raise error.ManifestLookupError(self._node, path,
237 raise error.ManifestLookupError(self._node, path,
238 _('not found in manifest'))
238 _('not found in manifest'))
239
239
240 return node, flag
240 return node, flag
241
241
242 def filenode(self, path):
242 def filenode(self, path):
243 return self._fileinfo(path)[0]
243 return self._fileinfo(path)[0]
244
244
245 def flags(self, path):
245 def flags(self, path):
246 try:
246 try:
247 return self._fileinfo(path)[1]
247 return self._fileinfo(path)[1]
248 except error.LookupError:
248 except error.LookupError:
249 return ''
249 return ''
250
250
251 def sub(self, path):
251 def sub(self, path):
252 '''return a subrepo for the stored revision of path, never wdir()'''
252 '''return a subrepo for the stored revision of path, never wdir()'''
253 return subrepo.subrepo(self, path)
253 return subrepo.subrepo(self, path)
254
254
255 def nullsub(self, path, pctx):
255 def nullsub(self, path, pctx):
256 return subrepo.nullsubrepo(self, path, pctx)
256 return subrepo.nullsubrepo(self, path, pctx)
257
257
258 def workingsub(self, path):
258 def workingsub(self, path):
259 '''return a subrepo for the stored revision, or wdir if this is a wdir
259 '''return a subrepo for the stored revision, or wdir if this is a wdir
260 context.
260 context.
261 '''
261 '''
262 return subrepo.subrepo(self, path, allowwdir=True)
262 return subrepo.subrepo(self, path, allowwdir=True)
263
263
264 def match(self, pats=[], include=None, exclude=None, default='glob',
264 def match(self, pats=[], include=None, exclude=None, default='glob',
265 listsubrepos=False, badfn=None):
265 listsubrepos=False, badfn=None):
266 r = self._repo
266 r = self._repo
267 return matchmod.match(r.root, r.getcwd(), pats,
267 return matchmod.match(r.root, r.getcwd(), pats,
268 include, exclude, default,
268 include, exclude, default,
269 auditor=r.auditor, ctx=self,
269 auditor=r.auditor, ctx=self,
270 listsubrepos=listsubrepos, badfn=badfn)
270 listsubrepos=listsubrepos, badfn=badfn)
271
271
272 def diff(self, ctx2=None, match=None, **opts):
272 def diff(self, ctx2=None, match=None, **opts):
273 """Returns a diff generator for the given contexts and matcher"""
273 """Returns a diff generator for the given contexts and matcher"""
274 if ctx2 is None:
274 if ctx2 is None:
275 ctx2 = self.p1()
275 ctx2 = self.p1()
276 if ctx2 is not None:
276 if ctx2 is not None:
277 ctx2 = self._repo[ctx2]
277 ctx2 = self._repo[ctx2]
278 diffopts = patch.diffopts(self._repo.ui, opts)
278 diffopts = patch.diffopts(self._repo.ui, opts)
279 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
279 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
280
280
281 def dirs(self):
281 def dirs(self):
282 return self._manifest.dirs()
282 return self._manifest.dirs()
283
283
284 def hasdir(self, dir):
284 def hasdir(self, dir):
285 return self._manifest.hasdir(dir)
285 return self._manifest.hasdir(dir)
286
286
287 def dirty(self, missing=False, merge=True, branch=True):
287 def dirty(self, missing=False, merge=True, branch=True):
288 return False
288 return False
289
289
290 def status(self, other=None, match=None, listignored=False,
290 def status(self, other=None, match=None, listignored=False,
291 listclean=False, listunknown=False, listsubrepos=False):
291 listclean=False, listunknown=False, listsubrepos=False):
292 """return status of files between two nodes or node and working
292 """return status of files between two nodes or node and working
293 directory.
293 directory.
294
294
295 If other is None, compare this node with working directory.
295 If other is None, compare this node with working directory.
296
296
297 returns (modified, added, removed, deleted, unknown, ignored, clean)
297 returns (modified, added, removed, deleted, unknown, ignored, clean)
298 """
298 """
299
299
300 ctx1 = self
300 ctx1 = self
301 ctx2 = self._repo[other]
301 ctx2 = self._repo[other]
302
302
303 # This next code block is, admittedly, fragile logic that tests for
303 # This next code block is, admittedly, fragile logic that tests for
304 # reversing the contexts and wouldn't need to exist if it weren't for
304 # reversing the contexts and wouldn't need to exist if it weren't for
305 # the fast (and common) code path of comparing the working directory
305 # the fast (and common) code path of comparing the working directory
306 # with its first parent.
306 # with its first parent.
307 #
307 #
308 # What we're aiming for here is the ability to call:
308 # What we're aiming for here is the ability to call:
309 #
309 #
310 # workingctx.status(parentctx)
310 # workingctx.status(parentctx)
311 #
311 #
312 # If we always built the manifest for each context and compared those,
312 # If we always built the manifest for each context and compared those,
313 # then we'd be done. But the special case of the above call means we
313 # then we'd be done. But the special case of the above call means we
314 # just copy the manifest of the parent.
314 # just copy the manifest of the parent.
315 reversed = False
315 reversed = False
316 if (not isinstance(ctx1, changectx)
316 if (not isinstance(ctx1, changectx)
317 and isinstance(ctx2, changectx)):
317 and isinstance(ctx2, changectx)):
318 reversed = True
318 reversed = True
319 ctx1, ctx2 = ctx2, ctx1
319 ctx1, ctx2 = ctx2, ctx1
320
320
321 match = ctx2._matchstatus(ctx1, match)
321 match = ctx2._matchstatus(ctx1, match)
322 r = scmutil.status([], [], [], [], [], [], [])
322 r = scmutil.status([], [], [], [], [], [], [])
323 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
323 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
324 listunknown)
324 listunknown)
325
325
326 if reversed:
326 if reversed:
327 # Reverse added and removed. Clear deleted, unknown and ignored as
327 # Reverse added and removed. Clear deleted, unknown and ignored as
328 # these make no sense to reverse.
328 # these make no sense to reverse.
329 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
329 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
330 r.clean)
330 r.clean)
331
331
332 if listsubrepos:
332 if listsubrepos:
333 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
333 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
334 rev2 = ctx2.subrev(subpath)
334 rev2 = ctx2.subrev(subpath)
335 try:
335 try:
336 submatch = matchmod.narrowmatcher(subpath, match)
336 submatch = matchmod.narrowmatcher(subpath, match)
337 s = sub.status(rev2, match=submatch, ignored=listignored,
337 s = sub.status(rev2, match=submatch, ignored=listignored,
338 clean=listclean, unknown=listunknown,
338 clean=listclean, unknown=listunknown,
339 listsubrepos=True)
339 listsubrepos=True)
340 for rfiles, sfiles in zip(r, s):
340 for rfiles, sfiles in zip(r, s):
341 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
341 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
342 except error.LookupError:
342 except error.LookupError:
343 self._repo.ui.status(_("skipping missing "
343 self._repo.ui.status(_("skipping missing "
344 "subrepository: %s\n") % subpath)
344 "subrepository: %s\n") % subpath)
345
345
346 for l in r:
346 for l in r:
347 l.sort()
347 l.sort()
348
348
349 return r
349 return r
350
350
351
351
352 def makememctx(repo, parents, text, user, date, branch, files, store,
352 def makememctx(repo, parents, text, user, date, branch, files, store,
353 editor=None, extra=None):
353 editor=None, extra=None):
354 def getfilectx(repo, memctx, path):
354 def getfilectx(repo, memctx, path):
355 data, mode, copied = store.getfile(path)
355 data, mode, copied = store.getfile(path)
356 if data is None:
356 if data is None:
357 return None
357 return None
358 islink, isexec = mode
358 islink, isexec = mode
359 return memfilectx(repo, path, data, islink=islink, isexec=isexec,
359 return memfilectx(repo, path, data, islink=islink, isexec=isexec,
360 copied=copied, memctx=memctx)
360 copied=copied, memctx=memctx)
361 if extra is None:
361 if extra is None:
362 extra = {}
362 extra = {}
363 if branch:
363 if branch:
364 extra['branch'] = encoding.fromlocal(branch)
364 extra['branch'] = encoding.fromlocal(branch)
365 ctx = memctx(repo, parents, text, files, getfilectx, user,
365 ctx = memctx(repo, parents, text, files, getfilectx, user,
366 date, extra, editor)
366 date, extra, editor)
367 return ctx
367 return ctx
368
368
369 class changectx(basectx):
369 class changectx(basectx):
370 """A changecontext object makes access to data related to a particular
370 """A changecontext object makes access to data related to a particular
371 changeset convenient. It represents a read-only context already present in
371 changeset convenient. It represents a read-only context already present in
372 the repo."""
372 the repo."""
373 def __init__(self, repo, changeid=''):
373 def __init__(self, repo, changeid=''):
374 """changeid is a revision number, node, or tag"""
374 """changeid is a revision number, node, or tag"""
375
375
376 # since basectx.__new__ already took care of copying the object, we
376 # since basectx.__new__ already took care of copying the object, we
377 # don't need to do anything in __init__, so we just exit here
377 # don't need to do anything in __init__, so we just exit here
378 if isinstance(changeid, basectx):
378 if isinstance(changeid, basectx):
379 return
379 return
380
380
381 if changeid == '':
381 if changeid == '':
382 changeid = '.'
382 changeid = '.'
383 self._repo = repo
383 self._repo = repo
384
384
385 try:
385 try:
386 if isinstance(changeid, int):
386 if isinstance(changeid, int):
387 self._node = repo.changelog.node(changeid)
387 self._node = repo.changelog.node(changeid)
388 self._rev = changeid
388 self._rev = changeid
389 return
389 return
390 if isinstance(changeid, long):
390 if isinstance(changeid, long):
391 changeid = str(changeid)
391 changeid = str(changeid)
392 if changeid == 'null':
392 if changeid == 'null':
393 self._node = nullid
393 self._node = nullid
394 self._rev = nullrev
394 self._rev = nullrev
395 return
395 return
396 if changeid == 'tip':
396 if changeid == 'tip':
397 self._node = repo.changelog.tip()
397 self._node = repo.changelog.tip()
398 self._rev = repo.changelog.rev(self._node)
398 self._rev = repo.changelog.rev(self._node)
399 return
399 return
400 if changeid == '.' or changeid == repo.dirstate.p1():
400 if changeid == '.' or changeid == repo.dirstate.p1():
401 # this is a hack to delay/avoid loading obsmarkers
401 # this is a hack to delay/avoid loading obsmarkers
402 # when we know that '.' won't be hidden
402 # when we know that '.' won't be hidden
403 self._node = repo.dirstate.p1()
403 self._node = repo.dirstate.p1()
404 self._rev = repo.unfiltered().changelog.rev(self._node)
404 self._rev = repo.unfiltered().changelog.rev(self._node)
405 return
405 return
406 if len(changeid) == 20:
406 if len(changeid) == 20:
407 try:
407 try:
408 self._node = changeid
408 self._node = changeid
409 self._rev = repo.changelog.rev(changeid)
409 self._rev = repo.changelog.rev(changeid)
410 return
410 return
411 except error.FilteredRepoLookupError:
411 except error.FilteredRepoLookupError:
412 raise
412 raise
413 except LookupError:
413 except LookupError:
414 pass
414 pass
415
415
416 try:
416 try:
417 r = int(changeid)
417 r = int(changeid)
418 if str(r) != changeid:
418 if str(r) != changeid:
419 raise ValueError
419 raise ValueError
420 l = len(repo.changelog)
420 l = len(repo.changelog)
421 if r < 0:
421 if r < 0:
422 r += l
422 r += l
423 if r < 0 or r >= l:
423 if r < 0 or r >= l:
424 raise ValueError
424 raise ValueError
425 self._rev = r
425 self._rev = r
426 self._node = repo.changelog.node(r)
426 self._node = repo.changelog.node(r)
427 return
427 return
428 except error.FilteredIndexError:
428 except error.FilteredIndexError:
429 raise
429 raise
430 except (ValueError, OverflowError, IndexError):
430 except (ValueError, OverflowError, IndexError):
431 pass
431 pass
432
432
433 if len(changeid) == 40:
433 if len(changeid) == 40:
434 try:
434 try:
435 self._node = bin(changeid)
435 self._node = bin(changeid)
436 self._rev = repo.changelog.rev(self._node)
436 self._rev = repo.changelog.rev(self._node)
437 return
437 return
438 except error.FilteredLookupError:
438 except error.FilteredLookupError:
439 raise
439 raise
440 except (TypeError, LookupError):
440 except (TypeError, LookupError):
441 pass
441 pass
442
442
443 # lookup bookmarks through the name interface
443 # lookup bookmarks through the name interface
444 try:
444 try:
445 self._node = repo.names.singlenode(repo, changeid)
445 self._node = repo.names.singlenode(repo, changeid)
446 self._rev = repo.changelog.rev(self._node)
446 self._rev = repo.changelog.rev(self._node)
447 return
447 return
448 except KeyError:
448 except KeyError:
449 pass
449 pass
450 except error.FilteredRepoLookupError:
450 except error.FilteredRepoLookupError:
451 raise
451 raise
452 except error.RepoLookupError:
452 except error.RepoLookupError:
453 pass
453 pass
454
454
455 self._node = repo.unfiltered().changelog._partialmatch(changeid)
455 self._node = repo.unfiltered().changelog._partialmatch(changeid)
456 if self._node is not None:
456 if self._node is not None:
457 self._rev = repo.changelog.rev(self._node)
457 self._rev = repo.changelog.rev(self._node)
458 return
458 return
459
459
460 # lookup failed
460 # lookup failed
461 # check if it might have come from damaged dirstate
461 # check if it might have come from damaged dirstate
462 #
462 #
463 # XXX we could avoid the unfiltered if we had a recognizable
463 # XXX we could avoid the unfiltered if we had a recognizable
464 # exception for filtered changeset access
464 # exception for filtered changeset access
465 if changeid in repo.unfiltered().dirstate.parents():
465 if changeid in repo.unfiltered().dirstate.parents():
466 msg = _("working directory has unknown parent '%s'!")
466 msg = _("working directory has unknown parent '%s'!")
467 raise error.Abort(msg % short(changeid))
467 raise error.Abort(msg % short(changeid))
468 try:
468 try:
469 if len(changeid) == 20:
469 if len(changeid) == 20:
470 changeid = hex(changeid)
470 changeid = hex(changeid)
471 except TypeError:
471 except TypeError:
472 pass
472 pass
473 except (error.FilteredIndexError, error.FilteredLookupError,
473 except (error.FilteredIndexError, error.FilteredLookupError,
474 error.FilteredRepoLookupError):
474 error.FilteredRepoLookupError):
475 if repo.filtername.startswith('visible'):
475 if repo.filtername.startswith('visible'):
476 msg = _("hidden revision '%s'") % changeid
476 msg = _("hidden revision '%s'") % changeid
477 hint = _('use --hidden to access hidden revisions')
477 hint = _('use --hidden to access hidden revisions')
478 raise error.FilteredRepoLookupError(msg, hint=hint)
478 raise error.FilteredRepoLookupError(msg, hint=hint)
479 msg = _("filtered revision '%s' (not in '%s' subset)")
479 msg = _("filtered revision '%s' (not in '%s' subset)")
480 msg %= (changeid, repo.filtername)
480 msg %= (changeid, repo.filtername)
481 raise error.FilteredRepoLookupError(msg)
481 raise error.FilteredRepoLookupError(msg)
482 except IndexError:
482 except IndexError:
483 pass
483 pass
484 raise error.RepoLookupError(
484 raise error.RepoLookupError(
485 _("unknown revision '%s'") % changeid)
485 _("unknown revision '%s'") % changeid)
486
486
487 def __hash__(self):
487 def __hash__(self):
488 try:
488 try:
489 return hash(self._rev)
489 return hash(self._rev)
490 except AttributeError:
490 except AttributeError:
491 return id(self)
491 return id(self)
492
492
493 def __nonzero__(self):
493 def __nonzero__(self):
494 return self._rev != nullrev
494 return self._rev != nullrev
495
495
496 @propertycache
496 @propertycache
497 def _changeset(self):
497 def _changeset(self):
498 return self._repo.changelog.read(self.rev())
498 return self._repo.changelog.read(self.rev())
499
499
500 @propertycache
500 @propertycache
501 def _manifest(self):
501 def _manifest(self):
502 return self._repo.manifest.read(self._changeset[0])
502 return self._repo.manifest.read(self._changeset[0])
503
503
504 @propertycache
504 @propertycache
505 def _manifestdelta(self):
505 def _manifestdelta(self):
506 return self._repo.manifest.readdelta(self._changeset[0])
506 return self._repo.manifest.readdelta(self._changeset[0])
507
507
508 @propertycache
508 @propertycache
509 def _parents(self):
509 def _parents(self):
510 p = self._repo.changelog.parentrevs(self._rev)
510 p = self._repo.changelog.parentrevs(self._rev)
511 if p[1] == nullrev:
511 if p[1] == nullrev:
512 p = p[:-1]
512 p = p[:-1]
513 return [changectx(self._repo, x) for x in p]
513 return [changectx(self._repo, x) for x in p]
514
514
515 def changeset(self):
515 def changeset(self):
516 return self._changeset
516 return self._changeset
517 def manifestnode(self):
517 def manifestnode(self):
518 return self._changeset[0]
518 return self._changeset[0]
519
519
520 def user(self):
520 def user(self):
521 return self._changeset[1]
521 return self._changeset[1]
522 def date(self):
522 def date(self):
523 return self._changeset[2]
523 return self._changeset[2]
524 def files(self):
524 def files(self):
525 return self._changeset[3]
525 return self._changeset[3]
526 def description(self):
526 def description(self):
527 return self._changeset[4]
527 return self._changeset[4]
528 def branch(self):
528 def branch(self):
529 return encoding.tolocal(self._changeset[5].get("branch"))
529 return encoding.tolocal(self._changeset[5].get("branch"))
530 def closesbranch(self):
530 def closesbranch(self):
531 return 'close' in self._changeset[5]
531 return 'close' in self._changeset[5]
532 def extra(self):
532 def extra(self):
533 return self._changeset[5]
533 return self._changeset[5]
534 def tags(self):
534 def tags(self):
535 return self._repo.nodetags(self._node)
535 return self._repo.nodetags(self._node)
536 def bookmarks(self):
536 def bookmarks(self):
537 return self._repo.nodebookmarks(self._node)
537 return self._repo.nodebookmarks(self._node)
538 def phase(self):
538 def phase(self):
539 return self._repo._phasecache.phase(self._repo, self._rev)
539 return self._repo._phasecache.phase(self._repo, self._rev)
540 def hidden(self):
540 def hidden(self):
541 return self._rev in repoview.filterrevs(self._repo, 'visible')
541 return self._rev in repoview.filterrevs(self._repo, 'visible')
542
542
543 def children(self):
543 def children(self):
544 """return contexts for each child changeset"""
544 """return contexts for each child changeset"""
545 c = self._repo.changelog.children(self._node)
545 c = self._repo.changelog.children(self._node)
546 return [changectx(self._repo, x) for x in c]
546 return [changectx(self._repo, x) for x in c]
547
547
548 def ancestors(self):
548 def ancestors(self):
549 for a in self._repo.changelog.ancestors([self._rev]):
549 for a in self._repo.changelog.ancestors([self._rev]):
550 yield changectx(self._repo, a)
550 yield changectx(self._repo, a)
551
551
552 def descendants(self):
552 def descendants(self):
553 for d in self._repo.changelog.descendants([self._rev]):
553 for d in self._repo.changelog.descendants([self._rev]):
554 yield changectx(self._repo, d)
554 yield changectx(self._repo, d)
555
555
556 def filectx(self, path, fileid=None, filelog=None):
556 def filectx(self, path, fileid=None, filelog=None):
557 """get a file context from this changeset"""
557 """get a file context from this changeset"""
558 if fileid is None:
558 if fileid is None:
559 fileid = self.filenode(path)
559 fileid = self.filenode(path)
560 return filectx(self._repo, path, fileid=fileid,
560 return filectx(self._repo, path, fileid=fileid,
561 changectx=self, filelog=filelog)
561 changectx=self, filelog=filelog)
562
562
563 def ancestor(self, c2, warn=False):
563 def ancestor(self, c2, warn=False):
564 """return the "best" ancestor context of self and c2
564 """return the "best" ancestor context of self and c2
565
565
566 If there are multiple candidates, it will show a message and check
566 If there are multiple candidates, it will show a message and check
567 merge.preferancestor configuration before falling back to the
567 merge.preferancestor configuration before falling back to the
568 revlog ancestor."""
568 revlog ancestor."""
569 # deal with workingctxs
569 # deal with workingctxs
570 n2 = c2._node
570 n2 = c2._node
571 if n2 is None:
571 if n2 is None:
572 n2 = c2._parents[0]._node
572 n2 = c2._parents[0]._node
573 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
573 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
574 if not cahs:
574 if not cahs:
575 anc = nullid
575 anc = nullid
576 elif len(cahs) == 1:
576 elif len(cahs) == 1:
577 anc = cahs[0]
577 anc = cahs[0]
578 else:
578 else:
579 for r in self._repo.ui.configlist('merge', 'preferancestor'):
579 for r in self._repo.ui.configlist('merge', 'preferancestor'):
580 try:
580 try:
581 ctx = changectx(self._repo, r)
581 ctx = changectx(self._repo, r)
582 except error.RepoLookupError:
582 except error.RepoLookupError:
583 continue
583 continue
584 anc = ctx.node()
584 anc = ctx.node()
585 if anc in cahs:
585 if anc in cahs:
586 break
586 break
587 else:
587 else:
588 anc = self._repo.changelog.ancestor(self._node, n2)
588 anc = self._repo.changelog.ancestor(self._node, n2)
589 if warn:
589 if warn:
590 self._repo.ui.status(
590 self._repo.ui.status(
591 (_("note: using %s as ancestor of %s and %s\n") %
591 (_("note: using %s as ancestor of %s and %s\n") %
592 (short(anc), short(self._node), short(n2))) +
592 (short(anc), short(self._node), short(n2))) +
593 ''.join(_(" alternatively, use --config "
593 ''.join(_(" alternatively, use --config "
594 "merge.preferancestor=%s\n") %
594 "merge.preferancestor=%s\n") %
595 short(n) for n in sorted(cahs) if n != anc))
595 short(n) for n in sorted(cahs) if n != anc))
596 return changectx(self._repo, anc)
596 return changectx(self._repo, anc)
597
597
598 def descendant(self, other):
598 def descendant(self, other):
599 """True if other is descendant of this changeset"""
599 """True if other is descendant of this changeset"""
600 return self._repo.changelog.descendant(self._rev, other._rev)
600 return self._repo.changelog.descendant(self._rev, other._rev)
601
601
602 def walk(self, match):
602 def walk(self, match):
603 '''Generates matching file names.'''
603 '''Generates matching file names.'''
604
604
605 # Wrap match.bad method to have message with nodeid
605 # Wrap match.bad method to have message with nodeid
606 def bad(fn, msg):
606 def bad(fn, msg):
607 # The manifest doesn't know about subrepos, so don't complain about
607 # The manifest doesn't know about subrepos, so don't complain about
608 # paths into valid subrepos.
608 # paths into valid subrepos.
609 if any(fn == s or fn.startswith(s + '/')
609 if any(fn == s or fn.startswith(s + '/')
610 for s in self.substate):
610 for s in self.substate):
611 return
611 return
612 match.bad(fn, _('no such file in rev %s') % self)
612 match.bad(fn, _('no such file in rev %s') % self)
613
613
614 m = matchmod.badmatch(match, bad)
614 m = matchmod.badmatch(match, bad)
615 return self._manifest.walk(m)
615 return self._manifest.walk(m)
616
616
617 def matches(self, match):
617 def matches(self, match):
618 return self.walk(match)
618 return self.walk(match)
619
619
620 class basefilectx(object):
620 class basefilectx(object):
621 """A filecontext object represents the common logic for its children:
621 """A filecontext object represents the common logic for its children:
622 filectx: read-only access to a filerevision that is already present
622 filectx: read-only access to a filerevision that is already present
623 in the repo,
623 in the repo,
624 workingfilectx: a filecontext that represents files from the working
624 workingfilectx: a filecontext that represents files from the working
625 directory,
625 directory,
626 memfilectx: a filecontext that represents files in-memory."""
626 memfilectx: a filecontext that represents files in-memory."""
627 def __new__(cls, repo, path, *args, **kwargs):
627 def __new__(cls, repo, path, *args, **kwargs):
628 return super(basefilectx, cls).__new__(cls)
628 return super(basefilectx, cls).__new__(cls)
629
629
630 @propertycache
630 @propertycache
631 def _filelog(self):
631 def _filelog(self):
632 return self._repo.file(self._path)
632 return self._repo.file(self._path)
633
633
634 @propertycache
634 @propertycache
635 def _changeid(self):
635 def _changeid(self):
636 if '_changeid' in self.__dict__:
636 if '_changeid' in self.__dict__:
637 return self._changeid
637 return self._changeid
638 elif '_changectx' in self.__dict__:
638 elif '_changectx' in self.__dict__:
639 return self._changectx.rev()
639 return self._changectx.rev()
640 elif '_descendantrev' in self.__dict__:
640 elif '_descendantrev' in self.__dict__:
641 # this file context was created from a revision with a known
641 # this file context was created from a revision with a known
642 # descendant, we can (lazily) correct for linkrev aliases
642 # descendant, we can (lazily) correct for linkrev aliases
643 return self._adjustlinkrev(self._path, self._filelog,
643 return self._adjustlinkrev(self._path, self._filelog,
644 self._filenode, self._descendantrev)
644 self._filenode, self._descendantrev)
645 else:
645 else:
646 return self._filelog.linkrev(self._filerev)
646 return self._filelog.linkrev(self._filerev)
647
647
648 @propertycache
648 @propertycache
649 def _filenode(self):
649 def _filenode(self):
650 if '_fileid' in self.__dict__:
650 if '_fileid' in self.__dict__:
651 return self._filelog.lookup(self._fileid)
651 return self._filelog.lookup(self._fileid)
652 else:
652 else:
653 return self._changectx.filenode(self._path)
653 return self._changectx.filenode(self._path)
654
654
655 @propertycache
655 @propertycache
656 def _filerev(self):
656 def _filerev(self):
657 return self._filelog.rev(self._filenode)
657 return self._filelog.rev(self._filenode)
658
658
659 @propertycache
659 @propertycache
660 def _repopath(self):
660 def _repopath(self):
661 return self._path
661 return self._path
662
662
663 def __nonzero__(self):
663 def __nonzero__(self):
664 try:
664 try:
665 self._filenode
665 self._filenode
666 return True
666 return True
667 except error.LookupError:
667 except error.LookupError:
668 # file is missing
668 # file is missing
669 return False
669 return False
670
670
671 def __str__(self):
671 def __str__(self):
672 return "%s@%s" % (self.path(), self._changectx)
672 return "%s@%s" % (self.path(), self._changectx)
673
673
674 def __repr__(self):
674 def __repr__(self):
675 return "<%s %s>" % (type(self).__name__, str(self))
675 return "<%s %s>" % (type(self).__name__, str(self))
676
676
677 def __hash__(self):
677 def __hash__(self):
678 try:
678 try:
679 return hash((self._path, self._filenode))
679 return hash((self._path, self._filenode))
680 except AttributeError:
680 except AttributeError:
681 return id(self)
681 return id(self)
682
682
683 def __eq__(self, other):
683 def __eq__(self, other):
684 try:
684 try:
685 return (type(self) == type(other) and self._path == other._path
685 return (type(self) == type(other) and self._path == other._path
686 and self._filenode == other._filenode)
686 and self._filenode == other._filenode)
687 except AttributeError:
687 except AttributeError:
688 return False
688 return False
689
689
690 def __ne__(self, other):
690 def __ne__(self, other):
691 return not (self == other)
691 return not (self == other)
692
692
693 def filerev(self):
693 def filerev(self):
694 return self._filerev
694 return self._filerev
695 def filenode(self):
695 def filenode(self):
696 return self._filenode
696 return self._filenode
697 def flags(self):
697 def flags(self):
698 return self._changectx.flags(self._path)
698 return self._changectx.flags(self._path)
699 def filelog(self):
699 def filelog(self):
700 return self._filelog
700 return self._filelog
701 def rev(self):
701 def rev(self):
702 return self._changeid
702 return self._changeid
703 def linkrev(self):
703 def linkrev(self):
704 return self._filelog.linkrev(self._filerev)
704 return self._filelog.linkrev(self._filerev)
705 def node(self):
705 def node(self):
706 return self._changectx.node()
706 return self._changectx.node()
707 def hex(self):
707 def hex(self):
708 return self._changectx.hex()
708 return self._changectx.hex()
709 def user(self):
709 def user(self):
710 return self._changectx.user()
710 return self._changectx.user()
711 def date(self):
711 def date(self):
712 return self._changectx.date()
712 return self._changectx.date()
713 def files(self):
713 def files(self):
714 return self._changectx.files()
714 return self._changectx.files()
715 def description(self):
715 def description(self):
716 return self._changectx.description()
716 return self._changectx.description()
717 def branch(self):
717 def branch(self):
718 return self._changectx.branch()
718 return self._changectx.branch()
719 def extra(self):
719 def extra(self):
720 return self._changectx.extra()
720 return self._changectx.extra()
721 def phase(self):
721 def phase(self):
722 return self._changectx.phase()
722 return self._changectx.phase()
723 def phasestr(self):
723 def phasestr(self):
724 return self._changectx.phasestr()
724 return self._changectx.phasestr()
725 def manifest(self):
725 def manifest(self):
726 return self._changectx.manifest()
726 return self._changectx.manifest()
727 def changectx(self):
727 def changectx(self):
728 return self._changectx
728 return self._changectx
729 def repo(self):
729 def repo(self):
730 return self._repo
730 return self._repo
731
731
732 def path(self):
732 def path(self):
733 return self._path
733 return self._path
734
734
735 def isbinary(self):
735 def isbinary(self):
736 try:
736 try:
737 return util.binary(self.data())
737 return util.binary(self.data())
738 except IOError:
738 except IOError:
739 return False
739 return False
740 def isexec(self):
740 def isexec(self):
741 return 'x' in self.flags()
741 return 'x' in self.flags()
742 def islink(self):
742 def islink(self):
743 return 'l' in self.flags()
743 return 'l' in self.flags()
744
744
745 def cmp(self, fctx):
745 def cmp(self, fctx):
746 """compare with other file context
746 """compare with other file context
747
747
748 returns True if different than fctx.
748 returns True if different than fctx.
749 """
749 """
750 if (fctx._filerev is None
750 if (fctx._filerev is None
751 and (self._repo._encodefilterpats
751 and (self._repo._encodefilterpats
752 # if file data starts with '\1\n', empty metadata block is
752 # if file data starts with '\1\n', empty metadata block is
753 # prepended, which adds 4 bytes to filelog.size().
753 # prepended, which adds 4 bytes to filelog.size().
754 or self.size() - 4 == fctx.size())
754 or self.size() - 4 == fctx.size())
755 or self.size() == fctx.size()):
755 or self.size() == fctx.size()):
756 return self._filelog.cmp(self._filenode, fctx.data())
756 return self._filelog.cmp(self._filenode, fctx.data())
757
757
758 return True
758 return True
759
759
760 def _adjustlinkrev(self, path, filelog, fnode, srcrev, inclusive=False):
760 def _adjustlinkrev(self, path, filelog, fnode, srcrev, inclusive=False):
761 """return the first ancestor of <srcrev> introducing <fnode>
761 """return the first ancestor of <srcrev> introducing <fnode>
762
762
763 If the linkrev of the file revision does not point to an ancestor of
763 If the linkrev of the file revision does not point to an ancestor of
764 srcrev, we'll walk down the ancestors until we find one introducing
764 srcrev, we'll walk down the ancestors until we find one introducing
765 this file revision.
765 this file revision.
766
766
767 :repo: a localrepository object (used to access changelog and manifest)
767 :repo: a localrepository object (used to access changelog and manifest)
768 :path: the file path
768 :path: the file path
769 :fnode: the nodeid of the file revision
769 :fnode: the nodeid of the file revision
770 :filelog: the filelog of this path
770 :filelog: the filelog of this path
771 :srcrev: the changeset revision we search ancestors from
771 :srcrev: the changeset revision we search ancestors from
772 :inclusive: if true, the src revision will also be checked
772 :inclusive: if true, the src revision will also be checked
773 """
773 """
774 repo = self._repo
774 repo = self._repo
775 cl = repo.unfiltered().changelog
775 cl = repo.unfiltered().changelog
776 ma = repo.manifest
776 ma = repo.manifest
777 # fetch the linkrev
777 # fetch the linkrev
778 fr = filelog.rev(fnode)
778 fr = filelog.rev(fnode)
779 lkr = filelog.linkrev(fr)
779 lkr = filelog.linkrev(fr)
780 # hack to reuse ancestor computation when searching for renames
780 # hack to reuse ancestor computation when searching for renames
781 memberanc = getattr(self, '_ancestrycontext', None)
781 memberanc = getattr(self, '_ancestrycontext', None)
782 iteranc = None
782 iteranc = None
783 if srcrev is None:
783 if srcrev is None:
784 # wctx case, used by workingfilectx during mergecopy
784 # wctx case, used by workingfilectx during mergecopy
785 revs = [p.rev() for p in self._repo[None].parents()]
785 revs = [p.rev() for p in self._repo[None].parents()]
786 inclusive = True # we skipped the real (revless) source
786 inclusive = True # we skipped the real (revless) source
787 else:
787 else:
788 revs = [srcrev]
788 revs = [srcrev]
789 if memberanc is None:
789 if memberanc is None:
790 memberanc = iteranc = cl.ancestors(revs, lkr,
790 memberanc = iteranc = cl.ancestors(revs, lkr,
791 inclusive=inclusive)
791 inclusive=inclusive)
792 # check if this linkrev is an ancestor of srcrev
792 # check if this linkrev is an ancestor of srcrev
793 if lkr not in memberanc:
793 if lkr not in memberanc:
794 if iteranc is None:
794 if iteranc is None:
795 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
795 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
796 for a in iteranc:
796 for a in iteranc:
797 ac = cl.read(a) # get changeset data (we avoid object creation)
797 ac = cl.read(a) # get changeset data (we avoid object creation)
798 if path in ac[3]: # checking the 'files' field.
798 if path in ac[3]: # checking the 'files' field.
799 # The file has been touched, check if the content is
799 # The file has been touched, check if the content is
800 # similar to the one we search for.
800 # similar to the one we search for.
801 if fnode == ma.readfast(ac[0]).get(path):
801 if fnode == ma.readfast(ac[0]).get(path):
802 return a
802 return a
803 # In theory, we should never get out of that loop without a result.
803 # In theory, we should never get out of that loop without a result.
804 # But if manifest uses a buggy file revision (not children of the
804 # But if manifest uses a buggy file revision (not children of the
805 # one it replaces) we could. Such a buggy situation will likely
805 # one it replaces) we could. Such a buggy situation will likely
806 # result is crash somewhere else at to some point.
806 # result is crash somewhere else at to some point.
807 return lkr
807 return lkr
808
808
809 def introrev(self):
809 def introrev(self):
810 """return the rev of the changeset which introduced this file revision
810 """return the rev of the changeset which introduced this file revision
811
811
812 This method is different from linkrev because it take into account the
812 This method is different from linkrev because it take into account the
813 changeset the filectx was created from. It ensures the returned
813 changeset the filectx was created from. It ensures the returned
814 revision is one of its ancestors. This prevents bugs from
814 revision is one of its ancestors. This prevents bugs from
815 'linkrev-shadowing' when a file revision is used by multiple
815 'linkrev-shadowing' when a file revision is used by multiple
816 changesets.
816 changesets.
817 """
817 """
818 lkr = self.linkrev()
818 lkr = self.linkrev()
819 attrs = vars(self)
819 attrs = vars(self)
820 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
820 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
821 if noctx or self.rev() == lkr:
821 if noctx or self.rev() == lkr:
822 return self.linkrev()
822 return self.linkrev()
823 return self._adjustlinkrev(self._path, self._filelog, self._filenode,
823 return self._adjustlinkrev(self._path, self._filelog, self._filenode,
824 self.rev(), inclusive=True)
824 self.rev(), inclusive=True)
825
825
826 def _parentfilectx(self, path, fileid, filelog):
826 def _parentfilectx(self, path, fileid, filelog):
827 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
827 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
828 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
828 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
829 if '_changeid' in vars(self) or '_changectx' in vars(self):
829 if '_changeid' in vars(self) or '_changectx' in vars(self):
830 # If self is associated with a changeset (probably explicitly
830 # If self is associated with a changeset (probably explicitly
831 # fed), ensure the created filectx is associated with a
831 # fed), ensure the created filectx is associated with a
832 # changeset that is an ancestor of self.changectx.
832 # changeset that is an ancestor of self.changectx.
833 # This lets us later use _adjustlinkrev to get a correct link.
833 # This lets us later use _adjustlinkrev to get a correct link.
834 fctx._descendantrev = self.rev()
834 fctx._descendantrev = self.rev()
835 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
835 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
836 elif '_descendantrev' in vars(self):
836 elif '_descendantrev' in vars(self):
837 # Otherwise propagate _descendantrev if we have one associated.
837 # Otherwise propagate _descendantrev if we have one associated.
838 fctx._descendantrev = self._descendantrev
838 fctx._descendantrev = self._descendantrev
839 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
839 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
840 return fctx
840 return fctx
841
841
842 def parents(self):
842 def parents(self):
843 _path = self._path
843 _path = self._path
844 fl = self._filelog
844 fl = self._filelog
845 parents = self._filelog.parents(self._filenode)
845 parents = self._filelog.parents(self._filenode)
846 pl = [(_path, node, fl) for node in parents if node != nullid]
846 pl = [(_path, node, fl) for node in parents if node != nullid]
847
847
848 r = fl.renamed(self._filenode)
848 r = fl.renamed(self._filenode)
849 if r:
849 if r:
850 # - In the simple rename case, both parent are nullid, pl is empty.
850 # - In the simple rename case, both parent are nullid, pl is empty.
851 # - In case of merge, only one of the parent is null id and should
851 # - In case of merge, only one of the parent is null id and should
852 # be replaced with the rename information. This parent is -always-
852 # be replaced with the rename information. This parent is -always-
853 # the first one.
853 # the first one.
854 #
854 #
855 # As null id have always been filtered out in the previous list
855 # As null id have always been filtered out in the previous list
856 # comprehension, inserting to 0 will always result in "replacing
856 # comprehension, inserting to 0 will always result in "replacing
857 # first nullid parent with rename information.
857 # first nullid parent with rename information.
858 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
858 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
859
859
860 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
860 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
861
861
862 def p1(self):
862 def p1(self):
863 return self.parents()[0]
863 return self.parents()[0]
864
864
865 def p2(self):
865 def p2(self):
866 p = self.parents()
866 p = self.parents()
867 if len(p) == 2:
867 if len(p) == 2:
868 return p[1]
868 return p[1]
869 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
869 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
870
870
871 def annotate(self, follow=False, linenumber=None, diffopts=None):
871 def annotate(self, follow=False, linenumber=None, diffopts=None):
872 '''returns a list of tuples of (ctx, line) for each line
872 '''returns a list of tuples of (ctx, line) for each line
873 in the file, where ctx is the filectx of the node where
873 in the file, where ctx is the filectx of the node where
874 that line was last changed.
874 that line was last changed.
875 This returns tuples of ((ctx, linenumber), line) for each line,
875 This returns tuples of ((ctx, linenumber), line) for each line,
876 if "linenumber" parameter is NOT "None".
876 if "linenumber" parameter is NOT "None".
877 In such tuples, linenumber means one at the first appearance
877 In such tuples, linenumber means one at the first appearance
878 in the managed file.
878 in the managed file.
879 To reduce annotation cost,
879 To reduce annotation cost,
880 this returns fixed value(False is used) as linenumber,
880 this returns fixed value(False is used) as linenumber,
881 if "linenumber" parameter is "False".'''
881 if "linenumber" parameter is "False".'''
882
882
883 if linenumber is None:
883 if linenumber is None:
884 def decorate(text, rev):
884 def decorate(text, rev):
885 return ([rev] * len(text.splitlines()), text)
885 return ([rev] * len(text.splitlines()), text)
886 elif linenumber:
886 elif linenumber:
887 def decorate(text, rev):
887 def decorate(text, rev):
888 size = len(text.splitlines())
888 size = len(text.splitlines())
889 return ([(rev, i) for i in xrange(1, size + 1)], text)
889 return ([(rev, i) for i in xrange(1, size + 1)], text)
890 else:
890 else:
891 def decorate(text, rev):
891 def decorate(text, rev):
892 return ([(rev, False)] * len(text.splitlines()), text)
892 return ([(rev, False)] * len(text.splitlines()), text)
893
893
894 def pair(parent, child):
894 def pair(parent, child):
895 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts,
895 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts,
896 refine=True)
896 refine=True)
897 for (a1, a2, b1, b2), t in blocks:
897 for (a1, a2, b1, b2), t in blocks:
898 # Changed blocks ('!') or blocks made only of blank lines ('~')
898 # Changed blocks ('!') or blocks made only of blank lines ('~')
899 # belong to the child.
899 # belong to the child.
900 if t == '=':
900 if t == '=':
901 child[0][b1:b2] = parent[0][a1:a2]
901 child[0][b1:b2] = parent[0][a1:a2]
902 return child
902 return child
903
903
904 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
904 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
905
905
906 def parents(f):
906 def parents(f):
907 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
907 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
908 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
908 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
909 # from the topmost introrev (= srcrev) down to p.linkrev() if it
909 # from the topmost introrev (= srcrev) down to p.linkrev() if it
910 # isn't an ancestor of the srcrev.
910 # isn't an ancestor of the srcrev.
911 f._changeid
911 f._changeid
912 pl = f.parents()
912 pl = f.parents()
913
913
914 # Don't return renamed parents if we aren't following.
914 # Don't return renamed parents if we aren't following.
915 if not follow:
915 if not follow:
916 pl = [p for p in pl if p.path() == f.path()]
916 pl = [p for p in pl if p.path() == f.path()]
917
917
918 # renamed filectx won't have a filelog yet, so set it
918 # renamed filectx won't have a filelog yet, so set it
919 # from the cache to save time
919 # from the cache to save time
920 for p in pl:
920 for p in pl:
921 if not '_filelog' in p.__dict__:
921 if not '_filelog' in p.__dict__:
922 p._filelog = getlog(p.path())
922 p._filelog = getlog(p.path())
923
923
924 return pl
924 return pl
925
925
926 # use linkrev to find the first changeset where self appeared
926 # use linkrev to find the first changeset where self appeared
927 base = self
927 base = self
928 introrev = self.introrev()
928 introrev = self.introrev()
929 if self.rev() != introrev:
929 if self.rev() != introrev:
930 base = self.filectx(self.filenode(), changeid=introrev)
930 base = self.filectx(self.filenode(), changeid=introrev)
931 if getattr(base, '_ancestrycontext', None) is None:
931 if getattr(base, '_ancestrycontext', None) is None:
932 cl = self._repo.changelog
932 cl = self._repo.changelog
933 if introrev is None:
933 if introrev is None:
934 # wctx is not inclusive, but works because _ancestrycontext
934 # wctx is not inclusive, but works because _ancestrycontext
935 # is used to test filelog revisions
935 # is used to test filelog revisions
936 ac = cl.ancestors([p.rev() for p in base.parents()],
936 ac = cl.ancestors([p.rev() for p in base.parents()],
937 inclusive=True)
937 inclusive=True)
938 else:
938 else:
939 ac = cl.ancestors([introrev], inclusive=True)
939 ac = cl.ancestors([introrev], inclusive=True)
940 base._ancestrycontext = ac
940 base._ancestrycontext = ac
941
941
942 # This algorithm would prefer to be recursive, but Python is a
942 # This algorithm would prefer to be recursive, but Python is a
943 # bit recursion-hostile. Instead we do an iterative
943 # bit recursion-hostile. Instead we do an iterative
944 # depth-first search.
944 # depth-first search.
945
945
946 visit = [base]
946 visit = [base]
947 hist = {}
947 hist = {}
948 pcache = {}
948 pcache = {}
949 needed = {base: 1}
949 needed = {base: 1}
950 while visit:
950 while visit:
951 f = visit[-1]
951 f = visit[-1]
952 pcached = f in pcache
952 pcached = f in pcache
953 if not pcached:
953 if not pcached:
954 pcache[f] = parents(f)
954 pcache[f] = parents(f)
955
955
956 ready = True
956 ready = True
957 pl = pcache[f]
957 pl = pcache[f]
958 for p in pl:
958 for p in pl:
959 if p not in hist:
959 if p not in hist:
960 ready = False
960 ready = False
961 visit.append(p)
961 visit.append(p)
962 if not pcached:
962 if not pcached:
963 needed[p] = needed.get(p, 0) + 1
963 needed[p] = needed.get(p, 0) + 1
964 if ready:
964 if ready:
965 visit.pop()
965 visit.pop()
966 reusable = f in hist
966 reusable = f in hist
967 if reusable:
967 if reusable:
968 curr = hist[f]
968 curr = hist[f]
969 else:
969 else:
970 curr = decorate(f.data(), f)
970 curr = decorate(f.data(), f)
971 for p in pl:
971 for p in pl:
972 if not reusable:
972 if not reusable:
973 curr = pair(hist[p], curr)
973 curr = pair(hist[p], curr)
974 if needed[p] == 1:
974 if needed[p] == 1:
975 del hist[p]
975 del hist[p]
976 del needed[p]
976 del needed[p]
977 else:
977 else:
978 needed[p] -= 1
978 needed[p] -= 1
979
979
980 hist[f] = curr
980 hist[f] = curr
981 pcache[f] = []
981 pcache[f] = []
982
982
983 return zip(hist[base][0], hist[base][1].splitlines(True))
983 return zip(hist[base][0], hist[base][1].splitlines(True))
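The loop above reduces the decorated history down to the list of (ctx, line) pairs that the docstring describes. A minimal caller-side sketch (not part of context.py) of how that output is typically consumed; the repository path and the file name 'README' are hypothetical:

    from mercurial import hg, ui as uimod

    repo = hg.repository(uimod.ui(), '.')          # hypothetical repository
    fctx = repo['.']['README']                     # hypothetical tracked file
    for actx, line in fctx.annotate(follow=True):
        # each line keeps its trailing newline (splitlines(True) above)
        repo.ui.write("%d: %s" % (actx.rev(), line))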
984
984
985 def ancestors(self, followfirst=False):
985 def ancestors(self, followfirst=False):
986 visit = {}
986 visit = {}
987 c = self
987 c = self
988 if followfirst:
988 if followfirst:
989 cut = 1
989 cut = 1
990 else:
990 else:
991 cut = None
991 cut = None
992
992
993 while True:
993 while True:
994 for parent in c.parents()[:cut]:
994 for parent in c.parents()[:cut]:
995 visit[(parent.linkrev(), parent.filenode())] = parent
995 visit[(parent.linkrev(), parent.filenode())] = parent
996 if not visit:
996 if not visit:
997 break
997 break
998 c = visit.pop(max(visit))
998 c = visit.pop(max(visit))
999 yield c
999 yield c
1000
1000
1001 class filectx(basefilectx):
1001 class filectx(basefilectx):
1002 """A filecontext object makes access to data related to a particular
1002 """A filecontext object makes access to data related to a particular
1003 filerevision convenient."""
1003 filerevision convenient."""
1004 def __init__(self, repo, path, changeid=None, fileid=None,
1004 def __init__(self, repo, path, changeid=None, fileid=None,
1005 filelog=None, changectx=None):
1005 filelog=None, changectx=None):
1006 """changeid can be a changeset revision, node, or tag.
1006 """changeid can be a changeset revision, node, or tag.
1007 fileid can be a file revision or node."""
1007 fileid can be a file revision or node."""
1008 self._repo = repo
1008 self._repo = repo
1009 self._path = path
1009 self._path = path
1010
1010
1011 assert (changeid is not None
1011 assert (changeid is not None
1012 or fileid is not None
1012 or fileid is not None
1013 or changectx is not None), \
1013 or changectx is not None), \
1014 ("bad args: changeid=%r, fileid=%r, changectx=%r"
1014 ("bad args: changeid=%r, fileid=%r, changectx=%r"
1015 % (changeid, fileid, changectx))
1015 % (changeid, fileid, changectx))
1016
1016
1017 if filelog is not None:
1017 if filelog is not None:
1018 self._filelog = filelog
1018 self._filelog = filelog
1019
1019
1020 if changeid is not None:
1020 if changeid is not None:
1021 self._changeid = changeid
1021 self._changeid = changeid
1022 if changectx is not None:
1022 if changectx is not None:
1023 self._changectx = changectx
1023 self._changectx = changectx
1024 if fileid is not None:
1024 if fileid is not None:
1025 self._fileid = fileid
1025 self._fileid = fileid
1026
1026
1027 @propertycache
1027 @propertycache
1028 def _changectx(self):
1028 def _changectx(self):
1029 try:
1029 try:
1030 return changectx(self._repo, self._changeid)
1030 return changectx(self._repo, self._changeid)
1031 except error.FilteredRepoLookupError:
1031 except error.FilteredRepoLookupError:
1032 # Linkrev may point to any revision in the repository. When the
1032 # Linkrev may point to any revision in the repository. When the
1033 # repository is filtered this may lead to `filectx` trying to build
1033 # repository is filtered this may lead to `filectx` trying to build
1034 # `changectx` for a filtered revision. In such a case we fall back to
1034 # `changectx` for a filtered revision. In such a case we fall back to
1035 # creating `changectx` on the unfiltered version of the repository.
1035 # creating `changectx` on the unfiltered version of the repository.
1036 # This fallback should not be an issue because `changectx` objects from
1036 # This fallback should not be an issue because `changectx` objects from
1037 # `filectx` are not used in complex operations that care about
1037 # `filectx` are not used in complex operations that care about
1038 # filtering.
1038 # filtering.
1039 #
1039 #
1040 # This fallback is a cheap and dirty fix that prevents several
1040 # This fallback is a cheap and dirty fix that prevents several
1041 # crashes. It does not ensure the behavior is correct. However the
1041 # crashes. It does not ensure the behavior is correct. However the
1042 # behavior was not correct before filtering either and "incorrect
1042 # behavior was not correct before filtering either and "incorrect
1043 # behavior" is seen as better as "crash"
1043 # behavior" is seen as better as "crash"
1044 #
1044 #
1045 # Linkrevs have several serious problems with filtering that are
1045 # Linkrevs have several serious problems with filtering that are
1046 # complicated to solve. Proper handling of the issue here should be
1046 # complicated to solve. Proper handling of the issue here should be
1047 # considered when solving the linkrev issues is on the table.
1047 # considered when solving the linkrev issues is on the table.
1048 return changectx(self._repo.unfiltered(), self._changeid)
1048 return changectx(self._repo.unfiltered(), self._changeid)
1049
1049
1050 def filectx(self, fileid, changeid=None):
1050 def filectx(self, fileid, changeid=None):
1051 '''opens an arbitrary revision of the file without
1051 '''opens an arbitrary revision of the file without
1052 opening a new filelog'''
1052 opening a new filelog'''
1053 return filectx(self._repo, self._path, fileid=fileid,
1053 return filectx(self._repo, self._path, fileid=fileid,
1054 filelog=self._filelog, changeid=changeid)
1054 filelog=self._filelog, changeid=changeid)
1055
1055
1056 def data(self):
1056 def data(self):
1057 try:
1057 try:
1058 return self._filelog.read(self._filenode)
1058 return self._filelog.read(self._filenode)
1059 except error.CensoredNodeError:
1059 except error.CensoredNodeError:
1060 if self._repo.ui.config("censor", "policy", "abort") == "ignore":
1060 if self._repo.ui.config("censor", "policy", "abort") == "ignore":
1061 return ""
1061 return ""
1062 raise util.Abort(_("censored node: %s") % short(self._filenode),
1062 raise util.Abort(_("censored node: %s") % short(self._filenode),
1063 hint=_("set censor.policy to ignore errors"))
1063 hint=_("set censor.policy to ignore errors"))
1064
1064
1065 def size(self):
1065 def size(self):
1066 return self._filelog.size(self._filerev)
1066 return self._filelog.size(self._filerev)
1067
1067
1068 def renamed(self):
1068 def renamed(self):
1069 """check if file was actually renamed in this changeset revision
1069 """check if file was actually renamed in this changeset revision
1070
1070
1071 If a rename is logged in the file revision, we report the copy for the
1071 If a rename is logged in the file revision, we report the copy for the
1072 changeset only if the file revision's linkrev points back to the changeset
1072 changeset only if the file revision's linkrev points back to the changeset
1073 in question or both changeset parents contain different file revisions.
1073 in question or both changeset parents contain different file revisions.
1074 """
1074 """
1075
1075
1076 renamed = self._filelog.renamed(self._filenode)
1076 renamed = self._filelog.renamed(self._filenode)
1077 if not renamed:
1077 if not renamed:
1078 return renamed
1078 return renamed
1079
1079
1080 if self.rev() == self.linkrev():
1080 if self.rev() == self.linkrev():
1081 return renamed
1081 return renamed
1082
1082
1083 name = self.path()
1083 name = self.path()
1084 fnode = self._filenode
1084 fnode = self._filenode
1085 for p in self._changectx.parents():
1085 for p in self._changectx.parents():
1086 try:
1086 try:
1087 if fnode == p.filenode(name):
1087 if fnode == p.filenode(name):
1088 return None
1088 return None
1089 except error.LookupError:
1089 except error.LookupError:
1090 pass
1090 pass
1091 return renamed
1091 return renamed
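A small sketch (not part of context.py) of the caller-side view of renamed(): it returns either a false value or a (source path, source filenode) pair. The `repo` object and the path 'b.txt' are hypothetical:

    fctx = repo['tip']['b.txt']        # hypothetical file in the tip changeset
    r = fctx.renamed()
    if r:
        src, srcnode = r               # copy source path and its filenode
        repo.ui.write("%s was copied from %s\n" % (fctx.path(), src))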
1092
1092
1093 def children(self):
1093 def children(self):
1094 # hard for renames
1094 # hard for renames
1095 c = self._filelog.children(self._filenode)
1095 c = self._filelog.children(self._filenode)
1096 return [filectx(self._repo, self._path, fileid=x,
1096 return [filectx(self._repo, self._path, fileid=x,
1097 filelog=self._filelog) for x in c]
1097 filelog=self._filelog) for x in c]
1098
1098
1099 class committablectx(basectx):
1099 class committablectx(basectx):
1100 """A committablectx object provides common functionality for a context that
1100 """A committablectx object provides common functionality for a context that
1101 wants the ability to commit, e.g. workingctx or memctx."""
1101 wants the ability to commit, e.g. workingctx or memctx."""
1102 def __init__(self, repo, text="", user=None, date=None, extra=None,
1102 def __init__(self, repo, text="", user=None, date=None, extra=None,
1103 changes=None):
1103 changes=None):
1104 self._repo = repo
1104 self._repo = repo
1105 self._rev = None
1105 self._rev = None
1106 self._node = None
1106 self._node = None
1107 self._text = text
1107 self._text = text
1108 if date:
1108 if date:
1109 self._date = util.parsedate(date)
1109 self._date = util.parsedate(date)
1110 if user:
1110 if user:
1111 self._user = user
1111 self._user = user
1112 if changes:
1112 if changes:
1113 self._status = changes
1113 self._status = changes
1114
1114
1115 self._extra = {}
1115 self._extra = {}
1116 if extra:
1116 if extra:
1117 self._extra = extra.copy()
1117 self._extra = extra.copy()
1118 if 'branch' not in self._extra:
1118 if 'branch' not in self._extra:
1119 try:
1119 try:
1120 branch = encoding.fromlocal(self._repo.dirstate.branch())
1120 branch = encoding.fromlocal(self._repo.dirstate.branch())
1121 except UnicodeDecodeError:
1121 except UnicodeDecodeError:
1122 raise util.Abort(_('branch name not in UTF-8!'))
1122 raise util.Abort(_('branch name not in UTF-8!'))
1123 self._extra['branch'] = branch
1123 self._extra['branch'] = branch
1124 if self._extra['branch'] == '':
1124 if self._extra['branch'] == '':
1125 self._extra['branch'] = 'default'
1125 self._extra['branch'] = 'default'
1126
1126
1127 def __str__(self):
1127 def __str__(self):
1128 return str(self._parents[0]) + "+"
1128 return str(self._parents[0]) + "+"
1129
1129
1130 def __nonzero__(self):
1130 def __nonzero__(self):
1131 return True
1131 return True
1132
1132
1133 def _buildflagfunc(self):
1133 def _buildflagfunc(self):
1134 # Create a fallback function for getting file flags when the
1134 # Create a fallback function for getting file flags when the
1135 # filesystem doesn't support them
1135 # filesystem doesn't support them
1136
1136
1137 copiesget = self._repo.dirstate.copies().get
1137 copiesget = self._repo.dirstate.copies().get
1138
1138
1139 if len(self._parents) < 2:
1139 if len(self._parents) < 2:
1140 # when we have one parent, it's easy: copy from parent
1140 # when we have one parent, it's easy: copy from parent
1141 man = self._parents[0].manifest()
1141 man = self._parents[0].manifest()
1142 def func(f):
1142 def func(f):
1143 f = copiesget(f, f)
1143 f = copiesget(f, f)
1144 return man.flags(f)
1144 return man.flags(f)
1145 else:
1145 else:
1146 # merges are tricky: we try to reconstruct the unstored
1146 # merges are tricky: we try to reconstruct the unstored
1147 # result from the merge (issue1802)
1147 # result from the merge (issue1802)
1148 p1, p2 = self._parents
1148 p1, p2 = self._parents
1149 pa = p1.ancestor(p2)
1149 pa = p1.ancestor(p2)
1150 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1150 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1151
1151
1152 def func(f):
1152 def func(f):
1153 f = copiesget(f, f) # may be wrong for merges with copies
1153 f = copiesget(f, f) # may be wrong for merges with copies
1154 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1154 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1155 if fl1 == fl2:
1155 if fl1 == fl2:
1156 return fl1
1156 return fl1
1157 if fl1 == fla:
1157 if fl1 == fla:
1158 return fl2
1158 return fl2
1159 if fl2 == fla:
1159 if fl2 == fla:
1160 return fl1
1160 return fl1
1161 return '' # punt for conflicts
1161 return '' # punt for conflicts
1162
1162
1163 return func
1163 return func
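The flag-reconstruction logic above is easiest to see on a concrete case. Here is a standalone restatement of the three-way resolution, written purely for illustration (not part of context.py):

    def mergeflags(fl1, fl2, fla):
        """three-way merge of file flags from p1, p2 and their ancestor"""
        if fl1 == fl2:
            return fl1      # both parents agree
        if fl1 == fla:
            return fl2      # only p2 changed the flag: take p2's value
        if fl2 == fla:
            return fl1      # only p1 changed the flag: take p1's value
        return ''           # conflicting flag changes: punt

    # e.g. the exec bit was added only on p1:
    assert mergeflags('x', '', '') == 'x'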
1164
1164
1165 @propertycache
1165 @propertycache
1166 def _flagfunc(self):
1166 def _flagfunc(self):
1167 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1167 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1168
1168
1169 @propertycache
1169 @propertycache
1170 def _manifest(self):
1170 def _manifest(self):
1171 """generate a manifest corresponding to the values in self._status
1171 """generate a manifest corresponding to the values in self._status
1172
1172
1173 This reuses the file nodeid from the parent, but we append an extra letter
1173 This reuses the file nodeid from the parent, but we append an extra letter
1174 when modified. Modified files get an extra 'm' while added files get
1174 when modified. Modified files get an extra 'm' while added files get
1175 an extra 'a'. This is used by manifest merges to see that files
1175 an extra 'a'. This is used by manifest merges to see that files
1176 are different and by update logic to avoid deleting newly added files.
1176 are different and by update logic to avoid deleting newly added files.
1177 """
1177 """
1178
1178
1179 man1 = self._parents[0].manifest()
1179 man1 = self._parents[0].manifest()
1180 man = man1.copy()
1180 man = man1.copy()
1181 if len(self._parents) > 1:
1181 if len(self._parents) > 1:
1182 man2 = self.p2().manifest()
1182 man2 = self.p2().manifest()
1183 def getman(f):
1183 def getman(f):
1184 if f in man1:
1184 if f in man1:
1185 return man1
1185 return man1
1186 return man2
1186 return man2
1187 else:
1187 else:
1188 getman = lambda f: man1
1188 getman = lambda f: man1
1189
1189
1190 copied = self._repo.dirstate.copies()
1190 copied = self._repo.dirstate.copies()
1191 ff = self._flagfunc
1191 ff = self._flagfunc
1192 for i, l in (("a", self._status.added), ("m", self._status.modified)):
1192 for i, l in (("a", self._status.added), ("m", self._status.modified)):
1193 for f in l:
1193 for f in l:
1194 orig = copied.get(f, f)
1194 orig = copied.get(f, f)
1195 man[f] = getman(orig).get(orig, nullid) + i
1195 man[f] = getman(orig).get(orig, nullid) + i
1196 try:
1196 try:
1197 man.setflag(f, ff(f))
1197 man.setflag(f, ff(f))
1198 except OSError:
1198 except OSError:
1199 pass
1199 pass
1200
1200
1201 for f in self._status.deleted + self._status.removed:
1201 for f in self._status.deleted + self._status.removed:
1202 if f in man:
1202 if f in man:
1203 del man[f]
1203 del man[f]
1204
1204
1205 return man
1205 return man
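A short sketch (not part of context.py) of the suffix scheme described in the docstring above; `repo` is a hypothetical localrepo and 'a.txt' is assumed to be a tracked file with local modifications:

    wctx = repo[None]                  # working directory context
    man = wctx.manifest()
    node = man['a.txt']                # parent file nodeid plus a marker letter
    assert len(node) == 21 and node.endswith('m')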
1206
1206
1207 @propertycache
1207 @propertycache
1208 def _status(self):
1208 def _status(self):
1209 return self._repo.status()
1209 return self._repo.status()
1210
1210
1211 @propertycache
1211 @propertycache
1212 def _user(self):
1212 def _user(self):
1213 return self._repo.ui.username()
1213 return self._repo.ui.username()
1214
1214
1215 @propertycache
1215 @propertycache
1216 def _date(self):
1216 def _date(self):
1217 return util.makedate()
1217 return util.makedate()
1218
1218
1219 def subrev(self, subpath):
1219 def subrev(self, subpath):
1220 return None
1220 return None
1221
1221
1222 def manifestnode(self):
1222 def manifestnode(self):
1223 return None
1223 return None
1224 def user(self):
1224 def user(self):
1225 return self._user or self._repo.ui.username()
1225 return self._user or self._repo.ui.username()
1226 def date(self):
1226 def date(self):
1227 return self._date
1227 return self._date
1228 def description(self):
1228 def description(self):
1229 return self._text
1229 return self._text
1230 def files(self):
1230 def files(self):
1231 return sorted(self._status.modified + self._status.added +
1231 return sorted(self._status.modified + self._status.added +
1232 self._status.removed)
1232 self._status.removed)
1233
1233
1234 def modified(self):
1234 def modified(self):
1235 return self._status.modified
1235 return self._status.modified
1236 def added(self):
1236 def added(self):
1237 return self._status.added
1237 return self._status.added
1238 def removed(self):
1238 def removed(self):
1239 return self._status.removed
1239 return self._status.removed
1240 def deleted(self):
1240 def deleted(self):
1241 return self._status.deleted
1241 return self._status.deleted
1242 def branch(self):
1242 def branch(self):
1243 return encoding.tolocal(self._extra['branch'])
1243 return encoding.tolocal(self._extra['branch'])
1244 def closesbranch(self):
1244 def closesbranch(self):
1245 return 'close' in self._extra
1245 return 'close' in self._extra
1246 def extra(self):
1246 def extra(self):
1247 return self._extra
1247 return self._extra
1248
1248
1249 def tags(self):
1249 def tags(self):
1250 return []
1250 return []
1251
1251
1252 def bookmarks(self):
1252 def bookmarks(self):
1253 b = []
1253 b = []
1254 for p in self.parents():
1254 for p in self.parents():
1255 b.extend(p.bookmarks())
1255 b.extend(p.bookmarks())
1256 return b
1256 return b
1257
1257
1258 def phase(self):
1258 def phase(self):
1259 phase = phases.draft # default phase to draft
1259 phase = phases.draft # default phase to draft
1260 for p in self.parents():
1260 for p in self.parents():
1261 phase = max(phase, p.phase())
1261 phase = max(phase, p.phase())
1262 return phase
1262 return phase
1263
1263
1264 def hidden(self):
1264 def hidden(self):
1265 return False
1265 return False
1266
1266
1267 def children(self):
1267 def children(self):
1268 return []
1268 return []
1269
1269
1270 def flags(self, path):
1270 def flags(self, path):
1271 if '_manifest' in self.__dict__:
1271 if '_manifest' in self.__dict__:
1272 try:
1272 try:
1273 return self._manifest.flags(path)
1273 return self._manifest.flags(path)
1274 except KeyError:
1274 except KeyError:
1275 return ''
1275 return ''
1276
1276
1277 try:
1277 try:
1278 return self._flagfunc(path)
1278 return self._flagfunc(path)
1279 except OSError:
1279 except OSError:
1280 return ''
1280 return ''
1281
1281
1282 def ancestor(self, c2):
1282 def ancestor(self, c2):
1283 """return the "best" ancestor context of self and c2"""
1283 """return the "best" ancestor context of self and c2"""
1284 return self._parents[0].ancestor(c2) # punt on two parents for now
1284 return self._parents[0].ancestor(c2) # punt on two parents for now
1285
1285
1286 def walk(self, match):
1286 def walk(self, match):
1287 '''Generates matching file names.'''
1287 '''Generates matching file names.'''
1288 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1288 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1289 True, False))
1289 True, False))
1290
1290
1291 def matches(self, match):
1291 def matches(self, match):
1292 return sorted(self._repo.dirstate.matches(match))
1292 return sorted(self._repo.dirstate.matches(match))
1293
1293
1294 def ancestors(self):
1294 def ancestors(self):
1295 for p in self._parents:
1295 for p in self._parents:
1296 yield p
1296 yield p
1297 for a in self._repo.changelog.ancestors(
1297 for a in self._repo.changelog.ancestors(
1298 [p.rev() for p in self._parents]):
1298 [p.rev() for p in self._parents]):
1299 yield changectx(self._repo, a)
1299 yield changectx(self._repo, a)
1300
1300
1301 def markcommitted(self, node):
1301 def markcommitted(self, node):
1302 """Perform post-commit cleanup necessary after committing this ctx
1302 """Perform post-commit cleanup necessary after committing this ctx
1303
1303
1304 Specifically, this updates the backing stores that this working context
1304 Specifically, this updates the backing stores that this working context
1305 wraps to reflect that the changes represented by this
1305 wraps to reflect that the changes represented by this
1306 workingctx have been committed. For example, it marks
1306 workingctx have been committed. For example, it marks
1307 modified and added files as normal in the dirstate.
1307 modified and added files as normal in the dirstate.
1308
1308
1309 """
1309 """
1310
1310
1311 self._repo.dirstate.beginparentchange()
1311 self._repo.dirstate.beginparentchange()
1312 for f in self.modified() + self.added():
1312 for f in self.modified() + self.added():
1313 self._repo.dirstate.normal(f)
1313 self._repo.dirstate.normal(f)
1314 for f in self.removed():
1314 for f in self.removed():
1315 self._repo.dirstate.drop(f)
1315 self._repo.dirstate.drop(f)
1316 self._repo.dirstate.setparents(node)
1316 self._repo.dirstate.setparents(node)
1317 self._repo.dirstate.endparentchange()
1317 self._repo.dirstate.endparentchange()
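Roughly how the commit machinery uses this hook, heavily simplified and shown only as a sketch (the real sequence lives in localrepo.commit); `repo` is a hypothetical localrepo with pending changes:

    wctx = repo[None]
    node = repo.commitctx(wctx)        # write the changeset to the store
    wctx.markcommitted(node)           # then sync dirstate parents and file states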
1318
1318
1319 class workingctx(committablectx):
1319 class workingctx(committablectx):
1320 """A workingctx object makes access to data related to
1320 """A workingctx object makes access to data related to
1321 the current working directory convenient.
1321 the current working directory convenient.
1322 date - any valid date string or (unixtime, offset), or None.
1322 date - any valid date string or (unixtime, offset), or None.
1323 user - username string, or None.
1323 user - username string, or None.
1324 extra - a dictionary of extra values, or None.
1324 extra - a dictionary of extra values, or None.
1325 changes - a list of file lists as returned by localrepo.status()
1325 changes - a list of file lists as returned by localrepo.status()
1326 or None to use the repository status.
1326 or None to use the repository status.
1327 """
1327 """
1328 def __init__(self, repo, text="", user=None, date=None, extra=None,
1328 def __init__(self, repo, text="", user=None, date=None, extra=None,
1329 changes=None):
1329 changes=None):
1330 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1330 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1331
1331
1332 def __iter__(self):
1332 def __iter__(self):
1333 d = self._repo.dirstate
1333 d = self._repo.dirstate
1334 for f in d:
1334 for f in d:
1335 if d[f] != 'r':
1335 if d[f] != 'r':
1336 yield f
1336 yield f
1337
1337
1338 def __contains__(self, key):
1338 def __contains__(self, key):
1339 return self._repo.dirstate[key] not in "?r"
1339 return self._repo.dirstate[key] not in "?r"
1340
1340
1341 def hex(self):
1341 def hex(self):
1342 return "ff" * 20
1342 return hex(wdirid)
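wdirid comes from the node module imported at the top of this file; it is the 20-byte 0xff placeholder node for the working directory, so this hex() still yields the historical "ff" * 20 string. A sketch of that equivalence (`repo` is a hypothetical localrepo):

    from mercurial.node import hex, wdirid

    assert hex(wdirid) == 'ff' * 20
    assert repo[None].hex() == hex(wdirid)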
1343
1343
1344 @propertycache
1344 @propertycache
1345 def _parents(self):
1345 def _parents(self):
1346 p = self._repo.dirstate.parents()
1346 p = self._repo.dirstate.parents()
1347 if p[1] == nullid:
1347 if p[1] == nullid:
1348 p = p[:-1]
1348 p = p[:-1]
1349 return [changectx(self._repo, x) for x in p]
1349 return [changectx(self._repo, x) for x in p]
1350
1350
1351 def filectx(self, path, filelog=None):
1351 def filectx(self, path, filelog=None):
1352 """get a file context from the working directory"""
1352 """get a file context from the working directory"""
1353 return workingfilectx(self._repo, path, workingctx=self,
1353 return workingfilectx(self._repo, path, workingctx=self,
1354 filelog=filelog)
1354 filelog=filelog)
1355
1355
1356 def dirty(self, missing=False, merge=True, branch=True):
1356 def dirty(self, missing=False, merge=True, branch=True):
1357 "check whether a working directory is modified"
1357 "check whether a working directory is modified"
1358 # check subrepos first
1358 # check subrepos first
1359 for s in sorted(self.substate):
1359 for s in sorted(self.substate):
1360 if self.sub(s).dirty():
1360 if self.sub(s).dirty():
1361 return True
1361 return True
1362 # check current working dir
1362 # check current working dir
1363 return ((merge and self.p2()) or
1363 return ((merge and self.p2()) or
1364 (branch and self.branch() != self.p1().branch()) or
1364 (branch and self.branch() != self.p1().branch()) or
1365 self.modified() or self.added() or self.removed() or
1365 self.modified() or self.added() or self.removed() or
1366 (missing and self.deleted()))
1366 (missing and self.deleted()))
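A typical caller-side sketch for dirty() (not part of context.py); `repo` is a hypothetical localrepo and the abort message is made up:

    from mercurial import util
    from mercurial.i18n import _

    wctx = repo[None]
    if wctx.dirty(missing=True):       # also treat deleted files as dirty
        raise util.Abort(_('uncommitted changes in the working directory'))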
1367
1367
1368 def add(self, list, prefix=""):
1368 def add(self, list, prefix=""):
1369 join = lambda f: os.path.join(prefix, f)
1369 join = lambda f: os.path.join(prefix, f)
1370 wlock = self._repo.wlock()
1370 wlock = self._repo.wlock()
1371 ui, ds = self._repo.ui, self._repo.dirstate
1371 ui, ds = self._repo.ui, self._repo.dirstate
1372 try:
1372 try:
1373 rejected = []
1373 rejected = []
1374 lstat = self._repo.wvfs.lstat
1374 lstat = self._repo.wvfs.lstat
1375 for f in list:
1375 for f in list:
1376 scmutil.checkportable(ui, join(f))
1376 scmutil.checkportable(ui, join(f))
1377 try:
1377 try:
1378 st = lstat(f)
1378 st = lstat(f)
1379 except OSError:
1379 except OSError:
1380 ui.warn(_("%s does not exist!\n") % join(f))
1380 ui.warn(_("%s does not exist!\n") % join(f))
1381 rejected.append(f)
1381 rejected.append(f)
1382 continue
1382 continue
1383 if st.st_size > 10000000:
1383 if st.st_size > 10000000:
1384 ui.warn(_("%s: up to %d MB of RAM may be required "
1384 ui.warn(_("%s: up to %d MB of RAM may be required "
1385 "to manage this file\n"
1385 "to manage this file\n"
1386 "(use 'hg revert %s' to cancel the "
1386 "(use 'hg revert %s' to cancel the "
1387 "pending addition)\n")
1387 "pending addition)\n")
1388 % (f, 3 * st.st_size // 1000000, join(f)))
1388 % (f, 3 * st.st_size // 1000000, join(f)))
1389 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1389 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1390 ui.warn(_("%s not added: only files and symlinks "
1390 ui.warn(_("%s not added: only files and symlinks "
1391 "supported currently\n") % join(f))
1391 "supported currently\n") % join(f))
1392 rejected.append(f)
1392 rejected.append(f)
1393 elif ds[f] in 'amn':
1393 elif ds[f] in 'amn':
1394 ui.warn(_("%s already tracked!\n") % join(f))
1394 ui.warn(_("%s already tracked!\n") % join(f))
1395 elif ds[f] == 'r':
1395 elif ds[f] == 'r':
1396 ds.normallookup(f)
1396 ds.normallookup(f)
1397 else:
1397 else:
1398 ds.add(f)
1398 ds.add(f)
1399 return rejected
1399 return rejected
1400 finally:
1400 finally:
1401 wlock.release()
1401 wlock.release()
1402
1402
1403 def forget(self, files, prefix=""):
1403 def forget(self, files, prefix=""):
1404 join = lambda f: os.path.join(prefix, f)
1404 join = lambda f: os.path.join(prefix, f)
1405 wlock = self._repo.wlock()
1405 wlock = self._repo.wlock()
1406 try:
1406 try:
1407 rejected = []
1407 rejected = []
1408 for f in files:
1408 for f in files:
1409 if f not in self._repo.dirstate:
1409 if f not in self._repo.dirstate:
1410 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1410 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1411 rejected.append(f)
1411 rejected.append(f)
1412 elif self._repo.dirstate[f] != 'a':
1412 elif self._repo.dirstate[f] != 'a':
1413 self._repo.dirstate.remove(f)
1413 self._repo.dirstate.remove(f)
1414 else:
1414 else:
1415 self._repo.dirstate.drop(f)
1415 self._repo.dirstate.drop(f)
1416 return rejected
1416 return rejected
1417 finally:
1417 finally:
1418 wlock.release()
1418 wlock.release()
1419
1419
1420 def undelete(self, list):
1420 def undelete(self, list):
1421 pctxs = self.parents()
1421 pctxs = self.parents()
1422 wlock = self._repo.wlock()
1422 wlock = self._repo.wlock()
1423 try:
1423 try:
1424 for f in list:
1424 for f in list:
1425 if self._repo.dirstate[f] != 'r':
1425 if self._repo.dirstate[f] != 'r':
1426 self._repo.ui.warn(_("%s not removed!\n") % f)
1426 self._repo.ui.warn(_("%s not removed!\n") % f)
1427 else:
1427 else:
1428 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1428 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1429 t = fctx.data()
1429 t = fctx.data()
1430 self._repo.wwrite(f, t, fctx.flags())
1430 self._repo.wwrite(f, t, fctx.flags())
1431 self._repo.dirstate.normal(f)
1431 self._repo.dirstate.normal(f)
1432 finally:
1432 finally:
1433 wlock.release()
1433 wlock.release()
1434
1434
1435 def copy(self, source, dest):
1435 def copy(self, source, dest):
1436 try:
1436 try:
1437 st = self._repo.wvfs.lstat(dest)
1437 st = self._repo.wvfs.lstat(dest)
1438 except OSError as err:
1438 except OSError as err:
1439 if err.errno != errno.ENOENT:
1439 if err.errno != errno.ENOENT:
1440 raise
1440 raise
1441 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1441 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1442 return
1442 return
1443 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1443 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1444 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1444 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1445 "symbolic link\n") % dest)
1445 "symbolic link\n") % dest)
1446 else:
1446 else:
1447 wlock = self._repo.wlock()
1447 wlock = self._repo.wlock()
1448 try:
1448 try:
1449 if self._repo.dirstate[dest] in '?':
1449 if self._repo.dirstate[dest] in '?':
1450 self._repo.dirstate.add(dest)
1450 self._repo.dirstate.add(dest)
1451 elif self._repo.dirstate[dest] in 'r':
1451 elif self._repo.dirstate[dest] in 'r':
1452 self._repo.dirstate.normallookup(dest)
1452 self._repo.dirstate.normallookup(dest)
1453 self._repo.dirstate.copy(source, dest)
1453 self._repo.dirstate.copy(source, dest)
1454 finally:
1454 finally:
1455 wlock.release()
1455 wlock.release()
1456
1456
1457 def match(self, pats=[], include=None, exclude=None, default='glob',
1457 def match(self, pats=[], include=None, exclude=None, default='glob',
1458 listsubrepos=False, badfn=None):
1458 listsubrepos=False, badfn=None):
1459 r = self._repo
1459 r = self._repo
1460
1460
1461 # Only a case insensitive filesystem needs magic to translate user input
1461 # Only a case insensitive filesystem needs magic to translate user input
1462 # to actual case in the filesystem.
1462 # to actual case in the filesystem.
1463 if not util.checkcase(r.root):
1463 if not util.checkcase(r.root):
1464 return matchmod.icasefsmatcher(r.root, r.getcwd(), pats, include,
1464 return matchmod.icasefsmatcher(r.root, r.getcwd(), pats, include,
1465 exclude, default, r.auditor, self,
1465 exclude, default, r.auditor, self,
1466 listsubrepos=listsubrepos,
1466 listsubrepos=listsubrepos,
1467 badfn=badfn)
1467 badfn=badfn)
1468 return matchmod.match(r.root, r.getcwd(), pats,
1468 return matchmod.match(r.root, r.getcwd(), pats,
1469 include, exclude, default,
1469 include, exclude, default,
1470 auditor=r.auditor, ctx=self,
1470 auditor=r.auditor, ctx=self,
1471 listsubrepos=listsubrepos, badfn=badfn)
1471 listsubrepos=listsubrepos, badfn=badfn)
1472
1472
1473 def _filtersuspectsymlink(self, files):
1473 def _filtersuspectsymlink(self, files):
1474 if not files or self._repo.dirstate._checklink:
1474 if not files or self._repo.dirstate._checklink:
1475 return files
1475 return files
1476
1476
1477 # Symlink placeholders may get non-symlink-like contents
1477 # Symlink placeholders may get non-symlink-like contents
1478 # via user error or dereferencing by NFS or Samba servers,
1478 # via user error or dereferencing by NFS or Samba servers,
1479 # so we filter out any placeholders that don't look like a
1479 # so we filter out any placeholders that don't look like a
1480 # symlink
1480 # symlink
1481 sane = []
1481 sane = []
1482 for f in files:
1482 for f in files:
1483 if self.flags(f) == 'l':
1483 if self.flags(f) == 'l':
1484 d = self[f].data()
1484 d = self[f].data()
1485 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1485 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1486 self._repo.ui.debug('ignoring suspect symlink placeholder'
1486 self._repo.ui.debug('ignoring suspect symlink placeholder'
1487 ' "%s"\n' % f)
1487 ' "%s"\n' % f)
1488 continue
1488 continue
1489 sane.append(f)
1489 sane.append(f)
1490 return sane
1490 return sane
1491
1491
1492 def _checklookup(self, files):
1492 def _checklookup(self, files):
1493 # check for any possibly clean files
1493 # check for any possibly clean files
1494 if not files:
1494 if not files:
1495 return [], []
1495 return [], []
1496
1496
1497 modified = []
1497 modified = []
1498 fixup = []
1498 fixup = []
1499 pctx = self._parents[0]
1499 pctx = self._parents[0]
1500 # do a full compare of any files that might have changed
1500 # do a full compare of any files that might have changed
1501 for f in sorted(files):
1501 for f in sorted(files):
1502 if (f not in pctx or self.flags(f) != pctx.flags(f)
1502 if (f not in pctx or self.flags(f) != pctx.flags(f)
1503 or pctx[f].cmp(self[f])):
1503 or pctx[f].cmp(self[f])):
1504 modified.append(f)
1504 modified.append(f)
1505 else:
1505 else:
1506 fixup.append(f)
1506 fixup.append(f)
1507
1507
1508 # update dirstate for files that are actually clean
1508 # update dirstate for files that are actually clean
1509 if fixup:
1509 if fixup:
1510 try:
1510 try:
1511 # updating the dirstate is optional
1511 # updating the dirstate is optional
1512 # so we don't wait on the lock
1512 # so we don't wait on the lock
1513 # wlock can invalidate the dirstate, so cache normal _after_
1513 # wlock can invalidate the dirstate, so cache normal _after_
1514 # taking the lock
1514 # taking the lock
1515 wlock = self._repo.wlock(False)
1515 wlock = self._repo.wlock(False)
1516 normal = self._repo.dirstate.normal
1516 normal = self._repo.dirstate.normal
1517 try:
1517 try:
1518 for f in fixup:
1518 for f in fixup:
1519 normal(f)
1519 normal(f)
1520 finally:
1520 finally:
1521 wlock.release()
1521 wlock.release()
1522 except error.LockError:
1522 except error.LockError:
1523 pass
1523 pass
1524 return modified, fixup
1524 return modified, fixup
1525
1525
1526 def _manifestmatches(self, match, s):
1526 def _manifestmatches(self, match, s):
1527 """Slow path for workingctx
1527 """Slow path for workingctx
1528
1528
1529 The fast path is when we compare the working directory to its parent
1529 The fast path is when we compare the working directory to its parent
1530 which means this function is comparing with a non-parent; therefore we
1530 which means this function is comparing with a non-parent; therefore we
1531 need to build a manifest and return what matches.
1531 need to build a manifest and return what matches.
1532 """
1532 """
1533 mf = self._repo['.']._manifestmatches(match, s)
1533 mf = self._repo['.']._manifestmatches(match, s)
1534 for f in s.modified + s.added:
1534 for f in s.modified + s.added:
1535 mf[f] = _newnode
1535 mf[f] = _newnode
1536 mf.setflag(f, self.flags(f))
1536 mf.setflag(f, self.flags(f))
1537 for f in s.removed:
1537 for f in s.removed:
1538 if f in mf:
1538 if f in mf:
1539 del mf[f]
1539 del mf[f]
1540 return mf
1540 return mf
1541
1541
1542 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1542 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1543 unknown=False):
1543 unknown=False):
1544 '''Gets the status from the dirstate -- internal use only.'''
1544 '''Gets the status from the dirstate -- internal use only.'''
1545 listignored, listclean, listunknown = ignored, clean, unknown
1545 listignored, listclean, listunknown = ignored, clean, unknown
1546 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1546 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1547 subrepos = []
1547 subrepos = []
1548 if '.hgsub' in self:
1548 if '.hgsub' in self:
1549 subrepos = sorted(self.substate)
1549 subrepos = sorted(self.substate)
1550 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1550 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1551 listclean, listunknown)
1551 listclean, listunknown)
1552
1552
1553 # check for any possibly clean files
1553 # check for any possibly clean files
1554 if cmp:
1554 if cmp:
1555 modified2, fixup = self._checklookup(cmp)
1555 modified2, fixup = self._checklookup(cmp)
1556 s.modified.extend(modified2)
1556 s.modified.extend(modified2)
1557
1557
1558 # update dirstate for files that are actually clean
1558 # update dirstate for files that are actually clean
1559 if fixup and listclean:
1559 if fixup and listclean:
1560 s.clean.extend(fixup)
1560 s.clean.extend(fixup)
1561
1561
1562 if match.always():
1562 if match.always():
1563 # cache for performance
1563 # cache for performance
1564 if s.unknown or s.ignored or s.clean:
1564 if s.unknown or s.ignored or s.clean:
1565 # "_status" is cached with list*=False in the normal route
1565 # "_status" is cached with list*=False in the normal route
1566 self._status = scmutil.status(s.modified, s.added, s.removed,
1566 self._status = scmutil.status(s.modified, s.added, s.removed,
1567 s.deleted, [], [], [])
1567 s.deleted, [], [], [])
1568 else:
1568 else:
1569 self._status = s
1569 self._status = s
1570
1570
1571 return s
1571 return s
1572
1572
1573 def _buildstatus(self, other, s, match, listignored, listclean,
1573 def _buildstatus(self, other, s, match, listignored, listclean,
1574 listunknown):
1574 listunknown):
1575 """build a status with respect to another context
1575 """build a status with respect to another context
1576
1576
1577 This includes logic for maintaining the fast path of status when
1577 This includes logic for maintaining the fast path of status when
1578 comparing the working directory against its parent, which is to skip
1578 comparing the working directory against its parent, which is to skip
1579 building a new manifest if self (working directory) is not comparing
1579 building a new manifest if self (working directory) is not comparing
1580 against its parent (repo['.']).
1580 against its parent (repo['.']).
1581 """
1581 """
1582 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1582 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1583 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1583 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1584 # might have accidentally ended up with the entire contents of the file
1584 # might have accidentally ended up with the entire contents of the file
1585 # they are supposed to be linking to.
1585 # they are supposed to be linking to.
1586 s.modified[:] = self._filtersuspectsymlink(s.modified)
1586 s.modified[:] = self._filtersuspectsymlink(s.modified)
1587 if other != self._repo['.']:
1587 if other != self._repo['.']:
1588 s = super(workingctx, self)._buildstatus(other, s, match,
1588 s = super(workingctx, self)._buildstatus(other, s, match,
1589 listignored, listclean,
1589 listignored, listclean,
1590 listunknown)
1590 listunknown)
1591 return s
1591 return s
1592
1592
1593 def _matchstatus(self, other, match):
1593 def _matchstatus(self, other, match):
1594 """override the match method with a filter for directory patterns
1594 """override the match method with a filter for directory patterns
1595
1595
1596 We use inheritance to customize the match.bad method only in cases of
1596 We use inheritance to customize the match.bad method only in cases of
1597 workingctx since it belongs only to the working directory when
1597 workingctx since it belongs only to the working directory when
1598 comparing against the parent changeset.
1598 comparing against the parent changeset.
1599
1599
1600 If we aren't comparing against the working directory's parent, then we
1600 If we aren't comparing against the working directory's parent, then we
1601 just use the default match object sent to us.
1601 just use the default match object sent to us.
1602 """
1602 """
1603 superself = super(workingctx, self)
1603 superself = super(workingctx, self)
1604 match = superself._matchstatus(other, match)
1604 match = superself._matchstatus(other, match)
1605 if other != self._repo['.']:
1605 if other != self._repo['.']:
1606 def bad(f, msg):
1606 def bad(f, msg):
1607 # 'f' may be a directory pattern from 'match.files()',
1607 # 'f' may be a directory pattern from 'match.files()',
1608 # so 'f not in ctx1' is not enough
1608 # so 'f not in ctx1' is not enough
1609 if f not in other and not other.hasdir(f):
1609 if f not in other and not other.hasdir(f):
1610 self._repo.ui.warn('%s: %s\n' %
1610 self._repo.ui.warn('%s: %s\n' %
1611 (self._repo.dirstate.pathto(f), msg))
1611 (self._repo.dirstate.pathto(f), msg))
1612 match.bad = bad
1612 match.bad = bad
1613 return match
1613 return match
1614
1614
1615 class committablefilectx(basefilectx):
1615 class committablefilectx(basefilectx):
1616 """A committablefilectx provides common functionality for a file context
1616 """A committablefilectx provides common functionality for a file context
1617 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1617 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1618 def __init__(self, repo, path, filelog=None, ctx=None):
1618 def __init__(self, repo, path, filelog=None, ctx=None):
1619 self._repo = repo
1619 self._repo = repo
1620 self._path = path
1620 self._path = path
1621 self._changeid = None
1621 self._changeid = None
1622 self._filerev = self._filenode = None
1622 self._filerev = self._filenode = None
1623
1623
1624 if filelog is not None:
1624 if filelog is not None:
1625 self._filelog = filelog
1625 self._filelog = filelog
1626 if ctx:
1626 if ctx:
1627 self._changectx = ctx
1627 self._changectx = ctx
1628
1628
1629 def __nonzero__(self):
1629 def __nonzero__(self):
1630 return True
1630 return True
1631
1631
1632 def linkrev(self):
1632 def linkrev(self):
1633 # linked to self._changectx no matter if file is modified or not
1633 # linked to self._changectx no matter if file is modified or not
1634 return self.rev()
1634 return self.rev()
1635
1635
1636 def parents(self):
1636 def parents(self):
1637 '''return parent filectxs, following copies if necessary'''
1637 '''return parent filectxs, following copies if necessary'''
1638 def filenode(ctx, path):
1638 def filenode(ctx, path):
1639 return ctx._manifest.get(path, nullid)
1639 return ctx._manifest.get(path, nullid)
1640
1640
1641 path = self._path
1641 path = self._path
1642 fl = self._filelog
1642 fl = self._filelog
1643 pcl = self._changectx._parents
1643 pcl = self._changectx._parents
1644 renamed = self.renamed()
1644 renamed = self.renamed()
1645
1645
1646 if renamed:
1646 if renamed:
1647 pl = [renamed + (None,)]
1647 pl = [renamed + (None,)]
1648 else:
1648 else:
1649 pl = [(path, filenode(pcl[0], path), fl)]
1649 pl = [(path, filenode(pcl[0], path), fl)]
1650
1650
1651 for pc in pcl[1:]:
1651 for pc in pcl[1:]:
1652 pl.append((path, filenode(pc, path), fl))
1652 pl.append((path, filenode(pc, path), fl))
1653
1653
1654 return [self._parentfilectx(p, fileid=n, filelog=l)
1654 return [self._parentfilectx(p, fileid=n, filelog=l)
1655 for p, n, l in pl if n != nullid]
1655 for p, n, l in pl if n != nullid]
1656
1656
1657 def children(self):
1657 def children(self):
1658 return []
1658 return []
1659
1659
1660 class workingfilectx(committablefilectx):
1660 class workingfilectx(committablefilectx):
1661 """A workingfilectx object makes access to data related to a particular
1661 """A workingfilectx object makes access to data related to a particular
1662 file in the working directory convenient."""
1662 file in the working directory convenient."""
1663 def __init__(self, repo, path, filelog=None, workingctx=None):
1663 def __init__(self, repo, path, filelog=None, workingctx=None):
1664 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1664 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1665
1665
1666 @propertycache
1666 @propertycache
1667 def _changectx(self):
1667 def _changectx(self):
1668 return workingctx(self._repo)
1668 return workingctx(self._repo)
1669
1669
1670 def data(self):
1670 def data(self):
1671 return self._repo.wread(self._path)
1671 return self._repo.wread(self._path)
1672 def renamed(self):
1672 def renamed(self):
1673 rp = self._repo.dirstate.copied(self._path)
1673 rp = self._repo.dirstate.copied(self._path)
1674 if not rp:
1674 if not rp:
1675 return None
1675 return None
1676 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1676 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1677
1677
1678 def size(self):
1678 def size(self):
1679 return self._repo.wvfs.lstat(self._path).st_size
1679 return self._repo.wvfs.lstat(self._path).st_size
1680 def date(self):
1680 def date(self):
1681 t, tz = self._changectx.date()
1681 t, tz = self._changectx.date()
1682 try:
1682 try:
1683 return (int(self._repo.wvfs.lstat(self._path).st_mtime), tz)
1683 return (int(self._repo.wvfs.lstat(self._path).st_mtime), tz)
1684 except OSError as err:
1684 except OSError as err:
1685 if err.errno != errno.ENOENT:
1685 if err.errno != errno.ENOENT:
1686 raise
1686 raise
1687 return (t, tz)
1687 return (t, tz)
1688
1688
1689 def cmp(self, fctx):
1689 def cmp(self, fctx):
1690 """compare with other file context
1690 """compare with other file context
1691
1691
1692 returns True if different than fctx.
1692 returns True if different than fctx.
1693 """
1693 """
1694 # fctx should be a filectx (not a workingfilectx)
1694 # fctx should be a filectx (not a workingfilectx)
1695 # invert comparison to reuse the same code path
1695 # invert comparison to reuse the same code path
1696 return fctx.cmp(self)
1696 return fctx.cmp(self)
1697
1697
1698 def remove(self, ignoremissing=False):
1698 def remove(self, ignoremissing=False):
1699 """wraps unlink for a repo's working directory"""
1699 """wraps unlink for a repo's working directory"""
1700 util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)
1700 util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)
1701
1701
1702 def write(self, data, flags):
1702 def write(self, data, flags):
1703 """wraps repo.wwrite"""
1703 """wraps repo.wwrite"""
1704 self._repo.wwrite(self._path, data, flags)
1704 self._repo.wwrite(self._path, data, flags)
1705
1705
1706 class workingcommitctx(workingctx):
1706 class workingcommitctx(workingctx):
1707 """A workingcommitctx object makes access to data related to
1707 """A workingcommitctx object makes access to data related to
1708 the revision being committed convenient.
1708 the revision being committed convenient.
1709
1709
1710 This hides changes in the working directory, if they aren't
1710 This hides changes in the working directory, if they aren't
1711 committed in this context.
1711 committed in this context.
1712 """
1712 """
1713 def __init__(self, repo, changes,
1713 def __init__(self, repo, changes,
1714 text="", user=None, date=None, extra=None):
1714 text="", user=None, date=None, extra=None):
1715 super(workingctx, self).__init__(repo, text, user, date, extra,
1715 super(workingctx, self).__init__(repo, text, user, date, extra,
1716 changes)
1716 changes)
1717
1717
1718 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1718 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1719 unknown=False):
1719 unknown=False):
1720 """Return matched files only in ``self._status``
1720 """Return matched files only in ``self._status``
1721
1721
1722 Uncommitted files appear "clean" via this context, even if
1722 Uncommitted files appear "clean" via this context, even if
1723 they aren't actually so in the working directory.
1723 they aren't actually so in the working directory.
1724 """
1724 """
1725 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1725 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1726 if clean:
1726 if clean:
1727 clean = [f for f in self._manifest if f not in self._changedset]
1727 clean = [f for f in self._manifest if f not in self._changedset]
1728 else:
1728 else:
1729 clean = []
1729 clean = []
1730 return scmutil.status([f for f in self._status.modified if match(f)],
1730 return scmutil.status([f for f in self._status.modified if match(f)],
1731 [f for f in self._status.added if match(f)],
1731 [f for f in self._status.added if match(f)],
1732 [f for f in self._status.removed if match(f)],
1732 [f for f in self._status.removed if match(f)],
1733 [], [], [], clean)
1733 [], [], [], clean)
1734
1734
1735 @propertycache
1735 @propertycache
1736 def _changedset(self):
1736 def _changedset(self):
1737 """Return the set of files changed in this context
1737 """Return the set of files changed in this context
1738 """
1738 """
1739 changed = set(self._status.modified)
1739 changed = set(self._status.modified)
1740 changed.update(self._status.added)
1740 changed.update(self._status.added)
1741 changed.update(self._status.removed)
1741 changed.update(self._status.removed)
1742 return changed
1742 return changed
1743
1743
1744 class memctx(committablectx):
1744 class memctx(committablectx):
1745 """Use memctx to perform in-memory commits via localrepo.commitctx().
1745 """Use memctx to perform in-memory commits via localrepo.commitctx().
1746
1746
1747 Revision information is supplied at initialization time, while the
1747 Revision information is supplied at initialization time, while the
1748 related files' data is made available through a callback
1748 related files' data is made available through a callback
1749 mechanism. 'repo' is the current localrepo, 'parents' is a
1749 mechanism. 'repo' is the current localrepo, 'parents' is a
1750 sequence of two parent revision identifiers (pass None for every
1750 sequence of two parent revision identifiers (pass None for every
1751 missing parent), 'text' is the commit message and 'files' lists
1751 missing parent), 'text' is the commit message and 'files' lists
1752 names of files touched by the revision (normalized and relative to
1752 names of files touched by the revision (normalized and relative to
1753 repository root).
1753 repository root).
1754
1754
1755 filectxfn(repo, memctx, path) is a callable receiving the
1755 filectxfn(repo, memctx, path) is a callable receiving the
1756 repository, the current memctx object and the normalized path of
1756 repository, the current memctx object and the normalized path of
1757 the requested file, relative to the repository root. It is fired by the
1757 the requested file, relative to the repository root. It is fired by the
1758 commit function for every file in 'files', but the call order is
1758 commit function for every file in 'files', but the call order is
1759 undefined. If the file is available in the revision being
1759 undefined. If the file is available in the revision being
1760 committed (updated or added), filectxfn returns a memfilectx
1760 committed (updated or added), filectxfn returns a memfilectx
1761 object. If the file was removed, filectxfn raises an
1761 object. If the file was removed, filectxfn raises an
1762 IOError. Moved files are represented by marking the source file
1762 IOError. Moved files are represented by marking the source file
1763 removed and the new file added with copy information (see
1763 removed and the new file added with copy information (see
1764 memfilectx).
1764 memfilectx).
1765
1765
1766 user receives the committer name and defaults to current
1766 user receives the committer name and defaults to current
1767 repository username, date is the commit date in any format
1767 repository username, date is the commit date in any format
1768 supported by util.parsedate() and defaults to current date, extra
1768 supported by util.parsedate() and defaults to current date, extra
1769 is a dictionary of metadata or is left empty.
1769 is a dictionary of metadata or is left empty.
1770 """
1770 """
1771
1771
1772 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
1772 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
1773 # Extensions that need to retain compatibility across Mercurial 3.1 can use
1773 # Extensions that need to retain compatibility across Mercurial 3.1 can use
1774 # this field to determine what to do in filectxfn.
1774 # this field to determine what to do in filectxfn.
1775 _returnnoneformissingfiles = True
1775 _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [changectx(self._repo, p) for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        self.substate = {}

        # if store is not callable, wrap it in a function
        if not callable(filectxfn):
            def getfilectx(repo, memctx, path):
                fctx = filectxfn[path]
                # this is weird but apparently we only keep track of one parent
                # (why not only store that instead of a tuple?)
                copied = fctx.renamed()
                if copied:
                    copied = copied[0]
                return memfilectx(repo, path, fctx.data(),
                                  islink=fctx.islink(), isexec=fctx.isexec(),
                                  copied=copied, memctx=memctx)
            self._filectxfn = getfilectx
        else:
            # "util.cachefunc" reduces invocation of possibly expensive
            # "filectxfn" for performance (e.g. converting from another VCS)
            self._filectxfn = util.cachefunc(filectxfn)
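            # Note: util.cachefunc memoizes on its argument tuple, so repeated
            # lookups of the same path (from _status, _manifest and the
            # eventual commit) reuse the first result instead of re-invoking a
            # possibly expensive filectxfn.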

        if extra:
            self._extra = extra.copy()
        else:
            self._extra = {}

        if self._extra.get('branch', '') == '':
            self._extra['branch'] = 'default'

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if the file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

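        # The values stored here are provisional filelog nodes.  Assuming the
        # standard revlog scheme, revlog.hash(text, p1, p2) is the SHA-1 of
        # the two parent nodes (sorted) followed by the file data, so a file
        # whose content and parents are unchanged keeps the node it already
        # has.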
        for f in self._status.modified:
            p1node = nullid
            p2node = nullid
            p = pctx[f].parents() # if file isn't in pctx, check p2?
            if len(p) > 0:
                p1node = p[0].node()
            if len(p) > 1:
                p2node = p[1].node()
            man[f] = revlog.hash(self[f].data(), p1node, p2node)

        for f in self._status.added:
            man[f] = revlog.hash(self[f].data(), nullid, nullid)

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized with a two-element list.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

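        # Classify each file named at construction time: a path not tracked
        # in either parent manifest must be an addition; otherwise a truthy
        # return from filectxfn means there is content to commit (modified),
        # and a None/falsy return means the file is being removed.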
        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])

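# A minimal usage sketch of memctx (illustrative only, not part of this
# module's API): build one in-memory file, describe the commit, and let
# commit() hand it to repo.commitctx().  The repository object, path,
# contents and user below are placeholders chosen for the example.
def _examplememcommit(repo):
    def getfilectx(repo, mctx, path):
        if path == 'hello.txt':
            return memfilectx(repo, path, 'hello, world\n', memctx=mctx)
        return None  # any other requested path is treated as removed
    p1 = repo['.'].node()
    mctx = memctx(repo, [p1, None], 'example in-memory commit',
                  ['hello.txt'], getfilectx,
                  user='Example User <user@example.com>')
    return mctx.commit()  # node of the newly created changeset
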
class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, path, data, islink=False,
                 isexec=False, copied=None, memctx=None):
        """
        path is the normalized file path relative to the repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if the current file was copied in
        the revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, memctx)
        self._data = data
        self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
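        # 'l' and 'x' mirror the manifest flag convention: 'l' marks a
        # symbolic link, 'x' an executable file, '' a plain file.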
        self._copied = None
        if copied:
            self._copied = (copied, nullid)

    def data(self):
        return self._data
    def size(self):
        return len(self.data())
    def flags(self):
        return self._flags
    def renamed(self):
        return self._copied

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags):
        """wraps repo.wwrite"""
        self._data = data
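
# Illustrative sketch of how a rename could be expressed with these classes
# (placeholder names, not code used by this module): list both paths in
# 'files', return None for the removed source, and return a memfilectx
# carrying 'copied' for the destination, e.g.
#
#     files = ['old.txt', 'new.txt']
#     def getfilectx(repo, mctx, path):
#         if path == 'old.txt':
#             return None  # source is recorded as removed
#         return memfilectx(repo, path, data, copied='old.txt', memctx=mctx)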