##// END OF EJS Templates
context: replace match.bad() monkey-patching with match.badmatch()
Author: Matt Harbison
Revision r25435:a592a6a6 on the default branch
parent child Browse files
Show More
@@ -1,1913 +1,1911 b''
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import nullid, nullrev, short, hex, bin
8 from node import nullid, nullrev, short, hex, bin
9 from i18n import _
9 from i18n import _
10 import mdiff, error, util, scmutil, subrepo, patch, encoding, phases
10 import mdiff, error, util, scmutil, subrepo, patch, encoding, phases
11 import match as matchmod
11 import match as matchmod
12 import copy, os, errno, stat
12 import os, errno, stat
13 import obsolete as obsmod
13 import obsolete as obsmod
14 import repoview
14 import repoview
15 import fileset
15 import fileset
16 import revlog
16 import revlog
17
17
18 propertycache = util.propertycache
18 propertycache = util.propertycache
19
19
20 # Phony node value to stand-in for new files in some uses of
20 # Phony node value to stand-in for new files in some uses of
21 # manifests. Manifests support 21-byte hashes for nodes which are
21 # manifests. Manifests support 21-byte hashes for nodes which are
22 # dirty in the working copy.
22 # dirty in the working copy.
23 _newnode = '!' * 21
23 _newnode = '!' * 21
24
24
class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
    be committed,
    memctx: a context that represents changes in-memory and can also
    be committed."""
    def __new__(cls, repo, changeid='', *args, **kwargs):
        # Passing an existing context straight through means callers may
        # safely write e.g. changectx(repo, ctx) without re-resolving it.
        if isinstance(changeid, basectx):
            return changeid

        o = super(basectx, cls).__new__(cls)

        # Defaults: subclasses overwrite _rev/_node during __init__.
        o._repo = repo
        o._rev = nullrev
        o._node = nullid

        return o

    def __str__(self):
        # Short hex form of the node.
        return short(self.node())

    def __int__(self):
        return self.rev()

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        # Equal only when the concrete type and the revision both match.
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        # 'path in ctx' tests membership in this context's manifest.
        return key in self._manifest

    def __getitem__(self, key):
        # ctx[path] yields the file context for that path.
        return self.filectx(key)

    def __iter__(self):
        return iter(self._manifest)

    def _manifestmatches(self, match, s):
        """generate a new manifest filtered by the match argument

        This method is for internal use only and mainly exists to provide an
        object oriented way for other contexts to customize the manifest
        generation.
        """
        return self.manifest().matches(match)

    def _matchstatus(self, other, match):
        """return match.always if match is none

        This internal method provides a way for child objects to override the
        match operator.
        """
        return match or matchmod.always(self._repo.root, self._repo.getcwd())

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        if self.rev() is not None and self.rev() < other.rev():
            self.manifest()
        mf1 = other._manifestmatches(match, s)
        mf2 = self._manifestmatches(match, s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, clean=listclean)
        for fn, value in d.iteritems():
            if fn in deletedset:
                continue
            if value is None:
                # Both manifests agree (only emitted when clean=True).
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif node2 != _newnode:
                # The file was not a new file in mf2, so an entry
                # from diff is really a difference.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                # node2 was newnode, but the working file doesn't
                # match the one in mf1.
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [fn for fn in unknown if fn not in mf1]
            ignored = [fn for fn in ignored if fn not in mf1]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(modified, added, removed, deleted, unknown,
                              ignored, clean)

    @propertycache
    def substate(self):
        # Cached subrepository state mapping for this context.
        return subrepo.state(self, self._repo.ui)

    def subrev(self, subpath):
        return self.substate[subpath][1]

    def rev(self):
        return self._rev
    def node(self):
        return self._node
    def hex(self):
        return hex(self.node())
    def manifest(self):
        return self._manifest
    def repo(self):
        return self._repo
    def phasestr(self):
        return phases.phasenames[self.phase()]
    def mutable(self):
        # Anything above the public phase may still be rewritten.
        return self.phase() > phases.public

    def getfileset(self, expr):
        return fileset.getfileset(self, expr)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, 'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, 'extinct')

    def unstable(self):
        """True if the changeset is not obsolete but it's ancestor are"""
        return self.rev() in obsmod.getrevs(self._repo, 'unstable')

    def bumped(self):
        """True if the changeset try to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be bumped.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'bumped')

    def divergent(self):
        """Is a successors of a changeset with multiple possible successors set

        Only non-public and non-obsolete changesets may be divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'divergent')

    def troubled(self):
        """True if the changeset is either unstable, bumped or divergent"""
        return self.unstable() or self.bumped() or self.divergent()

    def troubles(self):
        """return the list of troubles affecting this changesets.

        Troubles are returned as strings. possible values are:
        - unstable,
        - bumped,
        - divergent.
        """
        troubles = []
        if self.unstable():
            troubles.append('unstable')
        if self.bumped():
            troubles.append('bumped')
        if self.divergent():
            troubles.append('divergent')
        return troubles

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        # Synthesize a null-revision context when there is no second parent.
        if len(self._parents) == 2:
            return self._parents[1]
        return changectx(self._repo, -1)

    def _fileinfo(self, path):
        # Return (filenode, flags) for path, raising ManifestLookupError when
        # absent.  Prefers already-cached manifest data, then the manifest
        # delta, then a targeted manifest.find().
        if '_manifest' in self.__dict__:
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(self._node, path,
                                                _('not found in manifest'))
        if '_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (self._manifestdelta[path],
                        self._manifestdelta.flags(path))
        node, flag = self._repo.manifest.find(self._changeset[0], path)
        if not node:
            raise error.ManifestLookupError(self._node, path,
                                            _('not found in manifest'))

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        # A path missing from the manifest simply has no flags.
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return ''

    def sub(self, path):
        return subrepo.subrepo(self, path)

    def nullsub(self, path, pctx):
        return subrepo.nullsubrepo(self, path, pctx)

    def match(self, pats=[], include=None, exclude=None, default='glob',
              listsubrepos=False):
        # NOTE(review): the mutable default for pats is safe only as long as
        # neither this method nor matchmod.match mutates it -- confirm.
        r = self._repo
        return matchmod.match(r.root, r.getcwd(), pats,
                              include, exclude, default,
                              auditor=r.auditor, ctx=self,
                              listsubrepos=listsubrepos)

    def diff(self, ctx2=None, match=None, **opts):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        diffopts = patch.diffopts(self._repo.ui, opts)
        return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)

    def dirs(self):
        return self._manifest.dirs()

    def hasdir(self, dir):
        return self._manifest.hasdir(dir)

    def dirty(self, missing=False, merge=True, branch=True):
        # A read-only context never has local modifications; mutable
        # subclasses are expected to override this.
        return False

    def status(self, other=None, match=None, listignored=False,
               listclean=False, listunknown=False, listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        #
        # NOTE: 'reversed' shadows the builtin within this method.
        reversed = False
        if (not isinstance(ctx1, changectx)
            and isinstance(ctx2, changectx)):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
                              listunknown)

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
                               r.clean)

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                rev2 = ctx2.subrev(subpath)
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    # Fold subrepo results into the parent's lists, with
                    # paths rewritten relative to the parent repo root.
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self._repo.ui.status(_("skipping missing "
                                           "subrepository: %s\n") % subpath)

        for l in r:
            l.sort()

        return r
343
343
344
344
def makememctx(repo, parents, text, user, date, branch, files, store,
               editor=None, extra=None):
    """Build an uncommitted memctx whose file contents come from *store*.

    ``store.getfile(path)`` must yield ``(data, (islink, isexec), copied)``;
    a ``None`` data value marks the file as absent in this context.  When
    *branch* is given it is recorded (encoded) in the extra dict.
    """
    def getfilectx(repo, memctx, path):
        # Pull the file payload out of the backing store.
        data, mode, copied = store.getfile(path)
        if data is None:
            return None
        islink, isexec = mode
        return memfilectx(repo, path, data, islink=islink, isexec=isexec,
                          copied=copied, memctx=memctx)

    if extra is None:
        extra = {}
    if branch:
        extra['branch'] = encoding.fromlocal(branch)
    return memctx(repo, parents, text, files, getfilectx, user,
                  date, extra, editor)
361
361
362 class changectx(basectx):
362 class changectx(basectx):
363 """A changecontext object makes access to data related to a particular
363 """A changecontext object makes access to data related to a particular
364 changeset convenient. It represents a read-only context already present in
364 changeset convenient. It represents a read-only context already present in
365 the repo."""
365 the repo."""
    def __init__(self, repo, changeid=''):
        """changeid is a revision number, node, or tag

        Resolution is attempted in cheap-to-expensive order: integer rev,
        the special names 'null'/'tip'/'.', a 20-byte binary node, a
        stringified integer, a 40-hex node, the pluggable name registry
        (e.g. bookmarks), and finally an unambiguous node prefix.  Failure
        raises RepoLookupError (or a Filtered* variant when the revision
        exists but is hidden/filtered).
        """

        # since basectx.__new__ already took care of copying the object, we
        # don't need to do anything in __init__, so we just exit here
        if isinstance(changeid, basectx):
            return

        # The empty changeid means the first dirstate parent ('.').
        if changeid == '':
            changeid = '.'
        self._repo = repo

        try:
            if isinstance(changeid, int):
                self._node = repo.changelog.node(changeid)
                self._rev = changeid
                return
            if isinstance(changeid, long):
                # Normalize Python 2 longs to strings and fall through to
                # the string-based resolution below.
                changeid = str(changeid)
            if changeid == 'null':
                self._node = nullid
                self._rev = nullrev
                return
            if changeid == 'tip':
                self._node = repo.changelog.tip()
                self._rev = repo.changelog.rev(self._node)
                return
            if changeid == '.' or changeid == repo.dirstate.p1():
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                self._node = repo.dirstate.p1()
                self._rev = repo.unfiltered().changelog.rev(self._node)
                return
            if len(changeid) == 20:
                # Looks like a binary node.
                try:
                    self._node = changeid
                    self._rev = repo.changelog.rev(changeid)
                    return
                except error.FilteredRepoLookupError:
                    raise
                except LookupError:
                    pass

            try:
                # A decimal revision number, possibly negative (counted
                # from the end of the changelog).
                r = int(changeid)
                if str(r) != changeid:
                    raise ValueError
                l = len(repo.changelog)
                if r < 0:
                    r += l
                if r < 0 or r >= l:
                    raise ValueError
                self._rev = r
                self._node = repo.changelog.node(r)
                return
            except error.FilteredIndexError:
                raise
            except (ValueError, OverflowError, IndexError):
                pass

            if len(changeid) == 40:
                # A full 40-character hex node.
                try:
                    self._node = bin(changeid)
                    self._rev = repo.changelog.rev(self._node)
                    return
                except error.FilteredLookupError:
                    raise
                except (TypeError, LookupError):
                    pass

            # lookup bookmarks through the name interface
            try:
                self._node = repo.names.singlenode(repo, changeid)
                self._rev = repo.changelog.rev(self._node)
                return
            except KeyError:
                pass
            except error.FilteredRepoLookupError:
                raise
            except error.RepoLookupError:
                pass

            # Last resort: an unambiguous node-hex prefix.
            self._node = repo.unfiltered().changelog._partialmatch(changeid)
            if self._node is not None:
                self._rev = repo.changelog.rev(self._node)
                return

            # lookup failed
            # check if it might have come from damaged dirstate
            #
            # XXX we could avoid the unfiltered if we had a recognizable
            # exception for filtered changeset access
            if changeid in repo.unfiltered().dirstate.parents():
                msg = _("working directory has unknown parent '%s'!")
                raise error.Abort(msg % short(changeid))
            try:
                # Report binary nodes in hex in the final error message.
                if len(changeid) == 20:
                    changeid = hex(changeid)
            except TypeError:
                pass
        except (error.FilteredIndexError, error.FilteredLookupError,
                error.FilteredRepoLookupError):
            # The revision exists but the current view filters it out;
            # give a friendlier message for the common 'hidden' case.
            if repo.filtername.startswith('visible'):
                msg = _("hidden revision '%s'") % changeid
                hint = _('use --hidden to access hidden revisions')
                raise error.FilteredRepoLookupError(msg, hint=hint)
            msg = _("filtered revision '%s' (not in '%s' subset)")
            msg %= (changeid, repo.filtername)
            raise error.FilteredRepoLookupError(msg)
        except IndexError:
            pass
        raise error.RepoLookupError(
            _("unknown revision '%s'") % changeid)
479
479
480 def __hash__(self):
480 def __hash__(self):
481 try:
481 try:
482 return hash(self._rev)
482 return hash(self._rev)
483 except AttributeError:
483 except AttributeError:
484 return id(self)
484 return id(self)
485
485
    def __nonzero__(self):
        # Only the null revision is falsy.
        return self._rev != nullrev
488
488
    @propertycache
    def _changeset(self):
        # Lazily-read changelog entry for this revision (cached by
        # propertycache on first access).
        return self._repo.changelog.read(self.rev())
492
492
    @propertycache
    def _manifest(self):
        # Full manifest, read via the manifest node stored in field 0 of
        # the changelog entry (cached).
        return self._repo.manifest.read(self._changeset[0])
496
496
    @propertycache
    def _manifestdelta(self):
        # Delta form of the manifest (cached); used by basectx._fileinfo
        # for cheap single-file lookups without a full manifest read.
        return self._repo.manifest.readdelta(self._changeset[0])
500
500
501 @propertycache
501 @propertycache
502 def _parents(self):
502 def _parents(self):
503 p = self._repo.changelog.parentrevs(self._rev)
503 p = self._repo.changelog.parentrevs(self._rev)
504 if p[1] == nullrev:
504 if p[1] == nullrev:
505 p = p[:-1]
505 p = p[:-1]
506 return [changectx(self._repo, x) for x in p]
506 return [changectx(self._repo, x) for x in p]
507
507
    def changeset(self):
        # Raw changelog entry tuple: (manifest node, user, date, files,
        # description, extra) -- see the accessors below.
        return self._changeset
    def manifestnode(self):
        # Field 0 of the changelog entry is the manifest node.
        return self._changeset[0]
512
512
    def user(self):
        # Field 1 of the changelog entry: the committer.
        return self._changeset[1]
    def date(self):
        # Field 2 of the changelog entry: the commit date.
        return self._changeset[2]
    def files(self):
        # Field 3 of the changelog entry: files touched by this changeset.
        return self._changeset[3]
    def description(self):
        # Field 4 of the changelog entry: the commit message.
        return self._changeset[4]
    def branch(self):
        # Branch name lives in the extra dict (field 5), stored in repo
        # encoding; convert to the local encoding for display.
        return encoding.tolocal(self._changeset[5].get("branch"))
    def closesbranch(self):
        # A 'close' key in the extra dict marks a branch-closing commit.
        return 'close' in self._changeset[5]
    def extra(self):
        # Field 5 of the changelog entry: the extra metadata dict.
        return self._changeset[5]
    def tags(self):
        # Tags attached to this node, per the repo's tag map.
        return self._repo.nodetags(self._node)
529 def bookmarks(self):
529 def bookmarks(self):
530 return self._repo.nodebookmarks(self._node)
530 return self._repo.nodebookmarks(self._node)
531 def phase(self):
531 def phase(self):
532 return self._repo._phasecache.phase(self._repo, self._rev)
532 return self._repo._phasecache.phase(self._repo, self._rev)
533 def hidden(self):
533 def hidden(self):
534 return self._rev in repoview.filterrevs(self._repo, 'visible')
534 return self._rev in repoview.filterrevs(self._repo, 'visible')
535
535
536 def children(self):
536 def children(self):
537 """return contexts for each child changeset"""
537 """return contexts for each child changeset"""
538 c = self._repo.changelog.children(self._node)
538 c = self._repo.changelog.children(self._node)
539 return [changectx(self._repo, x) for x in c]
539 return [changectx(self._repo, x) for x in c]
540
540
541 def ancestors(self):
541 def ancestors(self):
542 for a in self._repo.changelog.ancestors([self._rev]):
542 for a in self._repo.changelog.ancestors([self._rev]):
543 yield changectx(self._repo, a)
543 yield changectx(self._repo, a)
544
544
545 def descendants(self):
545 def descendants(self):
546 for d in self._repo.changelog.descendants([self._rev]):
546 for d in self._repo.changelog.descendants([self._rev]):
547 yield changectx(self._repo, d)
547 yield changectx(self._repo, d)
548
548
549 def filectx(self, path, fileid=None, filelog=None):
549 def filectx(self, path, fileid=None, filelog=None):
550 """get a file context from this changeset"""
550 """get a file context from this changeset"""
551 if fileid is None:
551 if fileid is None:
552 fileid = self.filenode(path)
552 fileid = self.filenode(path)
553 return filectx(self._repo, path, fileid=fileid,
553 return filectx(self._repo, path, fileid=fileid,
554 changectx=self, filelog=filelog)
554 changectx=self, filelog=filelog)
555
555
556 def ancestor(self, c2, warn=False):
556 def ancestor(self, c2, warn=False):
557 """return the "best" ancestor context of self and c2
557 """return the "best" ancestor context of self and c2
558
558
559 If there are multiple candidates, it will show a message and check
559 If there are multiple candidates, it will show a message and check
560 merge.preferancestor configuration before falling back to the
560 merge.preferancestor configuration before falling back to the
561 revlog ancestor."""
561 revlog ancestor."""
562 # deal with workingctxs
562 # deal with workingctxs
563 n2 = c2._node
563 n2 = c2._node
564 if n2 is None:
564 if n2 is None:
565 n2 = c2._parents[0]._node
565 n2 = c2._parents[0]._node
566 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
566 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
567 if not cahs:
567 if not cahs:
568 anc = nullid
568 anc = nullid
569 elif len(cahs) == 1:
569 elif len(cahs) == 1:
570 anc = cahs[0]
570 anc = cahs[0]
571 else:
571 else:
572 for r in self._repo.ui.configlist('merge', 'preferancestor'):
572 for r in self._repo.ui.configlist('merge', 'preferancestor'):
573 try:
573 try:
574 ctx = changectx(self._repo, r)
574 ctx = changectx(self._repo, r)
575 except error.RepoLookupError:
575 except error.RepoLookupError:
576 continue
576 continue
577 anc = ctx.node()
577 anc = ctx.node()
578 if anc in cahs:
578 if anc in cahs:
579 break
579 break
580 else:
580 else:
581 anc = self._repo.changelog.ancestor(self._node, n2)
581 anc = self._repo.changelog.ancestor(self._node, n2)
582 if warn:
582 if warn:
583 self._repo.ui.status(
583 self._repo.ui.status(
584 (_("note: using %s as ancestor of %s and %s\n") %
584 (_("note: using %s as ancestor of %s and %s\n") %
585 (short(anc), short(self._node), short(n2))) +
585 (short(anc), short(self._node), short(n2))) +
586 ''.join(_(" alternatively, use --config "
586 ''.join(_(" alternatively, use --config "
587 "merge.preferancestor=%s\n") %
587 "merge.preferancestor=%s\n") %
588 short(n) for n in sorted(cahs) if n != anc))
588 short(n) for n in sorted(cahs) if n != anc))
589 return changectx(self._repo, anc)
589 return changectx(self._repo, anc)
590
590
591 def descendant(self, other):
591 def descendant(self, other):
592 """True if other is descendant of this changeset"""
592 """True if other is descendant of this changeset"""
593 return self._repo.changelog.descendant(self._rev, other._rev)
593 return self._repo.changelog.descendant(self._rev, other._rev)
594
594
595 def walk(self, match):
595 def walk(self, match):
596 '''Generates matching file names.'''
596 '''Generates matching file names.'''
597
597
598 # Override match.bad method to have message with nodeid
598 # Wrap match.bad method to have message with nodeid
599 match = copy.copy(match)
600 oldbad = match.bad
601 def bad(fn, msg):
599 def bad(fn, msg):
602 # The manifest doesn't know about subrepos, so don't complain about
600 # The manifest doesn't know about subrepos, so don't complain about
603 # paths into valid subrepos.
601 # paths into valid subrepos.
604 if any(fn == s or fn.startswith(s + '/')
602 if any(fn == s or fn.startswith(s + '/')
605 for s in self.substate):
603 for s in self.substate):
606 return
604 return
607 oldbad(fn, _('no such file in rev %s') % self)
605 match.bad(fn, _('no such file in rev %s') % self)
608 match.bad = bad
609
606
610 return self._manifest.walk(match)
607 m = matchmod.badmatch(match, bad)
608 return self._manifest.walk(m)
611
609
612 def matches(self, match):
610 def matches(self, match):
613 return self.walk(match)
611 return self.walk(match)
614
612
class basefilectx(object):
    """A filecontext object represents the common logic for its children:
    filectx: read-only access to a filerevision that is already present
             in the repo,
    workingfilectx: a filecontext that represents files from the working
                    directory,
    memfilectx: a filecontext that represents files in-memory."""
    def __new__(cls, repo, path, *args, **kwargs):
        # plain allocation; subclasses handle their own initialization
        return super(basefilectx, cls).__new__(cls)
625 @propertycache
623 @propertycache
626 def _filelog(self):
624 def _filelog(self):
627 return self._repo.file(self._path)
625 return self._repo.file(self._path)
628
626
629 @propertycache
627 @propertycache
630 def _changeid(self):
628 def _changeid(self):
631 if '_changeid' in self.__dict__:
629 if '_changeid' in self.__dict__:
632 return self._changeid
630 return self._changeid
633 elif '_changectx' in self.__dict__:
631 elif '_changectx' in self.__dict__:
634 return self._changectx.rev()
632 return self._changectx.rev()
635 elif '_descendantrev' in self.__dict__:
633 elif '_descendantrev' in self.__dict__:
636 # this file context was created from a revision with a known
634 # this file context was created from a revision with a known
637 # descendant, we can (lazily) correct for linkrev aliases
635 # descendant, we can (lazily) correct for linkrev aliases
638 return self._adjustlinkrev(self._path, self._filelog,
636 return self._adjustlinkrev(self._path, self._filelog,
639 self._filenode, self._descendantrev)
637 self._filenode, self._descendantrev)
640 else:
638 else:
641 return self._filelog.linkrev(self._filerev)
639 return self._filelog.linkrev(self._filerev)
642
640
643 @propertycache
641 @propertycache
644 def _filenode(self):
642 def _filenode(self):
645 if '_fileid' in self.__dict__:
643 if '_fileid' in self.__dict__:
646 return self._filelog.lookup(self._fileid)
644 return self._filelog.lookup(self._fileid)
647 else:
645 else:
648 return self._changectx.filenode(self._path)
646 return self._changectx.filenode(self._path)
649
647
650 @propertycache
648 @propertycache
651 def _filerev(self):
649 def _filerev(self):
652 return self._filelog.rev(self._filenode)
650 return self._filelog.rev(self._filenode)
653
651
654 @propertycache
652 @propertycache
655 def _repopath(self):
653 def _repopath(self):
656 return self._path
654 return self._path
657
655
658 def __nonzero__(self):
656 def __nonzero__(self):
659 try:
657 try:
660 self._filenode
658 self._filenode
661 return True
659 return True
662 except error.LookupError:
660 except error.LookupError:
663 # file is missing
661 # file is missing
664 return False
662 return False
665
663
666 def __str__(self):
664 def __str__(self):
667 return "%s@%s" % (self.path(), self._changectx)
665 return "%s@%s" % (self.path(), self._changectx)
668
666
669 def __repr__(self):
667 def __repr__(self):
670 return "<%s %s>" % (type(self).__name__, str(self))
668 return "<%s %s>" % (type(self).__name__, str(self))
671
669
672 def __hash__(self):
670 def __hash__(self):
673 try:
671 try:
674 return hash((self._path, self._filenode))
672 return hash((self._path, self._filenode))
675 except AttributeError:
673 except AttributeError:
676 return id(self)
674 return id(self)
677
675
678 def __eq__(self, other):
676 def __eq__(self, other):
679 try:
677 try:
680 return (type(self) == type(other) and self._path == other._path
678 return (type(self) == type(other) and self._path == other._path
681 and self._filenode == other._filenode)
679 and self._filenode == other._filenode)
682 except AttributeError:
680 except AttributeError:
683 return False
681 return False
684
682
685 def __ne__(self, other):
683 def __ne__(self, other):
686 return not (self == other)
684 return not (self == other)
687
685
688 def filerev(self):
686 def filerev(self):
689 return self._filerev
687 return self._filerev
690 def filenode(self):
688 def filenode(self):
691 return self._filenode
689 return self._filenode
692 def flags(self):
690 def flags(self):
693 return self._changectx.flags(self._path)
691 return self._changectx.flags(self._path)
694 def filelog(self):
692 def filelog(self):
695 return self._filelog
693 return self._filelog
696 def rev(self):
694 def rev(self):
697 return self._changeid
695 return self._changeid
698 def linkrev(self):
696 def linkrev(self):
699 return self._filelog.linkrev(self._filerev)
697 return self._filelog.linkrev(self._filerev)
700 def node(self):
698 def node(self):
701 return self._changectx.node()
699 return self._changectx.node()
702 def hex(self):
700 def hex(self):
703 return self._changectx.hex()
701 return self._changectx.hex()
704 def user(self):
702 def user(self):
705 return self._changectx.user()
703 return self._changectx.user()
706 def date(self):
704 def date(self):
707 return self._changectx.date()
705 return self._changectx.date()
708 def files(self):
706 def files(self):
709 return self._changectx.files()
707 return self._changectx.files()
710 def description(self):
708 def description(self):
711 return self._changectx.description()
709 return self._changectx.description()
712 def branch(self):
710 def branch(self):
713 return self._changectx.branch()
711 return self._changectx.branch()
714 def extra(self):
712 def extra(self):
715 return self._changectx.extra()
713 return self._changectx.extra()
716 def phase(self):
714 def phase(self):
717 return self._changectx.phase()
715 return self._changectx.phase()
718 def phasestr(self):
716 def phasestr(self):
719 return self._changectx.phasestr()
717 return self._changectx.phasestr()
720 def manifest(self):
718 def manifest(self):
721 return self._changectx.manifest()
719 return self._changectx.manifest()
722 def changectx(self):
720 def changectx(self):
723 return self._changectx
721 return self._changectx
724 def repo(self):
722 def repo(self):
725 return self._repo
723 return self._repo
726
724
727 def path(self):
725 def path(self):
728 return self._path
726 return self._path
729
727
730 def isbinary(self):
728 def isbinary(self):
731 try:
729 try:
732 return util.binary(self.data())
730 return util.binary(self.data())
733 except IOError:
731 except IOError:
734 return False
732 return False
735 def isexec(self):
733 def isexec(self):
736 return 'x' in self.flags()
734 return 'x' in self.flags()
737 def islink(self):
735 def islink(self):
738 return 'l' in self.flags()
736 return 'l' in self.flags()
739
737
740 def cmp(self, fctx):
738 def cmp(self, fctx):
741 """compare with other file context
739 """compare with other file context
742
740
743 returns True if different than fctx.
741 returns True if different than fctx.
744 """
742 """
745 if (fctx._filerev is None
743 if (fctx._filerev is None
746 and (self._repo._encodefilterpats
744 and (self._repo._encodefilterpats
747 # if file data starts with '\1\n', empty metadata block is
745 # if file data starts with '\1\n', empty metadata block is
748 # prepended, which adds 4 bytes to filelog.size().
746 # prepended, which adds 4 bytes to filelog.size().
749 or self.size() - 4 == fctx.size())
747 or self.size() - 4 == fctx.size())
750 or self.size() == fctx.size()):
748 or self.size() == fctx.size()):
751 return self._filelog.cmp(self._filenode, fctx.data())
749 return self._filelog.cmp(self._filenode, fctx.data())
752
750
753 return True
751 return True
754
752
755 def _adjustlinkrev(self, path, filelog, fnode, srcrev, inclusive=False):
753 def _adjustlinkrev(self, path, filelog, fnode, srcrev, inclusive=False):
756 """return the first ancestor of <srcrev> introducing <fnode>
754 """return the first ancestor of <srcrev> introducing <fnode>
757
755
758 If the linkrev of the file revision does not point to an ancestor of
756 If the linkrev of the file revision does not point to an ancestor of
759 srcrev, we'll walk down the ancestors until we find one introducing
757 srcrev, we'll walk down the ancestors until we find one introducing
760 this file revision.
758 this file revision.
761
759
762 :repo: a localrepository object (used to access changelog and manifest)
760 :repo: a localrepository object (used to access changelog and manifest)
763 :path: the file path
761 :path: the file path
764 :fnode: the nodeid of the file revision
762 :fnode: the nodeid of the file revision
765 :filelog: the filelog of this path
763 :filelog: the filelog of this path
766 :srcrev: the changeset revision we search ancestors from
764 :srcrev: the changeset revision we search ancestors from
767 :inclusive: if true, the src revision will also be checked
765 :inclusive: if true, the src revision will also be checked
768 """
766 """
769 repo = self._repo
767 repo = self._repo
770 cl = repo.unfiltered().changelog
768 cl = repo.unfiltered().changelog
771 ma = repo.manifest
769 ma = repo.manifest
772 # fetch the linkrev
770 # fetch the linkrev
773 fr = filelog.rev(fnode)
771 fr = filelog.rev(fnode)
774 lkr = filelog.linkrev(fr)
772 lkr = filelog.linkrev(fr)
775 # hack to reuse ancestor computation when searching for renames
773 # hack to reuse ancestor computation when searching for renames
776 memberanc = getattr(self, '_ancestrycontext', None)
774 memberanc = getattr(self, '_ancestrycontext', None)
777 iteranc = None
775 iteranc = None
778 if srcrev is None:
776 if srcrev is None:
779 # wctx case, used by workingfilectx during mergecopy
777 # wctx case, used by workingfilectx during mergecopy
780 revs = [p.rev() for p in self._repo[None].parents()]
778 revs = [p.rev() for p in self._repo[None].parents()]
781 inclusive = True # we skipped the real (revless) source
779 inclusive = True # we skipped the real (revless) source
782 else:
780 else:
783 revs = [srcrev]
781 revs = [srcrev]
784 if memberanc is None:
782 if memberanc is None:
785 memberanc = iteranc = cl.ancestors(revs, lkr,
783 memberanc = iteranc = cl.ancestors(revs, lkr,
786 inclusive=inclusive)
784 inclusive=inclusive)
787 # check if this linkrev is an ancestor of srcrev
785 # check if this linkrev is an ancestor of srcrev
788 if lkr not in memberanc:
786 if lkr not in memberanc:
789 if iteranc is None:
787 if iteranc is None:
790 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
788 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
791 for a in iteranc:
789 for a in iteranc:
792 ac = cl.read(a) # get changeset data (we avoid object creation)
790 ac = cl.read(a) # get changeset data (we avoid object creation)
793 if path in ac[3]: # checking the 'files' field.
791 if path in ac[3]: # checking the 'files' field.
794 # The file has been touched, check if the content is
792 # The file has been touched, check if the content is
795 # similar to the one we search for.
793 # similar to the one we search for.
796 if fnode == ma.readfast(ac[0]).get(path):
794 if fnode == ma.readfast(ac[0]).get(path):
797 return a
795 return a
798 # In theory, we should never get out of that loop without a result.
796 # In theory, we should never get out of that loop without a result.
799 # But if manifest uses a buggy file revision (not children of the
797 # But if manifest uses a buggy file revision (not children of the
800 # one it replaces) we could. Such a buggy situation will likely
798 # one it replaces) we could. Such a buggy situation will likely
801 # result is crash somewhere else at to some point.
799 # result is crash somewhere else at to some point.
802 return lkr
800 return lkr
803
801
804 def introrev(self):
802 def introrev(self):
805 """return the rev of the changeset which introduced this file revision
803 """return the rev of the changeset which introduced this file revision
806
804
807 This method is different from linkrev because it take into account the
805 This method is different from linkrev because it take into account the
808 changeset the filectx was created from. It ensures the returned
806 changeset the filectx was created from. It ensures the returned
809 revision is one of its ancestors. This prevents bugs from
807 revision is one of its ancestors. This prevents bugs from
810 'linkrev-shadowing' when a file revision is used by multiple
808 'linkrev-shadowing' when a file revision is used by multiple
811 changesets.
809 changesets.
812 """
810 """
813 lkr = self.linkrev()
811 lkr = self.linkrev()
814 attrs = vars(self)
812 attrs = vars(self)
815 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
813 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
816 if noctx or self.rev() == lkr:
814 if noctx or self.rev() == lkr:
817 return self.linkrev()
815 return self.linkrev()
818 return self._adjustlinkrev(self._path, self._filelog, self._filenode,
816 return self._adjustlinkrev(self._path, self._filelog, self._filenode,
819 self.rev(), inclusive=True)
817 self.rev(), inclusive=True)
820
818
821 def _parentfilectx(self, path, fileid, filelog):
819 def _parentfilectx(self, path, fileid, filelog):
822 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
820 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
823 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
821 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
824 if '_changeid' in vars(self) or '_changectx' in vars(self):
822 if '_changeid' in vars(self) or '_changectx' in vars(self):
825 # If self is associated with a changeset (probably explicitly
823 # If self is associated with a changeset (probably explicitly
826 # fed), ensure the created filectx is associated with a
824 # fed), ensure the created filectx is associated with a
827 # changeset that is an ancestor of self.changectx.
825 # changeset that is an ancestor of self.changectx.
828 # This lets us later use _adjustlinkrev to get a correct link.
826 # This lets us later use _adjustlinkrev to get a correct link.
829 fctx._descendantrev = self.rev()
827 fctx._descendantrev = self.rev()
830 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
828 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
831 elif '_descendantrev' in vars(self):
829 elif '_descendantrev' in vars(self):
832 # Otherwise propagate _descendantrev if we have one associated.
830 # Otherwise propagate _descendantrev if we have one associated.
833 fctx._descendantrev = self._descendantrev
831 fctx._descendantrev = self._descendantrev
834 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
832 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
835 return fctx
833 return fctx
836
834
837 def parents(self):
835 def parents(self):
838 _path = self._path
836 _path = self._path
839 fl = self._filelog
837 fl = self._filelog
840 parents = self._filelog.parents(self._filenode)
838 parents = self._filelog.parents(self._filenode)
841 pl = [(_path, node, fl) for node in parents if node != nullid]
839 pl = [(_path, node, fl) for node in parents if node != nullid]
842
840
843 r = fl.renamed(self._filenode)
841 r = fl.renamed(self._filenode)
844 if r:
842 if r:
845 # - In the simple rename case, both parent are nullid, pl is empty.
843 # - In the simple rename case, both parent are nullid, pl is empty.
846 # - In case of merge, only one of the parent is null id and should
844 # - In case of merge, only one of the parent is null id and should
847 # be replaced with the rename information. This parent is -always-
845 # be replaced with the rename information. This parent is -always-
848 # the first one.
846 # the first one.
849 #
847 #
850 # As null id have always been filtered out in the previous list
848 # As null id have always been filtered out in the previous list
851 # comprehension, inserting to 0 will always result in "replacing
849 # comprehension, inserting to 0 will always result in "replacing
852 # first nullid parent with rename information.
850 # first nullid parent with rename information.
853 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
851 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
854
852
855 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
853 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
856
854
857 def p1(self):
855 def p1(self):
858 return self.parents()[0]
856 return self.parents()[0]
859
857
860 def p2(self):
858 def p2(self):
861 p = self.parents()
859 p = self.parents()
862 if len(p) == 2:
860 if len(p) == 2:
863 return p[1]
861 return p[1]
864 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
862 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
865
863
866 def annotate(self, follow=False, linenumber=None, diffopts=None):
864 def annotate(self, follow=False, linenumber=None, diffopts=None):
867 '''returns a list of tuples of (ctx, line) for each line
865 '''returns a list of tuples of (ctx, line) for each line
868 in the file, where ctx is the filectx of the node where
866 in the file, where ctx is the filectx of the node where
869 that line was last changed.
867 that line was last changed.
870 This returns tuples of ((ctx, linenumber), line) for each line,
868 This returns tuples of ((ctx, linenumber), line) for each line,
871 if "linenumber" parameter is NOT "None".
869 if "linenumber" parameter is NOT "None".
872 In such tuples, linenumber means one at the first appearance
870 In such tuples, linenumber means one at the first appearance
873 in the managed file.
871 in the managed file.
874 To reduce annotation cost,
872 To reduce annotation cost,
875 this returns fixed value(False is used) as linenumber,
873 this returns fixed value(False is used) as linenumber,
876 if "linenumber" parameter is "False".'''
874 if "linenumber" parameter is "False".'''
877
875
878 if linenumber is None:
876 if linenumber is None:
879 def decorate(text, rev):
877 def decorate(text, rev):
880 return ([rev] * len(text.splitlines()), text)
878 return ([rev] * len(text.splitlines()), text)
881 elif linenumber:
879 elif linenumber:
882 def decorate(text, rev):
880 def decorate(text, rev):
883 size = len(text.splitlines())
881 size = len(text.splitlines())
884 return ([(rev, i) for i in xrange(1, size + 1)], text)
882 return ([(rev, i) for i in xrange(1, size + 1)], text)
885 else:
883 else:
886 def decorate(text, rev):
884 def decorate(text, rev):
887 return ([(rev, False)] * len(text.splitlines()), text)
885 return ([(rev, False)] * len(text.splitlines()), text)
888
886
889 def pair(parent, child):
887 def pair(parent, child):
890 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts,
888 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts,
891 refine=True)
889 refine=True)
892 for (a1, a2, b1, b2), t in blocks:
890 for (a1, a2, b1, b2), t in blocks:
893 # Changed blocks ('!') or blocks made only of blank lines ('~')
891 # Changed blocks ('!') or blocks made only of blank lines ('~')
894 # belong to the child.
892 # belong to the child.
895 if t == '=':
893 if t == '=':
896 child[0][b1:b2] = parent[0][a1:a2]
894 child[0][b1:b2] = parent[0][a1:a2]
897 return child
895 return child
898
896
899 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
897 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
900
898
901 def parents(f):
899 def parents(f):
902 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
900 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
903 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
901 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
904 # from the topmost introrev (= srcrev) down to p.linkrev() if it
902 # from the topmost introrev (= srcrev) down to p.linkrev() if it
905 # isn't an ancestor of the srcrev.
903 # isn't an ancestor of the srcrev.
906 f._changeid
904 f._changeid
907 pl = f.parents()
905 pl = f.parents()
908
906
909 # Don't return renamed parents if we aren't following.
907 # Don't return renamed parents if we aren't following.
910 if not follow:
908 if not follow:
911 pl = [p for p in pl if p.path() == f.path()]
909 pl = [p for p in pl if p.path() == f.path()]
912
910
913 # renamed filectx won't have a filelog yet, so set it
911 # renamed filectx won't have a filelog yet, so set it
914 # from the cache to save time
912 # from the cache to save time
915 for p in pl:
913 for p in pl:
916 if not '_filelog' in p.__dict__:
914 if not '_filelog' in p.__dict__:
917 p._filelog = getlog(p.path())
915 p._filelog = getlog(p.path())
918
916
919 return pl
917 return pl
920
918
921 # use linkrev to find the first changeset where self appeared
919 # use linkrev to find the first changeset where self appeared
922 base = self
920 base = self
923 introrev = self.introrev()
921 introrev = self.introrev()
924 if self.rev() != introrev:
922 if self.rev() != introrev:
925 base = self.filectx(self.filenode(), changeid=introrev)
923 base = self.filectx(self.filenode(), changeid=introrev)
926 if getattr(base, '_ancestrycontext', None) is None:
924 if getattr(base, '_ancestrycontext', None) is None:
927 cl = self._repo.changelog
925 cl = self._repo.changelog
928 if introrev is None:
926 if introrev is None:
929 # wctx is not inclusive, but works because _ancestrycontext
927 # wctx is not inclusive, but works because _ancestrycontext
930 # is used to test filelog revisions
928 # is used to test filelog revisions
931 ac = cl.ancestors([p.rev() for p in base.parents()],
929 ac = cl.ancestors([p.rev() for p in base.parents()],
932 inclusive=True)
930 inclusive=True)
933 else:
931 else:
934 ac = cl.ancestors([introrev], inclusive=True)
932 ac = cl.ancestors([introrev], inclusive=True)
935 base._ancestrycontext = ac
933 base._ancestrycontext = ac
936
934
937 # This algorithm would prefer to be recursive, but Python is a
935 # This algorithm would prefer to be recursive, but Python is a
938 # bit recursion-hostile. Instead we do an iterative
936 # bit recursion-hostile. Instead we do an iterative
939 # depth-first search.
937 # depth-first search.
940
938
941 visit = [base]
939 visit = [base]
942 hist = {}
940 hist = {}
943 pcache = {}
941 pcache = {}
944 needed = {base: 1}
942 needed = {base: 1}
945 while visit:
943 while visit:
946 f = visit[-1]
944 f = visit[-1]
947 pcached = f in pcache
945 pcached = f in pcache
948 if not pcached:
946 if not pcached:
949 pcache[f] = parents(f)
947 pcache[f] = parents(f)
950
948
951 ready = True
949 ready = True
952 pl = pcache[f]
950 pl = pcache[f]
953 for p in pl:
951 for p in pl:
954 if p not in hist:
952 if p not in hist:
955 ready = False
953 ready = False
956 visit.append(p)
954 visit.append(p)
957 if not pcached:
955 if not pcached:
958 needed[p] = needed.get(p, 0) + 1
956 needed[p] = needed.get(p, 0) + 1
959 if ready:
957 if ready:
960 visit.pop()
958 visit.pop()
961 reusable = f in hist
959 reusable = f in hist
962 if reusable:
960 if reusable:
963 curr = hist[f]
961 curr = hist[f]
964 else:
962 else:
965 curr = decorate(f.data(), f)
963 curr = decorate(f.data(), f)
966 for p in pl:
964 for p in pl:
967 if not reusable:
965 if not reusable:
968 curr = pair(hist[p], curr)
966 curr = pair(hist[p], curr)
969 if needed[p] == 1:
967 if needed[p] == 1:
970 del hist[p]
968 del hist[p]
971 del needed[p]
969 del needed[p]
972 else:
970 else:
973 needed[p] -= 1
971 needed[p] -= 1
974
972
975 hist[f] = curr
973 hist[f] = curr
976 pcache[f] = []
974 pcache[f] = []
977
975
978 return zip(hist[base][0], hist[base][1].splitlines(True))
976 return zip(hist[base][0], hist[base][1].splitlines(True))
979
977
980 def ancestors(self, followfirst=False):
978 def ancestors(self, followfirst=False):
981 visit = {}
979 visit = {}
982 c = self
980 c = self
983 if followfirst:
981 if followfirst:
984 cut = 1
982 cut = 1
985 else:
983 else:
986 cut = None
984 cut = None
987
985
988 while True:
986 while True:
989 for parent in c.parents()[:cut]:
987 for parent in c.parents()[:cut]:
990 visit[(parent.linkrev(), parent.filenode())] = parent
988 visit[(parent.linkrev(), parent.filenode())] = parent
991 if not visit:
989 if not visit:
992 break
990 break
993 c = visit.pop(max(visit))
991 c = visit.pop(max(visit))
994 yield c
992 yield c
995
993
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        # at least one anchor is required to resolve this file revision
        assert (changeid is not None
                or fileid is not None
                or changectx is not None), \
                ("bad args: changeid=%r, fileid=%r, changectx=%r"
                 % (changeid, fileid, changectx))

        if filelog is not None:
            self._filelog = filelog

        # only pre-populate the propertycaches the caller supplied
        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid
1022 @propertycache
1020 @propertycache
1023 def _changectx(self):
1021 def _changectx(self):
1024 try:
1022 try:
1025 return changectx(self._repo, self._changeid)
1023 return changectx(self._repo, self._changeid)
1026 except error.FilteredRepoLookupError:
1024 except error.FilteredRepoLookupError:
1027 # Linkrev may point to any revision in the repository. When the
1025 # Linkrev may point to any revision in the repository. When the
1028 # repository is filtered this may lead to `filectx` trying to build
1026 # repository is filtered this may lead to `filectx` trying to build
1029 # `changectx` for filtered revision. In such case we fallback to
1027 # `changectx` for filtered revision. In such case we fallback to
1030 # creating `changectx` on the unfiltered version of the reposition.
1028 # creating `changectx` on the unfiltered version of the reposition.
1031 # This fallback should not be an issue because `changectx` from
1029 # This fallback should not be an issue because `changectx` from
1032 # `filectx` are not used in complex operations that care about
1030 # `filectx` are not used in complex operations that care about
1033 # filtering.
1031 # filtering.
1034 #
1032 #
1035 # This fallback is a cheap and dirty fix that prevent several
1033 # This fallback is a cheap and dirty fix that prevent several
1036 # crashes. It does not ensure the behavior is correct. However the
1034 # crashes. It does not ensure the behavior is correct. However the
1037 # behavior was not correct before filtering either and "incorrect
1035 # behavior was not correct before filtering either and "incorrect
1038 # behavior" is seen as better as "crash"
1036 # behavior" is seen as better as "crash"
1039 #
1037 #
1040 # Linkrevs have several serious troubles with filtering that are
1038 # Linkrevs have several serious troubles with filtering that are
1041 # complicated to solve. Proper handling of the issue here should be
1039 # complicated to solve. Proper handling of the issue here should be
1042 # considered when solving linkrev issue are on the table.
1040 # considered when solving linkrev issue are on the table.
1043 return changectx(self._repo.unfiltered(), self._changeid)
1041 return changectx(self._repo.unfiltered(), self._changeid)
1044
1042
1045 def filectx(self, fileid, changeid=None):
1043 def filectx(self, fileid, changeid=None):
1046 '''opens an arbitrary revision of the file without
1044 '''opens an arbitrary revision of the file without
1047 opening a new filelog'''
1045 opening a new filelog'''
1048 return filectx(self._repo, self._path, fileid=fileid,
1046 return filectx(self._repo, self._path, fileid=fileid,
1049 filelog=self._filelog, changeid=changeid)
1047 filelog=self._filelog, changeid=changeid)
1050
1048
1051 def data(self):
1049 def data(self):
1052 try:
1050 try:
1053 return self._filelog.read(self._filenode)
1051 return self._filelog.read(self._filenode)
1054 except error.CensoredNodeError:
1052 except error.CensoredNodeError:
1055 if self._repo.ui.config("censor", "policy", "abort") == "ignore":
1053 if self._repo.ui.config("censor", "policy", "abort") == "ignore":
1056 return ""
1054 return ""
1057 raise util.Abort(_("censored node: %s") % short(self._filenode),
1055 raise util.Abort(_("censored node: %s") % short(self._filenode),
1058 hint=_("set censor.policy to ignore errors"))
1056 hint=_("set censor.policy to ignore errors"))
1059
1057
1060 def size(self):
1058 def size(self):
1061 return self._filelog.size(self._filerev)
1059 return self._filelog.size(self._filerev)
1062
1060
1063 def renamed(self):
1061 def renamed(self):
1064 """check if file was actually renamed in this changeset revision
1062 """check if file was actually renamed in this changeset revision
1065
1063
1066 If rename logged in file revision, we report copy for changeset only
1064 If rename logged in file revision, we report copy for changeset only
1067 if file revisions linkrev points back to the changeset in question
1065 if file revisions linkrev points back to the changeset in question
1068 or both changeset parents contain different file revisions.
1066 or both changeset parents contain different file revisions.
1069 """
1067 """
1070
1068
1071 renamed = self._filelog.renamed(self._filenode)
1069 renamed = self._filelog.renamed(self._filenode)
1072 if not renamed:
1070 if not renamed:
1073 return renamed
1071 return renamed
1074
1072
1075 if self.rev() == self.linkrev():
1073 if self.rev() == self.linkrev():
1076 return renamed
1074 return renamed
1077
1075
1078 name = self.path()
1076 name = self.path()
1079 fnode = self._filenode
1077 fnode = self._filenode
1080 for p in self._changectx.parents():
1078 for p in self._changectx.parents():
1081 try:
1079 try:
1082 if fnode == p.filenode(name):
1080 if fnode == p.filenode(name):
1083 return None
1081 return None
1084 except error.LookupError:
1082 except error.LookupError:
1085 pass
1083 pass
1086 return renamed
1084 return renamed
1087
1085
1088 def children(self):
1086 def children(self):
1089 # hard for renames
1087 # hard for renames
1090 c = self._filelog.children(self._filenode)
1088 c = self._filelog.children(self._filenode)
1091 return [filectx(self._repo, self._path, fileid=x,
1089 return [filectx(self._repo, self._path, fileid=x,
1092 filelog=self._filelog) for x in c]
1090 filelog=self._filelog) for x in c]
1093
1091
1094 class committablectx(basectx):
1092 class committablectx(basectx):
1095 """A committablectx object provides common functionality for a context that
1093 """A committablectx object provides common functionality for a context that
1096 wants the ability to commit, e.g. workingctx or memctx."""
1094 wants the ability to commit, e.g. workingctx or memctx."""
1097 def __init__(self, repo, text="", user=None, date=None, extra=None,
1095 def __init__(self, repo, text="", user=None, date=None, extra=None,
1098 changes=None):
1096 changes=None):
1099 self._repo = repo
1097 self._repo = repo
1100 self._rev = None
1098 self._rev = None
1101 self._node = None
1099 self._node = None
1102 self._text = text
1100 self._text = text
1103 if date:
1101 if date:
1104 self._date = util.parsedate(date)
1102 self._date = util.parsedate(date)
1105 if user:
1103 if user:
1106 self._user = user
1104 self._user = user
1107 if changes:
1105 if changes:
1108 self._status = changes
1106 self._status = changes
1109
1107
1110 self._extra = {}
1108 self._extra = {}
1111 if extra:
1109 if extra:
1112 self._extra = extra.copy()
1110 self._extra = extra.copy()
1113 if 'branch' not in self._extra:
1111 if 'branch' not in self._extra:
1114 try:
1112 try:
1115 branch = encoding.fromlocal(self._repo.dirstate.branch())
1113 branch = encoding.fromlocal(self._repo.dirstate.branch())
1116 except UnicodeDecodeError:
1114 except UnicodeDecodeError:
1117 raise util.Abort(_('branch name not in UTF-8!'))
1115 raise util.Abort(_('branch name not in UTF-8!'))
1118 self._extra['branch'] = branch
1116 self._extra['branch'] = branch
1119 if self._extra['branch'] == '':
1117 if self._extra['branch'] == '':
1120 self._extra['branch'] = 'default'
1118 self._extra['branch'] = 'default'
1121
1119
1122 def __str__(self):
1120 def __str__(self):
1123 return str(self._parents[0]) + "+"
1121 return str(self._parents[0]) + "+"
1124
1122
1125 def __nonzero__(self):
1123 def __nonzero__(self):
1126 return True
1124 return True
1127
1125
1128 def _buildflagfunc(self):
1126 def _buildflagfunc(self):
1129 # Create a fallback function for getting file flags when the
1127 # Create a fallback function for getting file flags when the
1130 # filesystem doesn't support them
1128 # filesystem doesn't support them
1131
1129
1132 copiesget = self._repo.dirstate.copies().get
1130 copiesget = self._repo.dirstate.copies().get
1133
1131
1134 if len(self._parents) < 2:
1132 if len(self._parents) < 2:
1135 # when we have one parent, it's easy: copy from parent
1133 # when we have one parent, it's easy: copy from parent
1136 man = self._parents[0].manifest()
1134 man = self._parents[0].manifest()
1137 def func(f):
1135 def func(f):
1138 f = copiesget(f, f)
1136 f = copiesget(f, f)
1139 return man.flags(f)
1137 return man.flags(f)
1140 else:
1138 else:
1141 # merges are tricky: we try to reconstruct the unstored
1139 # merges are tricky: we try to reconstruct the unstored
1142 # result from the merge (issue1802)
1140 # result from the merge (issue1802)
1143 p1, p2 = self._parents
1141 p1, p2 = self._parents
1144 pa = p1.ancestor(p2)
1142 pa = p1.ancestor(p2)
1145 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1143 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1146
1144
1147 def func(f):
1145 def func(f):
1148 f = copiesget(f, f) # may be wrong for merges with copies
1146 f = copiesget(f, f) # may be wrong for merges with copies
1149 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1147 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1150 if fl1 == fl2:
1148 if fl1 == fl2:
1151 return fl1
1149 return fl1
1152 if fl1 == fla:
1150 if fl1 == fla:
1153 return fl2
1151 return fl2
1154 if fl2 == fla:
1152 if fl2 == fla:
1155 return fl1
1153 return fl1
1156 return '' # punt for conflicts
1154 return '' # punt for conflicts
1157
1155
1158 return func
1156 return func
1159
1157
1160 @propertycache
1158 @propertycache
1161 def _flagfunc(self):
1159 def _flagfunc(self):
1162 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1160 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1163
1161
1164 @propertycache
1162 @propertycache
1165 def _manifest(self):
1163 def _manifest(self):
1166 """generate a manifest corresponding to the values in self._status
1164 """generate a manifest corresponding to the values in self._status
1167
1165
1168 This reuse the file nodeid from parent, but we append an extra letter
1166 This reuse the file nodeid from parent, but we append an extra letter
1169 when modified. Modified files get an extra 'm' while added files get
1167 when modified. Modified files get an extra 'm' while added files get
1170 an extra 'a'. This is used by manifests merge to see that files
1168 an extra 'a'. This is used by manifests merge to see that files
1171 are different and by update logic to avoid deleting newly added files.
1169 are different and by update logic to avoid deleting newly added files.
1172 """
1170 """
1173
1171
1174 man1 = self._parents[0].manifest()
1172 man1 = self._parents[0].manifest()
1175 man = man1.copy()
1173 man = man1.copy()
1176 if len(self._parents) > 1:
1174 if len(self._parents) > 1:
1177 man2 = self.p2().manifest()
1175 man2 = self.p2().manifest()
1178 def getman(f):
1176 def getman(f):
1179 if f in man1:
1177 if f in man1:
1180 return man1
1178 return man1
1181 return man2
1179 return man2
1182 else:
1180 else:
1183 getman = lambda f: man1
1181 getman = lambda f: man1
1184
1182
1185 copied = self._repo.dirstate.copies()
1183 copied = self._repo.dirstate.copies()
1186 ff = self._flagfunc
1184 ff = self._flagfunc
1187 for i, l in (("a", self._status.added), ("m", self._status.modified)):
1185 for i, l in (("a", self._status.added), ("m", self._status.modified)):
1188 for f in l:
1186 for f in l:
1189 orig = copied.get(f, f)
1187 orig = copied.get(f, f)
1190 man[f] = getman(orig).get(orig, nullid) + i
1188 man[f] = getman(orig).get(orig, nullid) + i
1191 try:
1189 try:
1192 man.setflag(f, ff(f))
1190 man.setflag(f, ff(f))
1193 except OSError:
1191 except OSError:
1194 pass
1192 pass
1195
1193
1196 for f in self._status.deleted + self._status.removed:
1194 for f in self._status.deleted + self._status.removed:
1197 if f in man:
1195 if f in man:
1198 del man[f]
1196 del man[f]
1199
1197
1200 return man
1198 return man
1201
1199
1202 @propertycache
1200 @propertycache
1203 def _status(self):
1201 def _status(self):
1204 return self._repo.status()
1202 return self._repo.status()
1205
1203
1206 @propertycache
1204 @propertycache
1207 def _user(self):
1205 def _user(self):
1208 return self._repo.ui.username()
1206 return self._repo.ui.username()
1209
1207
1210 @propertycache
1208 @propertycache
1211 def _date(self):
1209 def _date(self):
1212 return util.makedate()
1210 return util.makedate()
1213
1211
1214 def subrev(self, subpath):
1212 def subrev(self, subpath):
1215 return None
1213 return None
1216
1214
1217 def manifestnode(self):
1215 def manifestnode(self):
1218 return None
1216 return None
1219 def user(self):
1217 def user(self):
1220 return self._user or self._repo.ui.username()
1218 return self._user or self._repo.ui.username()
1221 def date(self):
1219 def date(self):
1222 return self._date
1220 return self._date
1223 def description(self):
1221 def description(self):
1224 return self._text
1222 return self._text
1225 def files(self):
1223 def files(self):
1226 return sorted(self._status.modified + self._status.added +
1224 return sorted(self._status.modified + self._status.added +
1227 self._status.removed)
1225 self._status.removed)
1228
1226
1229 def modified(self):
1227 def modified(self):
1230 return self._status.modified
1228 return self._status.modified
1231 def added(self):
1229 def added(self):
1232 return self._status.added
1230 return self._status.added
1233 def removed(self):
1231 def removed(self):
1234 return self._status.removed
1232 return self._status.removed
1235 def deleted(self):
1233 def deleted(self):
1236 return self._status.deleted
1234 return self._status.deleted
1237 def branch(self):
1235 def branch(self):
1238 return encoding.tolocal(self._extra['branch'])
1236 return encoding.tolocal(self._extra['branch'])
1239 def closesbranch(self):
1237 def closesbranch(self):
1240 return 'close' in self._extra
1238 return 'close' in self._extra
1241 def extra(self):
1239 def extra(self):
1242 return self._extra
1240 return self._extra
1243
1241
1244 def tags(self):
1242 def tags(self):
1245 t = []
1243 t = []
1246 for p in self.parents():
1244 for p in self.parents():
1247 t.extend(p.tags())
1245 t.extend(p.tags())
1248 return t
1246 return t
1249
1247
1250 def bookmarks(self):
1248 def bookmarks(self):
1251 b = []
1249 b = []
1252 for p in self.parents():
1250 for p in self.parents():
1253 b.extend(p.bookmarks())
1251 b.extend(p.bookmarks())
1254 return b
1252 return b
1255
1253
1256 def phase(self):
1254 def phase(self):
1257 phase = phases.draft # default phase to draft
1255 phase = phases.draft # default phase to draft
1258 for p in self.parents():
1256 for p in self.parents():
1259 phase = max(phase, p.phase())
1257 phase = max(phase, p.phase())
1260 return phase
1258 return phase
1261
1259
1262 def hidden(self):
1260 def hidden(self):
1263 return False
1261 return False
1264
1262
1265 def children(self):
1263 def children(self):
1266 return []
1264 return []
1267
1265
1268 def flags(self, path):
1266 def flags(self, path):
1269 if '_manifest' in self.__dict__:
1267 if '_manifest' in self.__dict__:
1270 try:
1268 try:
1271 return self._manifest.flags(path)
1269 return self._manifest.flags(path)
1272 except KeyError:
1270 except KeyError:
1273 return ''
1271 return ''
1274
1272
1275 try:
1273 try:
1276 return self._flagfunc(path)
1274 return self._flagfunc(path)
1277 except OSError:
1275 except OSError:
1278 return ''
1276 return ''
1279
1277
1280 def ancestor(self, c2):
1278 def ancestor(self, c2):
1281 """return the "best" ancestor context of self and c2"""
1279 """return the "best" ancestor context of self and c2"""
1282 return self._parents[0].ancestor(c2) # punt on two parents for now
1280 return self._parents[0].ancestor(c2) # punt on two parents for now
1283
1281
1284 def walk(self, match):
1282 def walk(self, match):
1285 '''Generates matching file names.'''
1283 '''Generates matching file names.'''
1286 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1284 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1287 True, False))
1285 True, False))
1288
1286
1289 def matches(self, match):
1287 def matches(self, match):
1290 return sorted(self._repo.dirstate.matches(match))
1288 return sorted(self._repo.dirstate.matches(match))
1291
1289
1292 def ancestors(self):
1290 def ancestors(self):
1293 for p in self._parents:
1291 for p in self._parents:
1294 yield p
1292 yield p
1295 for a in self._repo.changelog.ancestors(
1293 for a in self._repo.changelog.ancestors(
1296 [p.rev() for p in self._parents]):
1294 [p.rev() for p in self._parents]):
1297 yield changectx(self._repo, a)
1295 yield changectx(self._repo, a)
1298
1296
1299 def markcommitted(self, node):
1297 def markcommitted(self, node):
1300 """Perform post-commit cleanup necessary after committing this ctx
1298 """Perform post-commit cleanup necessary after committing this ctx
1301
1299
1302 Specifically, this updates backing stores this working context
1300 Specifically, this updates backing stores this working context
1303 wraps to reflect the fact that the changes reflected by this
1301 wraps to reflect the fact that the changes reflected by this
1304 workingctx have been committed. For example, it marks
1302 workingctx have been committed. For example, it marks
1305 modified and added files as normal in the dirstate.
1303 modified and added files as normal in the dirstate.
1306
1304
1307 """
1305 """
1308
1306
1309 self._repo.dirstate.beginparentchange()
1307 self._repo.dirstate.beginparentchange()
1310 for f in self.modified() + self.added():
1308 for f in self.modified() + self.added():
1311 self._repo.dirstate.normal(f)
1309 self._repo.dirstate.normal(f)
1312 for f in self.removed():
1310 for f in self.removed():
1313 self._repo.dirstate.drop(f)
1311 self._repo.dirstate.drop(f)
1314 self._repo.dirstate.setparents(node)
1312 self._repo.dirstate.setparents(node)
1315 self._repo.dirstate.endparentchange()
1313 self._repo.dirstate.endparentchange()
1316
1314
1317 class workingctx(committablectx):
1315 class workingctx(committablectx):
1318 """A workingctx object makes access to data related to
1316 """A workingctx object makes access to data related to
1319 the current working directory convenient.
1317 the current working directory convenient.
1320 date - any valid date string or (unixtime, offset), or None.
1318 date - any valid date string or (unixtime, offset), or None.
1321 user - username string, or None.
1319 user - username string, or None.
1322 extra - a dictionary of extra values, or None.
1320 extra - a dictionary of extra values, or None.
1323 changes - a list of file lists as returned by localrepo.status()
1321 changes - a list of file lists as returned by localrepo.status()
1324 or None to use the repository status.
1322 or None to use the repository status.
1325 """
1323 """
1326 def __init__(self, repo, text="", user=None, date=None, extra=None,
1324 def __init__(self, repo, text="", user=None, date=None, extra=None,
1327 changes=None):
1325 changes=None):
1328 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1326 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1329
1327
1330 def __iter__(self):
1328 def __iter__(self):
1331 d = self._repo.dirstate
1329 d = self._repo.dirstate
1332 for f in d:
1330 for f in d:
1333 if d[f] != 'r':
1331 if d[f] != 'r':
1334 yield f
1332 yield f
1335
1333
1336 def __contains__(self, key):
1334 def __contains__(self, key):
1337 return self._repo.dirstate[key] not in "?r"
1335 return self._repo.dirstate[key] not in "?r"
1338
1336
1339 @propertycache
1337 @propertycache
1340 def _parents(self):
1338 def _parents(self):
1341 p = self._repo.dirstate.parents()
1339 p = self._repo.dirstate.parents()
1342 if p[1] == nullid:
1340 if p[1] == nullid:
1343 p = p[:-1]
1341 p = p[:-1]
1344 return [changectx(self._repo, x) for x in p]
1342 return [changectx(self._repo, x) for x in p]
1345
1343
1346 def filectx(self, path, filelog=None):
1344 def filectx(self, path, filelog=None):
1347 """get a file context from the working directory"""
1345 """get a file context from the working directory"""
1348 return workingfilectx(self._repo, path, workingctx=self,
1346 return workingfilectx(self._repo, path, workingctx=self,
1349 filelog=filelog)
1347 filelog=filelog)
1350
1348
1351 def dirty(self, missing=False, merge=True, branch=True):
1349 def dirty(self, missing=False, merge=True, branch=True):
1352 "check whether a working directory is modified"
1350 "check whether a working directory is modified"
1353 # check subrepos first
1351 # check subrepos first
1354 for s in sorted(self.substate):
1352 for s in sorted(self.substate):
1355 if self.sub(s).dirty():
1353 if self.sub(s).dirty():
1356 return True
1354 return True
1357 # check current working dir
1355 # check current working dir
1358 return ((merge and self.p2()) or
1356 return ((merge and self.p2()) or
1359 (branch and self.branch() != self.p1().branch()) or
1357 (branch and self.branch() != self.p1().branch()) or
1360 self.modified() or self.added() or self.removed() or
1358 self.modified() or self.added() or self.removed() or
1361 (missing and self.deleted()))
1359 (missing and self.deleted()))
1362
1360
1363 def add(self, list, prefix=""):
1361 def add(self, list, prefix=""):
1364 join = lambda f: os.path.join(prefix, f)
1362 join = lambda f: os.path.join(prefix, f)
1365 wlock = self._repo.wlock()
1363 wlock = self._repo.wlock()
1366 ui, ds = self._repo.ui, self._repo.dirstate
1364 ui, ds = self._repo.ui, self._repo.dirstate
1367 try:
1365 try:
1368 rejected = []
1366 rejected = []
1369 lstat = self._repo.wvfs.lstat
1367 lstat = self._repo.wvfs.lstat
1370 for f in list:
1368 for f in list:
1371 scmutil.checkportable(ui, join(f))
1369 scmutil.checkportable(ui, join(f))
1372 try:
1370 try:
1373 st = lstat(f)
1371 st = lstat(f)
1374 except OSError:
1372 except OSError:
1375 ui.warn(_("%s does not exist!\n") % join(f))
1373 ui.warn(_("%s does not exist!\n") % join(f))
1376 rejected.append(f)
1374 rejected.append(f)
1377 continue
1375 continue
1378 if st.st_size > 10000000:
1376 if st.st_size > 10000000:
1379 ui.warn(_("%s: up to %d MB of RAM may be required "
1377 ui.warn(_("%s: up to %d MB of RAM may be required "
1380 "to manage this file\n"
1378 "to manage this file\n"
1381 "(use 'hg revert %s' to cancel the "
1379 "(use 'hg revert %s' to cancel the "
1382 "pending addition)\n")
1380 "pending addition)\n")
1383 % (f, 3 * st.st_size // 1000000, join(f)))
1381 % (f, 3 * st.st_size // 1000000, join(f)))
1384 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1382 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1385 ui.warn(_("%s not added: only files and symlinks "
1383 ui.warn(_("%s not added: only files and symlinks "
1386 "supported currently\n") % join(f))
1384 "supported currently\n") % join(f))
1387 rejected.append(f)
1385 rejected.append(f)
1388 elif ds[f] in 'amn':
1386 elif ds[f] in 'amn':
1389 ui.warn(_("%s already tracked!\n") % join(f))
1387 ui.warn(_("%s already tracked!\n") % join(f))
1390 elif ds[f] == 'r':
1388 elif ds[f] == 'r':
1391 ds.normallookup(f)
1389 ds.normallookup(f)
1392 else:
1390 else:
1393 ds.add(f)
1391 ds.add(f)
1394 return rejected
1392 return rejected
1395 finally:
1393 finally:
1396 wlock.release()
1394 wlock.release()
1397
1395
1398 def forget(self, files, prefix=""):
1396 def forget(self, files, prefix=""):
1399 join = lambda f: os.path.join(prefix, f)
1397 join = lambda f: os.path.join(prefix, f)
1400 wlock = self._repo.wlock()
1398 wlock = self._repo.wlock()
1401 try:
1399 try:
1402 rejected = []
1400 rejected = []
1403 for f in files:
1401 for f in files:
1404 if f not in self._repo.dirstate:
1402 if f not in self._repo.dirstate:
1405 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1403 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1406 rejected.append(f)
1404 rejected.append(f)
1407 elif self._repo.dirstate[f] != 'a':
1405 elif self._repo.dirstate[f] != 'a':
1408 self._repo.dirstate.remove(f)
1406 self._repo.dirstate.remove(f)
1409 else:
1407 else:
1410 self._repo.dirstate.drop(f)
1408 self._repo.dirstate.drop(f)
1411 return rejected
1409 return rejected
1412 finally:
1410 finally:
1413 wlock.release()
1411 wlock.release()
1414
1412
1415 def undelete(self, list):
1413 def undelete(self, list):
1416 pctxs = self.parents()
1414 pctxs = self.parents()
1417 wlock = self._repo.wlock()
1415 wlock = self._repo.wlock()
1418 try:
1416 try:
1419 for f in list:
1417 for f in list:
1420 if self._repo.dirstate[f] != 'r':
1418 if self._repo.dirstate[f] != 'r':
1421 self._repo.ui.warn(_("%s not removed!\n") % f)
1419 self._repo.ui.warn(_("%s not removed!\n") % f)
1422 else:
1420 else:
1423 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1421 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1424 t = fctx.data()
1422 t = fctx.data()
1425 self._repo.wwrite(f, t, fctx.flags())
1423 self._repo.wwrite(f, t, fctx.flags())
1426 self._repo.dirstate.normal(f)
1424 self._repo.dirstate.normal(f)
1427 finally:
1425 finally:
1428 wlock.release()
1426 wlock.release()
1429
1427
1430 def copy(self, source, dest):
1428 def copy(self, source, dest):
1431 try:
1429 try:
1432 st = self._repo.wvfs.lstat(dest)
1430 st = self._repo.wvfs.lstat(dest)
1433 except OSError, err:
1431 except OSError, err:
1434 if err.errno != errno.ENOENT:
1432 if err.errno != errno.ENOENT:
1435 raise
1433 raise
1436 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1434 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1437 return
1435 return
1438 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1436 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1439 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1437 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1440 "symbolic link\n") % dest)
1438 "symbolic link\n") % dest)
1441 else:
1439 else:
1442 wlock = self._repo.wlock()
1440 wlock = self._repo.wlock()
1443 try:
1441 try:
1444 if self._repo.dirstate[dest] in '?':
1442 if self._repo.dirstate[dest] in '?':
1445 self._repo.dirstate.add(dest)
1443 self._repo.dirstate.add(dest)
1446 elif self._repo.dirstate[dest] in 'r':
1444 elif self._repo.dirstate[dest] in 'r':
1447 self._repo.dirstate.normallookup(dest)
1445 self._repo.dirstate.normallookup(dest)
1448 self._repo.dirstate.copy(source, dest)
1446 self._repo.dirstate.copy(source, dest)
1449 finally:
1447 finally:
1450 wlock.release()
1448 wlock.release()
1451
1449
1452 def match(self, pats=[], include=None, exclude=None, default='glob',
1450 def match(self, pats=[], include=None, exclude=None, default='glob',
1453 listsubrepos=False):
1451 listsubrepos=False):
1454 r = self._repo
1452 r = self._repo
1455
1453
1456 # Only a case insensitive filesystem needs magic to translate user input
1454 # Only a case insensitive filesystem needs magic to translate user input
1457 # to actual case in the filesystem.
1455 # to actual case in the filesystem.
1458 if not util.checkcase(r.root):
1456 if not util.checkcase(r.root):
1459 return matchmod.icasefsmatcher(r.root, r.getcwd(), pats, include,
1457 return matchmod.icasefsmatcher(r.root, r.getcwd(), pats, include,
1460 exclude, default, r.auditor, self,
1458 exclude, default, r.auditor, self,
1461 listsubrepos=listsubrepos)
1459 listsubrepos=listsubrepos)
1462 return matchmod.match(r.root, r.getcwd(), pats,
1460 return matchmod.match(r.root, r.getcwd(), pats,
1463 include, exclude, default,
1461 include, exclude, default,
1464 auditor=r.auditor, ctx=self,
1462 auditor=r.auditor, ctx=self,
1465 listsubrepos=listsubrepos)
1463 listsubrepos=listsubrepos)
1466
1464
1467 def _filtersuspectsymlink(self, files):
1465 def _filtersuspectsymlink(self, files):
1468 if not files or self._repo.dirstate._checklink:
1466 if not files or self._repo.dirstate._checklink:
1469 return files
1467 return files
1470
1468
1471 # Symlink placeholders may get non-symlink-like contents
1469 # Symlink placeholders may get non-symlink-like contents
1472 # via user error or dereferencing by NFS or Samba servers,
1470 # via user error or dereferencing by NFS or Samba servers,
1473 # so we filter out any placeholders that don't look like a
1471 # so we filter out any placeholders that don't look like a
1474 # symlink
1472 # symlink
1475 sane = []
1473 sane = []
1476 for f in files:
1474 for f in files:
1477 if self.flags(f) == 'l':
1475 if self.flags(f) == 'l':
1478 d = self[f].data()
1476 d = self[f].data()
1479 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1477 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1480 self._repo.ui.debug('ignoring suspect symlink placeholder'
1478 self._repo.ui.debug('ignoring suspect symlink placeholder'
1481 ' "%s"\n' % f)
1479 ' "%s"\n' % f)
1482 continue
1480 continue
1483 sane.append(f)
1481 sane.append(f)
1484 return sane
1482 return sane
1485
1483
1486 def _checklookup(self, files):
1484 def _checklookup(self, files):
1487 # check for any possibly clean files
1485 # check for any possibly clean files
1488 if not files:
1486 if not files:
1489 return [], []
1487 return [], []
1490
1488
1491 modified = []
1489 modified = []
1492 fixup = []
1490 fixup = []
1493 pctx = self._parents[0]
1491 pctx = self._parents[0]
1494 # do a full compare of any files that might have changed
1492 # do a full compare of any files that might have changed
1495 for f in sorted(files):
1493 for f in sorted(files):
1496 if (f not in pctx or self.flags(f) != pctx.flags(f)
1494 if (f not in pctx or self.flags(f) != pctx.flags(f)
1497 or pctx[f].cmp(self[f])):
1495 or pctx[f].cmp(self[f])):
1498 modified.append(f)
1496 modified.append(f)
1499 else:
1497 else:
1500 fixup.append(f)
1498 fixup.append(f)
1501
1499
1502 # update dirstate for files that are actually clean
1500 # update dirstate for files that are actually clean
1503 if fixup:
1501 if fixup:
1504 try:
1502 try:
1505 # updating the dirstate is optional
1503 # updating the dirstate is optional
1506 # so we don't wait on the lock
1504 # so we don't wait on the lock
1507 # wlock can invalidate the dirstate, so cache normal _after_
1505 # wlock can invalidate the dirstate, so cache normal _after_
1508 # taking the lock
1506 # taking the lock
1509 wlock = self._repo.wlock(False)
1507 wlock = self._repo.wlock(False)
1510 normal = self._repo.dirstate.normal
1508 normal = self._repo.dirstate.normal
1511 try:
1509 try:
1512 for f in fixup:
1510 for f in fixup:
1513 normal(f)
1511 normal(f)
1514 finally:
1512 finally:
1515 wlock.release()
1513 wlock.release()
1516 except error.LockError:
1514 except error.LockError:
1517 pass
1515 pass
1518 return modified, fixup
1516 return modified, fixup
1519
1517
1520 def _manifestmatches(self, match, s):
1518 def _manifestmatches(self, match, s):
1521 """Slow path for workingctx
1519 """Slow path for workingctx
1522
1520
1523 The fast path is when we compare the working directory to its parent
1521 The fast path is when we compare the working directory to its parent
1524 which means this function is comparing with a non-parent; therefore we
1522 which means this function is comparing with a non-parent; therefore we
1525 need to build a manifest and return what matches.
1523 need to build a manifest and return what matches.
1526 """
1524 """
1527 mf = self._repo['.']._manifestmatches(match, s)
1525 mf = self._repo['.']._manifestmatches(match, s)
1528 for f in s.modified + s.added:
1526 for f in s.modified + s.added:
1529 mf[f] = _newnode
1527 mf[f] = _newnode
1530 mf.setflag(f, self.flags(f))
1528 mf.setflag(f, self.flags(f))
1531 for f in s.removed:
1529 for f in s.removed:
1532 if f in mf:
1530 if f in mf:
1533 del mf[f]
1531 del mf[f]
1534 return mf
1532 return mf
1535
1533
1536 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1534 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1537 unknown=False):
1535 unknown=False):
1538 '''Gets the status from the dirstate -- internal use only.'''
1536 '''Gets the status from the dirstate -- internal use only.'''
1539 listignored, listclean, listunknown = ignored, clean, unknown
1537 listignored, listclean, listunknown = ignored, clean, unknown
1540 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1538 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1541 subrepos = []
1539 subrepos = []
1542 if '.hgsub' in self:
1540 if '.hgsub' in self:
1543 subrepos = sorted(self.substate)
1541 subrepos = sorted(self.substate)
1544 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1542 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1545 listclean, listunknown)
1543 listclean, listunknown)
1546
1544
1547 # check for any possibly clean files
1545 # check for any possibly clean files
1548 if cmp:
1546 if cmp:
1549 modified2, fixup = self._checklookup(cmp)
1547 modified2, fixup = self._checklookup(cmp)
1550 s.modified.extend(modified2)
1548 s.modified.extend(modified2)
1551
1549
1552 # update dirstate for files that are actually clean
1550 # update dirstate for files that are actually clean
1553 if fixup and listclean:
1551 if fixup and listclean:
1554 s.clean.extend(fixup)
1552 s.clean.extend(fixup)
1555
1553
1556 if match.always():
1554 if match.always():
1557 # cache for performance
1555 # cache for performance
1558 if s.unknown or s.ignored or s.clean:
1556 if s.unknown or s.ignored or s.clean:
1559 # "_status" is cached with list*=False in the normal route
1557 # "_status" is cached with list*=False in the normal route
1560 self._status = scmutil.status(s.modified, s.added, s.removed,
1558 self._status = scmutil.status(s.modified, s.added, s.removed,
1561 s.deleted, [], [], [])
1559 s.deleted, [], [], [])
1562 else:
1560 else:
1563 self._status = s
1561 self._status = s
1564
1562
1565 return s
1563 return s
1566
1564
1567 def _buildstatus(self, other, s, match, listignored, listclean,
1565 def _buildstatus(self, other, s, match, listignored, listclean,
1568 listunknown):
1566 listunknown):
1569 """build a status with respect to another context
1567 """build a status with respect to another context
1570
1568
1571 This includes logic for maintaining the fast path of status when
1569 This includes logic for maintaining the fast path of status when
1572 comparing the working directory against its parent, which is to skip
1570 comparing the working directory against its parent, which is to skip
1573 building a new manifest if self (working directory) is not comparing
1571 building a new manifest if self (working directory) is not comparing
1574 against its parent (repo['.']).
1572 against its parent (repo['.']).
1575 """
1573 """
1576 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1574 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1577 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1575 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1578 # might have accidentally ended up with the entire contents of the file
1576 # might have accidentally ended up with the entire contents of the file
1579 # they are supposed to be linking to.
1577 # they are supposed to be linking to.
1580 s.modified[:] = self._filtersuspectsymlink(s.modified)
1578 s.modified[:] = self._filtersuspectsymlink(s.modified)
1581 if other != self._repo['.']:
1579 if other != self._repo['.']:
1582 s = super(workingctx, self)._buildstatus(other, s, match,
1580 s = super(workingctx, self)._buildstatus(other, s, match,
1583 listignored, listclean,
1581 listignored, listclean,
1584 listunknown)
1582 listunknown)
1585 return s
1583 return s
1586
1584
1587 def _matchstatus(self, other, match):
1585 def _matchstatus(self, other, match):
1588 """override the match method with a filter for directory patterns
1586 """override the match method with a filter for directory patterns
1589
1587
1590 We use inheritance to customize the match.bad method only in cases of
1588 We use inheritance to customize the match.bad method only in cases of
1591 workingctx since it belongs only to the working directory when
1589 workingctx since it belongs only to the working directory when
1592 comparing against the parent changeset.
1590 comparing against the parent changeset.
1593
1591
1594 If we aren't comparing against the working directory's parent, then we
1592 If we aren't comparing against the working directory's parent, then we
1595 just use the default match object sent to us.
1593 just use the default match object sent to us.
1596 """
1594 """
1597 superself = super(workingctx, self)
1595 superself = super(workingctx, self)
1598 match = superself._matchstatus(other, match)
1596 match = superself._matchstatus(other, match)
1599 if other != self._repo['.']:
1597 if other != self._repo['.']:
1600 def bad(f, msg):
1598 def bad(f, msg):
1601 # 'f' may be a directory pattern from 'match.files()',
1599 # 'f' may be a directory pattern from 'match.files()',
1602 # so 'f not in ctx1' is not enough
1600 # so 'f not in ctx1' is not enough
1603 if f not in other and not other.hasdir(f):
1601 if f not in other and not other.hasdir(f):
1604 self._repo.ui.warn('%s: %s\n' %
1602 self._repo.ui.warn('%s: %s\n' %
1605 (self._repo.dirstate.pathto(f), msg))
1603 (self._repo.dirstate.pathto(f), msg))
1606 match.bad = bad
1604 match.bad = bad
1607 return match
1605 return match
1608
1606
1609 class committablefilectx(basefilectx):
1607 class committablefilectx(basefilectx):
1610 """A committablefilectx provides common functionality for a file context
1608 """A committablefilectx provides common functionality for a file context
1611 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1609 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1612 def __init__(self, repo, path, filelog=None, ctx=None):
1610 def __init__(self, repo, path, filelog=None, ctx=None):
1613 self._repo = repo
1611 self._repo = repo
1614 self._path = path
1612 self._path = path
1615 self._changeid = None
1613 self._changeid = None
1616 self._filerev = self._filenode = None
1614 self._filerev = self._filenode = None
1617
1615
1618 if filelog is not None:
1616 if filelog is not None:
1619 self._filelog = filelog
1617 self._filelog = filelog
1620 if ctx:
1618 if ctx:
1621 self._changectx = ctx
1619 self._changectx = ctx
1622
1620
1623 def __nonzero__(self):
1621 def __nonzero__(self):
1624 return True
1622 return True
1625
1623
1626 def linkrev(self):
1624 def linkrev(self):
1627 # linked to self._changectx no matter if file is modified or not
1625 # linked to self._changectx no matter if file is modified or not
1628 return self.rev()
1626 return self.rev()
1629
1627
1630 def parents(self):
1628 def parents(self):
1631 '''return parent filectxs, following copies if necessary'''
1629 '''return parent filectxs, following copies if necessary'''
1632 def filenode(ctx, path):
1630 def filenode(ctx, path):
1633 return ctx._manifest.get(path, nullid)
1631 return ctx._manifest.get(path, nullid)
1634
1632
1635 path = self._path
1633 path = self._path
1636 fl = self._filelog
1634 fl = self._filelog
1637 pcl = self._changectx._parents
1635 pcl = self._changectx._parents
1638 renamed = self.renamed()
1636 renamed = self.renamed()
1639
1637
1640 if renamed:
1638 if renamed:
1641 pl = [renamed + (None,)]
1639 pl = [renamed + (None,)]
1642 else:
1640 else:
1643 pl = [(path, filenode(pcl[0], path), fl)]
1641 pl = [(path, filenode(pcl[0], path), fl)]
1644
1642
1645 for pc in pcl[1:]:
1643 for pc in pcl[1:]:
1646 pl.append((path, filenode(pc, path), fl))
1644 pl.append((path, filenode(pc, path), fl))
1647
1645
1648 return [self._parentfilectx(p, fileid=n, filelog=l)
1646 return [self._parentfilectx(p, fileid=n, filelog=l)
1649 for p, n, l in pl if n != nullid]
1647 for p, n, l in pl if n != nullid]
1650
1648
1651 def children(self):
1649 def children(self):
1652 return []
1650 return []
1653
1651
1654 class workingfilectx(committablefilectx):
1652 class workingfilectx(committablefilectx):
1655 """A workingfilectx object makes access to data related to a particular
1653 """A workingfilectx object makes access to data related to a particular
1656 file in the working directory convenient."""
1654 file in the working directory convenient."""
1657 def __init__(self, repo, path, filelog=None, workingctx=None):
1655 def __init__(self, repo, path, filelog=None, workingctx=None):
1658 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1656 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1659
1657
1660 @propertycache
1658 @propertycache
1661 def _changectx(self):
1659 def _changectx(self):
1662 return workingctx(self._repo)
1660 return workingctx(self._repo)
1663
1661
1664 def data(self):
1662 def data(self):
1665 return self._repo.wread(self._path)
1663 return self._repo.wread(self._path)
1666 def renamed(self):
1664 def renamed(self):
1667 rp = self._repo.dirstate.copied(self._path)
1665 rp = self._repo.dirstate.copied(self._path)
1668 if not rp:
1666 if not rp:
1669 return None
1667 return None
1670 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1668 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1671
1669
1672 def size(self):
1670 def size(self):
1673 return self._repo.wvfs.lstat(self._path).st_size
1671 return self._repo.wvfs.lstat(self._path).st_size
1674 def date(self):
1672 def date(self):
1675 t, tz = self._changectx.date()
1673 t, tz = self._changectx.date()
1676 try:
1674 try:
1677 return (int(self._repo.wvfs.lstat(self._path).st_mtime), tz)
1675 return (int(self._repo.wvfs.lstat(self._path).st_mtime), tz)
1678 except OSError, err:
1676 except OSError, err:
1679 if err.errno != errno.ENOENT:
1677 if err.errno != errno.ENOENT:
1680 raise
1678 raise
1681 return (t, tz)
1679 return (t, tz)
1682
1680
1683 def cmp(self, fctx):
1681 def cmp(self, fctx):
1684 """compare with other file context
1682 """compare with other file context
1685
1683
1686 returns True if different than fctx.
1684 returns True if different than fctx.
1687 """
1685 """
1688 # fctx should be a filectx (not a workingfilectx)
1686 # fctx should be a filectx (not a workingfilectx)
1689 # invert comparison to reuse the same code path
1687 # invert comparison to reuse the same code path
1690 return fctx.cmp(self)
1688 return fctx.cmp(self)
1691
1689
1692 def remove(self, ignoremissing=False):
1690 def remove(self, ignoremissing=False):
1693 """wraps unlink for a repo's working directory"""
1691 """wraps unlink for a repo's working directory"""
1694 util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)
1692 util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)
1695
1693
1696 def write(self, data, flags):
1694 def write(self, data, flags):
1697 """wraps repo.wwrite"""
1695 """wraps repo.wwrite"""
1698 self._repo.wwrite(self._path, data, flags)
1696 self._repo.wwrite(self._path, data, flags)
1699
1697
1700 class workingcommitctx(workingctx):
1698 class workingcommitctx(workingctx):
1701 """A workingcommitctx object makes access to data related to
1699 """A workingcommitctx object makes access to data related to
1702 the revision being committed convenient.
1700 the revision being committed convenient.
1703
1701
1704 This hides changes in the working directory, if they aren't
1702 This hides changes in the working directory, if they aren't
1705 committed in this context.
1703 committed in this context.
1706 """
1704 """
1707 def __init__(self, repo, changes,
1705 def __init__(self, repo, changes,
1708 text="", user=None, date=None, extra=None):
1706 text="", user=None, date=None, extra=None):
1709 super(workingctx, self).__init__(repo, text, user, date, extra,
1707 super(workingctx, self).__init__(repo, text, user, date, extra,
1710 changes)
1708 changes)
1711
1709
1712 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1710 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1713 unknown=False):
1711 unknown=False):
1714 """Return matched files only in ``self._status``
1712 """Return matched files only in ``self._status``
1715
1713
1716 Uncommitted files appear "clean" via this context, even if
1714 Uncommitted files appear "clean" via this context, even if
1717 they aren't actually so in the working directory.
1715 they aren't actually so in the working directory.
1718 """
1716 """
1719 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1717 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1720 if clean:
1718 if clean:
1721 clean = [f for f in self._manifest if f not in self._changedset]
1719 clean = [f for f in self._manifest if f not in self._changedset]
1722 else:
1720 else:
1723 clean = []
1721 clean = []
1724 return scmutil.status([f for f in self._status.modified if match(f)],
1722 return scmutil.status([f for f in self._status.modified if match(f)],
1725 [f for f in self._status.added if match(f)],
1723 [f for f in self._status.added if match(f)],
1726 [f for f in self._status.removed if match(f)],
1724 [f for f in self._status.removed if match(f)],
1727 [], [], [], clean)
1725 [], [], [], clean)
1728
1726
1729 @propertycache
1727 @propertycache
1730 def _changedset(self):
1728 def _changedset(self):
1731 """Return the set of files changed in this context
1729 """Return the set of files changed in this context
1732 """
1730 """
1733 changed = set(self._status.modified)
1731 changed = set(self._status.modified)
1734 changed.update(self._status.added)
1732 changed.update(self._status.added)
1735 changed.update(self._status.removed)
1733 changed.update(self._status.removed)
1736 return changed
1734 return changed
1737
1735
1738 class memctx(committablectx):
1736 class memctx(committablectx):
1739 """Use memctx to perform in-memory commits via localrepo.commitctx().
1737 """Use memctx to perform in-memory commits via localrepo.commitctx().
1740
1738
1741 Revision information is supplied at initialization time while
1739 Revision information is supplied at initialization time while
1742 related files data and is made available through a callback
1740 related files data and is made available through a callback
1743 mechanism. 'repo' is the current localrepo, 'parents' is a
1741 mechanism. 'repo' is the current localrepo, 'parents' is a
1744 sequence of two parent revisions identifiers (pass None for every
1742 sequence of two parent revisions identifiers (pass None for every
1745 missing parent), 'text' is the commit message and 'files' lists
1743 missing parent), 'text' is the commit message and 'files' lists
1746 names of files touched by the revision (normalized and relative to
1744 names of files touched by the revision (normalized and relative to
1747 repository root).
1745 repository root).
1748
1746
1749 filectxfn(repo, memctx, path) is a callable receiving the
1747 filectxfn(repo, memctx, path) is a callable receiving the
1750 repository, the current memctx object and the normalized path of
1748 repository, the current memctx object and the normalized path of
1751 requested file, relative to repository root. It is fired by the
1749 requested file, relative to repository root. It is fired by the
1752 commit function for every file in 'files', but calls order is
1750 commit function for every file in 'files', but calls order is
1753 undefined. If the file is available in the revision being
1751 undefined. If the file is available in the revision being
1754 committed (updated or added), filectxfn returns a memfilectx
1752 committed (updated or added), filectxfn returns a memfilectx
1755 object. If the file was removed, filectxfn raises an
1753 object. If the file was removed, filectxfn raises an
1756 IOError. Moved files are represented by marking the source file
1754 IOError. Moved files are represented by marking the source file
1757 removed and the new file added with copy information (see
1755 removed and the new file added with copy information (see
1758 memfilectx).
1756 memfilectx).
1759
1757
1760 user receives the committer name and defaults to current
1758 user receives the committer name and defaults to current
1761 repository username, date is the commit date in any format
1759 repository username, date is the commit date in any format
1762 supported by util.parsedate() and defaults to current date, extra
1760 supported by util.parsedate() and defaults to current date, extra
1763 is a dictionary of metadata or is left empty.
1761 is a dictionary of metadata or is left empty.
1764 """
1762 """
1765
1763
1766 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
1764 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
1767 # Extensions that need to retain compatibility across Mercurial 3.1 can use
1765 # Extensions that need to retain compatibility across Mercurial 3.1 can use
1768 # this field to determine what to do in filectxfn.
1766 # this field to determine what to do in filectxfn.
1769 _returnnoneformissingfiles = True
1767 _returnnoneformissingfiles = True
1770
1768
1771 def __init__(self, repo, parents, text, files, filectxfn, user=None,
1769 def __init__(self, repo, parents, text, files, filectxfn, user=None,
1772 date=None, extra=None, editor=False):
1770 date=None, extra=None, editor=False):
1773 super(memctx, self).__init__(repo, text, user, date, extra)
1771 super(memctx, self).__init__(repo, text, user, date, extra)
1774 self._rev = None
1772 self._rev = None
1775 self._node = None
1773 self._node = None
1776 parents = [(p or nullid) for p in parents]
1774 parents = [(p or nullid) for p in parents]
1777 p1, p2 = parents
1775 p1, p2 = parents
1778 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
1776 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
1779 files = sorted(set(files))
1777 files = sorted(set(files))
1780 self._files = files
1778 self._files = files
1781 self.substate = {}
1779 self.substate = {}
1782
1780
1783 # if store is not callable, wrap it in a function
1781 # if store is not callable, wrap it in a function
1784 if not callable(filectxfn):
1782 if not callable(filectxfn):
1785 def getfilectx(repo, memctx, path):
1783 def getfilectx(repo, memctx, path):
1786 fctx = filectxfn[path]
1784 fctx = filectxfn[path]
1787 # this is weird but apparently we only keep track of one parent
1785 # this is weird but apparently we only keep track of one parent
1788 # (why not only store that instead of a tuple?)
1786 # (why not only store that instead of a tuple?)
1789 copied = fctx.renamed()
1787 copied = fctx.renamed()
1790 if copied:
1788 if copied:
1791 copied = copied[0]
1789 copied = copied[0]
1792 return memfilectx(repo, path, fctx.data(),
1790 return memfilectx(repo, path, fctx.data(),
1793 islink=fctx.islink(), isexec=fctx.isexec(),
1791 islink=fctx.islink(), isexec=fctx.isexec(),
1794 copied=copied, memctx=memctx)
1792 copied=copied, memctx=memctx)
1795 self._filectxfn = getfilectx
1793 self._filectxfn = getfilectx
1796 else:
1794 else:
1797 # "util.cachefunc" reduces invocation of possibly expensive
1795 # "util.cachefunc" reduces invocation of possibly expensive
1798 # "filectxfn" for performance (e.g. converting from another VCS)
1796 # "filectxfn" for performance (e.g. converting from another VCS)
1799 self._filectxfn = util.cachefunc(filectxfn)
1797 self._filectxfn = util.cachefunc(filectxfn)
1800
1798
1801 if extra:
1799 if extra:
1802 self._extra = extra.copy()
1800 self._extra = extra.copy()
1803 else:
1801 else:
1804 self._extra = {}
1802 self._extra = {}
1805
1803
1806 if self._extra.get('branch', '') == '':
1804 if self._extra.get('branch', '') == '':
1807 self._extra['branch'] = 'default'
1805 self._extra['branch'] = 'default'
1808
1806
1809 if editor:
1807 if editor:
1810 self._text = editor(self._repo, self, [])
1808 self._text = editor(self._repo, self, [])
1811 self._repo.savecommitmessage(self._text)
1809 self._repo.savecommitmessage(self._text)
1812
1810
1813 def filectx(self, path, filelog=None):
1811 def filectx(self, path, filelog=None):
1814 """get a file context from the working directory
1812 """get a file context from the working directory
1815
1813
1816 Returns None if file doesn't exist and should be removed."""
1814 Returns None if file doesn't exist and should be removed."""
1817 return self._filectxfn(self._repo, self, path)
1815 return self._filectxfn(self._repo, self, path)
1818
1816
1819 def commit(self):
1817 def commit(self):
1820 """commit context to the repo"""
1818 """commit context to the repo"""
1821 return self._repo.commitctx(self)
1819 return self._repo.commitctx(self)
1822
1820
1823 @propertycache
1821 @propertycache
1824 def _manifest(self):
1822 def _manifest(self):
1825 """generate a manifest based on the return values of filectxfn"""
1823 """generate a manifest based on the return values of filectxfn"""
1826
1824
1827 # keep this simple for now; just worry about p1
1825 # keep this simple for now; just worry about p1
1828 pctx = self._parents[0]
1826 pctx = self._parents[0]
1829 man = pctx.manifest().copy()
1827 man = pctx.manifest().copy()
1830
1828
1831 for f in self._status.modified:
1829 for f in self._status.modified:
1832 p1node = nullid
1830 p1node = nullid
1833 p2node = nullid
1831 p2node = nullid
1834 p = pctx[f].parents() # if file isn't in pctx, check p2?
1832 p = pctx[f].parents() # if file isn't in pctx, check p2?
1835 if len(p) > 0:
1833 if len(p) > 0:
1836 p1node = p[0].node()
1834 p1node = p[0].node()
1837 if len(p) > 1:
1835 if len(p) > 1:
1838 p2node = p[1].node()
1836 p2node = p[1].node()
1839 man[f] = revlog.hash(self[f].data(), p1node, p2node)
1837 man[f] = revlog.hash(self[f].data(), p1node, p2node)
1840
1838
1841 for f in self._status.added:
1839 for f in self._status.added:
1842 man[f] = revlog.hash(self[f].data(), nullid, nullid)
1840 man[f] = revlog.hash(self[f].data(), nullid, nullid)
1843
1841
1844 for f in self._status.removed:
1842 for f in self._status.removed:
1845 if f in man:
1843 if f in man:
1846 del man[f]
1844 del man[f]
1847
1845
1848 return man
1846 return man
1849
1847
1850 @propertycache
1848 @propertycache
1851 def _status(self):
1849 def _status(self):
1852 """Calculate exact status from ``files`` specified at construction
1850 """Calculate exact status from ``files`` specified at construction
1853 """
1851 """
1854 man1 = self.p1().manifest()
1852 man1 = self.p1().manifest()
1855 p2 = self._parents[1]
1853 p2 = self._parents[1]
1856 # "1 < len(self._parents)" can't be used for checking
1854 # "1 < len(self._parents)" can't be used for checking
1857 # existence of the 2nd parent, because "memctx._parents" is
1855 # existence of the 2nd parent, because "memctx._parents" is
1858 # explicitly initialized by the list, of which length is 2.
1856 # explicitly initialized by the list, of which length is 2.
1859 if p2.node() != nullid:
1857 if p2.node() != nullid:
1860 man2 = p2.manifest()
1858 man2 = p2.manifest()
1861 managing = lambda f: f in man1 or f in man2
1859 managing = lambda f: f in man1 or f in man2
1862 else:
1860 else:
1863 managing = lambda f: f in man1
1861 managing = lambda f: f in man1
1864
1862
1865 modified, added, removed = [], [], []
1863 modified, added, removed = [], [], []
1866 for f in self._files:
1864 for f in self._files:
1867 if not managing(f):
1865 if not managing(f):
1868 added.append(f)
1866 added.append(f)
1869 elif self[f]:
1867 elif self[f]:
1870 modified.append(f)
1868 modified.append(f)
1871 else:
1869 else:
1872 removed.append(f)
1870 removed.append(f)
1873
1871
1874 return scmutil.status(modified, added, removed, [], [], [], [])
1872 return scmutil.status(modified, added, removed, [], [], [], [])
1875
1873
1876 class memfilectx(committablefilectx):
1874 class memfilectx(committablefilectx):
1877 """memfilectx represents an in-memory file to commit.
1875 """memfilectx represents an in-memory file to commit.
1878
1876
1879 See memctx and committablefilectx for more details.
1877 See memctx and committablefilectx for more details.
1880 """
1878 """
1881 def __init__(self, repo, path, data, islink=False,
1879 def __init__(self, repo, path, data, islink=False,
1882 isexec=False, copied=None, memctx=None):
1880 isexec=False, copied=None, memctx=None):
1883 """
1881 """
1884 path is the normalized file path relative to repository root.
1882 path is the normalized file path relative to repository root.
1885 data is the file content as a string.
1883 data is the file content as a string.
1886 islink is True if the file is a symbolic link.
1884 islink is True if the file is a symbolic link.
1887 isexec is True if the file is executable.
1885 isexec is True if the file is executable.
1888 copied is the source file path if current file was copied in the
1886 copied is the source file path if current file was copied in the
1889 revision being committed, or None."""
1887 revision being committed, or None."""
1890 super(memfilectx, self).__init__(repo, path, None, memctx)
1888 super(memfilectx, self).__init__(repo, path, None, memctx)
1891 self._data = data
1889 self._data = data
1892 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
1890 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
1893 self._copied = None
1891 self._copied = None
1894 if copied:
1892 if copied:
1895 self._copied = (copied, nullid)
1893 self._copied = (copied, nullid)
1896
1894
1897 def data(self):
1895 def data(self):
1898 return self._data
1896 return self._data
1899 def size(self):
1897 def size(self):
1900 return len(self.data())
1898 return len(self.data())
1901 def flags(self):
1899 def flags(self):
1902 return self._flags
1900 return self._flags
1903 def renamed(self):
1901 def renamed(self):
1904 return self._copied
1902 return self._copied
1905
1903
1906 def remove(self, ignoremissing=False):
1904 def remove(self, ignoremissing=False):
1907 """wraps unlink for a repo's working directory"""
1905 """wraps unlink for a repo's working directory"""
1908 # need to figure out what to do here
1906 # need to figure out what to do here
1909 del self._changectx[self._path]
1907 del self._changectx[self._path]
1910
1908
1911 def write(self, data, flags):
1909 def write(self, data, flags):
1912 """wraps repo.wwrite"""
1910 """wraps repo.wwrite"""
1913 self._data = data
1911 self._data = data
General Comments 0
You need to be logged in to leave comments. Login now