context.status: pass status tuple into _buildstatus...
Martin von Zweigbergk
r23304:dd3f8575 default
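This revision replaces positional indexing into the status tuple inside _buildstatus (s[3], s[4], s[5]) with named attribute access (s.deleted, s.unknown, s.ignored), and seeds basectx.status with an empty scmutil.status instead of a bare list of lists. A minimal sketch of the idea, using a hypothetical namedtuple stand-in for scmutil.status (the real class lives in mercurial/scmutil.py and may differ in detail); only the seven fields visible in this diff are assumed:

    # hedged illustration, not the actual scmutil implementation
    from collections import namedtuple

    status = namedtuple('status', ['modified', 'added', 'removed', 'deleted',
                                   'unknown', 'ignored', 'clean'])

    s = status([], [], ['gone.txt'], [], [], [], [])
    assert s[2] == s.removed    # old positional access still works
    assert s.deleted == s[3]    # named access, as _buildstatus now uses

Because the stand-in is still a tuple, callers that iterate or index the result keep working while new code can use the clearer attribute names.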
@@ -1,1689 +1,1688
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import nullid, nullrev, short, hex, bin
8 from node import nullid, nullrev, short, hex, bin
9 from i18n import _
9 from i18n import _
10 import mdiff, error, util, scmutil, subrepo, patch, encoding, phases
10 import mdiff, error, util, scmutil, subrepo, patch, encoding, phases
11 import match as matchmod
11 import match as matchmod
12 import os, errno, stat
12 import os, errno, stat
13 import obsolete as obsmod
13 import obsolete as obsmod
14 import repoview
14 import repoview
15 import fileset
15 import fileset
16 import revlog
16 import revlog
17
17
18 propertycache = util.propertycache
18 propertycache = util.propertycache
19
19
20 class basectx(object):
20 class basectx(object):
21 """A basectx object represents the common logic for its children:
21 """A basectx object represents the common logic for its children:
22 changectx: read-only context that is already present in the repo,
22 changectx: read-only context that is already present in the repo,
23 workingctx: a context that represents the working directory and can
23 workingctx: a context that represents the working directory and can
24 be committed,
24 be committed,
25 memctx: a context that represents changes in-memory and can also
25 memctx: a context that represents changes in-memory and can also
26 be committed."""
26 be committed."""
27 def __new__(cls, repo, changeid='', *args, **kwargs):
27 def __new__(cls, repo, changeid='', *args, **kwargs):
28 if isinstance(changeid, basectx):
28 if isinstance(changeid, basectx):
29 return changeid
29 return changeid
30
30
31 o = super(basectx, cls).__new__(cls)
31 o = super(basectx, cls).__new__(cls)
32
32
33 o._repo = repo
33 o._repo = repo
34 o._rev = nullrev
34 o._rev = nullrev
35 o._node = nullid
35 o._node = nullid
36
36
37 return o
37 return o
38
38
39 def __str__(self):
39 def __str__(self):
40 return short(self.node())
40 return short(self.node())
41
41
42 def __int__(self):
42 def __int__(self):
43 return self.rev()
43 return self.rev()
44
44
45 def __repr__(self):
45 def __repr__(self):
46 return "<%s %s>" % (type(self).__name__, str(self))
46 return "<%s %s>" % (type(self).__name__, str(self))
47
47
48 def __eq__(self, other):
48 def __eq__(self, other):
49 try:
49 try:
50 return type(self) == type(other) and self._rev == other._rev
50 return type(self) == type(other) and self._rev == other._rev
51 except AttributeError:
51 except AttributeError:
52 return False
52 return False
53
53
54 def __ne__(self, other):
54 def __ne__(self, other):
55 return not (self == other)
55 return not (self == other)
56
56
57 def __contains__(self, key):
57 def __contains__(self, key):
58 return key in self._manifest
58 return key in self._manifest
59
59
60 def __getitem__(self, key):
60 def __getitem__(self, key):
61 return self.filectx(key)
61 return self.filectx(key)
62
62
63 def __iter__(self):
63 def __iter__(self):
64 for f in sorted(self._manifest):
64 for f in sorted(self._manifest):
65 yield f
65 yield f
66
66
67 def _manifestmatches(self, match, s):
67 def _manifestmatches(self, match, s):
68 """generate a new manifest filtered by the match argument
68 """generate a new manifest filtered by the match argument
69
69
70 This method is for internal use only and mainly exists to provide an
70 This method is for internal use only and mainly exists to provide an
71 object oriented way for other contexts to customize the manifest
71 object oriented way for other contexts to customize the manifest
72 generation.
72 generation.
73 """
73 """
74 if match.always():
74 if match.always():
75 return self.manifest().copy()
75 return self.manifest().copy()
76
76
77 files = match.files()
77 files = match.files()
78 if (match.matchfn == match.exact or
78 if (match.matchfn == match.exact or
79 (not match.anypats() and util.all(fn in self for fn in files))):
79 (not match.anypats() and util.all(fn in self for fn in files))):
80 return self.manifest().intersectfiles(files)
80 return self.manifest().intersectfiles(files)
81
81
82 mf = self.manifest().copy()
82 mf = self.manifest().copy()
83 for fn in mf.keys():
83 for fn in mf.keys():
84 if not match(fn):
84 if not match(fn):
85 del mf[fn]
85 del mf[fn]
86 return mf
86 return mf
87
87
88 def _matchstatus(self, other, match):
88 def _matchstatus(self, other, match):
89 """return match.always if match is none
89 """return match.always if match is none
90
90
91 This internal method provides a way for child objects to override the
91 This internal method provides a way for child objects to override the
92 match operator.
92 match operator.
93 """
93 """
94 return match or matchmod.always(self._repo.root, self._repo.getcwd())
94 return match or matchmod.always(self._repo.root, self._repo.getcwd())
95
95
96 def _buildstatus(self, other, s, match, listignored, listclean,
96 def _buildstatus(self, other, s, match, listignored, listclean,
97 listunknown):
97 listunknown):
98 """build a status with respect to another context"""
98 """build a status with respect to another context"""
99 # Load earliest manifest first for caching reasons. More specifically,
99 # Load earliest manifest first for caching reasons. More specifically,
100 # if you have revisions 1000 and 1001, 1001 is probably stored as a
100 # if you have revisions 1000 and 1001, 1001 is probably stored as a
101 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
101 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
102 # 1000 and cache it so that when you read 1001, we just need to apply a
102 # 1000 and cache it so that when you read 1001, we just need to apply a
103 # delta to what's in the cache. So that's one full reconstruction + one
103 # delta to what's in the cache. So that's one full reconstruction + one
104 # delta application.
104 # delta application.
105 if self.rev() is not None and self.rev() < other.rev():
105 if self.rev() is not None and self.rev() < other.rev():
106 self.manifest()
106 self.manifest()
107 mf1 = other._manifestmatches(match, s)
107 mf1 = other._manifestmatches(match, s)
108 mf2 = self._manifestmatches(match, s)
108 mf2 = self._manifestmatches(match, s)
109
109
110 modified, added, clean = [], [], []
110 modified, added, clean = [], [], []
111 deleted, unknown, ignored = s[3], s[4], s[5]
111 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
112 deletedset = set(deleted)
112 deletedset = set(deleted)
113 withflags = mf1.withflags() | mf2.withflags()
113 withflags = mf1.withflags() | mf2.withflags()
114 for fn, mf2node in mf2.iteritems():
114 for fn, mf2node in mf2.iteritems():
115 if fn in mf1:
115 if fn in mf1:
116 if (fn not in deletedset and
116 if (fn not in deletedset and
117 ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
117 ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
118 (mf1[fn] != mf2node and
118 (mf1[fn] != mf2node and
119 (mf2node or self[fn].cmp(other[fn]))))):
119 (mf2node or self[fn].cmp(other[fn]))))):
120 modified.append(fn)
120 modified.append(fn)
121 elif listclean:
121 elif listclean:
122 clean.append(fn)
122 clean.append(fn)
123 del mf1[fn]
123 del mf1[fn]
124 elif fn not in deletedset:
124 elif fn not in deletedset:
125 added.append(fn)
125 added.append(fn)
126 removed = mf1.keys()
126 removed = mf1.keys()
127 if removed:
127 if removed:
128 # need to filter files if they are already reported as removed
128 # need to filter files if they are already reported as removed
129 unknown = [fn for fn in unknown if fn not in mf1]
129 unknown = [fn for fn in unknown if fn not in mf1]
130 ignored = [fn for fn in ignored if fn not in mf1]
130 ignored = [fn for fn in ignored if fn not in mf1]
131
131
132 return scmutil.status(modified, added, removed, deleted, unknown,
132 return scmutil.status(modified, added, removed, deleted, unknown,
133 ignored, clean)
133 ignored, clean)
134
134
135 @propertycache
135 @propertycache
136 def substate(self):
136 def substate(self):
137 return subrepo.state(self, self._repo.ui)
137 return subrepo.state(self, self._repo.ui)
138
138
139 def subrev(self, subpath):
139 def subrev(self, subpath):
140 return self.substate[subpath][1]
140 return self.substate[subpath][1]
141
141
142 def rev(self):
142 def rev(self):
143 return self._rev
143 return self._rev
144 def node(self):
144 def node(self):
145 return self._node
145 return self._node
146 def hex(self):
146 def hex(self):
147 return hex(self.node())
147 return hex(self.node())
148 def manifest(self):
148 def manifest(self):
149 return self._manifest
149 return self._manifest
150 def phasestr(self):
150 def phasestr(self):
151 return phases.phasenames[self.phase()]
151 return phases.phasenames[self.phase()]
152 def mutable(self):
152 def mutable(self):
153 return self.phase() > phases.public
153 return self.phase() > phases.public
154
154
155 def getfileset(self, expr):
155 def getfileset(self, expr):
156 return fileset.getfileset(self, expr)
156 return fileset.getfileset(self, expr)
157
157
158 def obsolete(self):
158 def obsolete(self):
159 """True if the changeset is obsolete"""
159 """True if the changeset is obsolete"""
160 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
160 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
161
161
162 def extinct(self):
162 def extinct(self):
163 """True if the changeset is extinct"""
163 """True if the changeset is extinct"""
164 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
164 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
165
165
166 def unstable(self):
166 def unstable(self):
167 """True if the changeset is not obsolete but it's ancestor are"""
167 """True if the changeset is not obsolete but it's ancestor are"""
168 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
168 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
169
169
170 def bumped(self):
170 def bumped(self):
171 """True if the changeset try to be a successor of a public changeset
171 """True if the changeset try to be a successor of a public changeset
172
172
173 Only non-public and non-obsolete changesets may be bumped.
173 Only non-public and non-obsolete changesets may be bumped.
174 """
174 """
175 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
175 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
176
176
177 def divergent(self):
177 def divergent(self):
178 """Is a successors of a changeset with multiple possible successors set
178 """Is a successors of a changeset with multiple possible successors set
179
179
180 Only non-public and non-obsolete changesets may be divergent.
180 Only non-public and non-obsolete changesets may be divergent.
181 """
181 """
182 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
182 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
183
183
184 def troubled(self):
184 def troubled(self):
185 """True if the changeset is either unstable, bumped or divergent"""
185 """True if the changeset is either unstable, bumped or divergent"""
186 return self.unstable() or self.bumped() or self.divergent()
186 return self.unstable() or self.bumped() or self.divergent()
187
187
188 def troubles(self):
188 def troubles(self):
189 """return the list of troubles affecting this changesets.
189 """return the list of troubles affecting this changesets.
190
190
191 Troubles are returned as strings. possible values are:
191 Troubles are returned as strings. possible values are:
192 - unstable,
192 - unstable,
193 - bumped,
193 - bumped,
194 - divergent.
194 - divergent.
195 """
195 """
196 troubles = []
196 troubles = []
197 if self.unstable():
197 if self.unstable():
198 troubles.append('unstable')
198 troubles.append('unstable')
199 if self.bumped():
199 if self.bumped():
200 troubles.append('bumped')
200 troubles.append('bumped')
201 if self.divergent():
201 if self.divergent():
202 troubles.append('divergent')
202 troubles.append('divergent')
203 return troubles
203 return troubles
204
204
205 def parents(self):
205 def parents(self):
206 """return contexts for each parent changeset"""
206 """return contexts for each parent changeset"""
207 return self._parents
207 return self._parents
208
208
209 def p1(self):
209 def p1(self):
210 return self._parents[0]
210 return self._parents[0]
211
211
212 def p2(self):
212 def p2(self):
213 if len(self._parents) == 2:
213 if len(self._parents) == 2:
214 return self._parents[1]
214 return self._parents[1]
215 return changectx(self._repo, -1)
215 return changectx(self._repo, -1)
216
216
217 def _fileinfo(self, path):
217 def _fileinfo(self, path):
218 if '_manifest' in self.__dict__:
218 if '_manifest' in self.__dict__:
219 try:
219 try:
220 return self._manifest[path], self._manifest.flags(path)
220 return self._manifest[path], self._manifest.flags(path)
221 except KeyError:
221 except KeyError:
222 raise error.ManifestLookupError(self._node, path,
222 raise error.ManifestLookupError(self._node, path,
223 _('not found in manifest'))
223 _('not found in manifest'))
224 if '_manifestdelta' in self.__dict__ or path in self.files():
224 if '_manifestdelta' in self.__dict__ or path in self.files():
225 if path in self._manifestdelta:
225 if path in self._manifestdelta:
226 return (self._manifestdelta[path],
226 return (self._manifestdelta[path],
227 self._manifestdelta.flags(path))
227 self._manifestdelta.flags(path))
228 node, flag = self._repo.manifest.find(self._changeset[0], path)
228 node, flag = self._repo.manifest.find(self._changeset[0], path)
229 if not node:
229 if not node:
230 raise error.ManifestLookupError(self._node, path,
230 raise error.ManifestLookupError(self._node, path,
231 _('not found in manifest'))
231 _('not found in manifest'))
232
232
233 return node, flag
233 return node, flag
234
234
235 def filenode(self, path):
235 def filenode(self, path):
236 return self._fileinfo(path)[0]
236 return self._fileinfo(path)[0]
237
237
238 def flags(self, path):
238 def flags(self, path):
239 try:
239 try:
240 return self._fileinfo(path)[1]
240 return self._fileinfo(path)[1]
241 except error.LookupError:
241 except error.LookupError:
242 return ''
242 return ''
243
243
244 def sub(self, path):
244 def sub(self, path):
245 return subrepo.subrepo(self, path)
245 return subrepo.subrepo(self, path)
246
246
247 def match(self, pats=[], include=None, exclude=None, default='glob'):
247 def match(self, pats=[], include=None, exclude=None, default='glob'):
248 r = self._repo
248 r = self._repo
249 return matchmod.match(r.root, r.getcwd(), pats,
249 return matchmod.match(r.root, r.getcwd(), pats,
250 include, exclude, default,
250 include, exclude, default,
251 auditor=r.auditor, ctx=self)
251 auditor=r.auditor, ctx=self)
252
252
253 def diff(self, ctx2=None, match=None, **opts):
253 def diff(self, ctx2=None, match=None, **opts):
254 """Returns a diff generator for the given contexts and matcher"""
254 """Returns a diff generator for the given contexts and matcher"""
255 if ctx2 is None:
255 if ctx2 is None:
256 ctx2 = self.p1()
256 ctx2 = self.p1()
257 if ctx2 is not None:
257 if ctx2 is not None:
258 ctx2 = self._repo[ctx2]
258 ctx2 = self._repo[ctx2]
259 diffopts = patch.diffopts(self._repo.ui, opts)
259 diffopts = patch.diffopts(self._repo.ui, opts)
260 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
260 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
261
261
262 @propertycache
262 @propertycache
263 def _dirs(self):
263 def _dirs(self):
264 return scmutil.dirs(self._manifest)
264 return scmutil.dirs(self._manifest)
265
265
266 def dirs(self):
266 def dirs(self):
267 return self._dirs
267 return self._dirs
268
268
269 def dirty(self, missing=False, merge=True, branch=True):
269 def dirty(self, missing=False, merge=True, branch=True):
270 return False
270 return False
271
271
272 def status(self, other=None, match=None, listignored=False,
272 def status(self, other=None, match=None, listignored=False,
273 listclean=False, listunknown=False, listsubrepos=False):
273 listclean=False, listunknown=False, listsubrepos=False):
274 """return status of files between two nodes or node and working
274 """return status of files between two nodes or node and working
275 directory.
275 directory.
276
276
277 If other is None, compare this node with working directory.
277 If other is None, compare this node with working directory.
278
278
279 returns (modified, added, removed, deleted, unknown, ignored, clean)
279 returns (modified, added, removed, deleted, unknown, ignored, clean)
280 """
280 """
281
281
282 ctx1 = self
282 ctx1 = self
283 ctx2 = self._repo[other]
283 ctx2 = self._repo[other]
284
284
285 # This next code block is, admittedly, fragile logic that tests for
285 # This next code block is, admittedly, fragile logic that tests for
286 # reversing the contexts and wouldn't need to exist if it weren't for
286 # reversing the contexts and wouldn't need to exist if it weren't for
287 # the fast (and common) code path of comparing the working directory
287 # the fast (and common) code path of comparing the working directory
288 # with its first parent.
288 # with its first parent.
289 #
289 #
290 # What we're aiming for here is the ability to call:
290 # What we're aiming for here is the ability to call:
291 #
291 #
292 # workingctx.status(parentctx)
292 # workingctx.status(parentctx)
293 #
293 #
294 # If we always built the manifest for each context and compared those,
294 # If we always built the manifest for each context and compared those,
295 # then we'd be done. But the special case of the above call means we
295 # then we'd be done. But the special case of the above call means we
296 # just copy the manifest of the parent.
296 # just copy the manifest of the parent.
297 reversed = False
297 reversed = False
298 if (not isinstance(ctx1, changectx)
298 if (not isinstance(ctx1, changectx)
299 and isinstance(ctx2, changectx)):
299 and isinstance(ctx2, changectx)):
300 reversed = True
300 reversed = True
301 ctx1, ctx2 = ctx2, ctx1
301 ctx1, ctx2 = ctx2, ctx1
302
302
303 match = ctx2._matchstatus(ctx1, match)
303 match = ctx2._matchstatus(ctx1, match)
304 r = [[], [], [], [], [], [], []]
304 r = scmutil.status([], [], [], [], [], [], [])
305 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
305 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
306 listunknown)
306 listunknown)
307
307
308 if reversed:
308 if reversed:
309 # Reverse added and removed. Clear deleted, unknown and ignored as
309 # Reverse added and removed. Clear deleted, unknown and ignored as
310 # these make no sense to reverse.
310 # these make no sense to reverse.
311 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
311 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
312 r.clean)
312 r.clean)
313
313
314 if listsubrepos:
314 if listsubrepos:
315 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
315 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
316 rev2 = ctx2.subrev(subpath)
316 rev2 = ctx2.subrev(subpath)
317 try:
317 try:
318 submatch = matchmod.narrowmatcher(subpath, match)
318 submatch = matchmod.narrowmatcher(subpath, match)
319 s = sub.status(rev2, match=submatch, ignored=listignored,
319 s = sub.status(rev2, match=submatch, ignored=listignored,
320 clean=listclean, unknown=listunknown,
320 clean=listclean, unknown=listunknown,
321 listsubrepos=True)
321 listsubrepos=True)
322 for rfiles, sfiles in zip(r, s):
322 for rfiles, sfiles in zip(r, s):
323 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
323 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
324 except error.LookupError:
324 except error.LookupError:
325 self._repo.ui.status(_("skipping missing "
325 self._repo.ui.status(_("skipping missing "
326 "subrepository: %s\n") % subpath)
326 "subrepository: %s\n") % subpath)
327
327
328 for l in r:
328 for l in r:
329 l.sort()
329 l.sort()
330
330
331 return r
331 return r
332
332
333
333
334 def makememctx(repo, parents, text, user, date, branch, files, store,
334 def makememctx(repo, parents, text, user, date, branch, files, store,
335 editor=None):
335 editor=None):
336 def getfilectx(repo, memctx, path):
336 def getfilectx(repo, memctx, path):
337 data, mode, copied = store.getfile(path)
337 data, mode, copied = store.getfile(path)
338 if data is None:
338 if data is None:
339 return None
339 return None
340 islink, isexec = mode
340 islink, isexec = mode
341 return memfilectx(repo, path, data, islink=islink, isexec=isexec,
341 return memfilectx(repo, path, data, islink=islink, isexec=isexec,
342 copied=copied, memctx=memctx)
342 copied=copied, memctx=memctx)
343 extra = {}
343 extra = {}
344 if branch:
344 if branch:
345 extra['branch'] = encoding.fromlocal(branch)
345 extra['branch'] = encoding.fromlocal(branch)
346 ctx = memctx(repo, parents, text, files, getfilectx, user,
346 ctx = memctx(repo, parents, text, files, getfilectx, user,
347 date, extra, editor)
347 date, extra, editor)
348 return ctx
348 return ctx
349
349
350 class changectx(basectx):
350 class changectx(basectx):
351 """A changecontext object makes access to data related to a particular
351 """A changecontext object makes access to data related to a particular
352 changeset convenient. It represents a read-only context already present in
352 changeset convenient. It represents a read-only context already present in
353 the repo."""
353 the repo."""
354 def __init__(self, repo, changeid=''):
354 def __init__(self, repo, changeid=''):
355 """changeid is a revision number, node, or tag"""
355 """changeid is a revision number, node, or tag"""
356
356
357 # since basectx.__new__ already took care of copying the object, we
357 # since basectx.__new__ already took care of copying the object, we
358 # don't need to do anything in __init__, so we just exit here
358 # don't need to do anything in __init__, so we just exit here
359 if isinstance(changeid, basectx):
359 if isinstance(changeid, basectx):
360 return
360 return
361
361
362 if changeid == '':
362 if changeid == '':
363 changeid = '.'
363 changeid = '.'
364 self._repo = repo
364 self._repo = repo
365
365
366 try:
366 try:
367 if isinstance(changeid, int):
367 if isinstance(changeid, int):
368 self._node = repo.changelog.node(changeid)
368 self._node = repo.changelog.node(changeid)
369 self._rev = changeid
369 self._rev = changeid
370 return
370 return
371 if isinstance(changeid, long):
371 if isinstance(changeid, long):
372 changeid = str(changeid)
372 changeid = str(changeid)
373 if changeid == '.':
373 if changeid == '.':
374 self._node = repo.dirstate.p1()
374 self._node = repo.dirstate.p1()
375 self._rev = repo.changelog.rev(self._node)
375 self._rev = repo.changelog.rev(self._node)
376 return
376 return
377 if changeid == 'null':
377 if changeid == 'null':
378 self._node = nullid
378 self._node = nullid
379 self._rev = nullrev
379 self._rev = nullrev
380 return
380 return
381 if changeid == 'tip':
381 if changeid == 'tip':
382 self._node = repo.changelog.tip()
382 self._node = repo.changelog.tip()
383 self._rev = repo.changelog.rev(self._node)
383 self._rev = repo.changelog.rev(self._node)
384 return
384 return
385 if len(changeid) == 20:
385 if len(changeid) == 20:
386 try:
386 try:
387 self._node = changeid
387 self._node = changeid
388 self._rev = repo.changelog.rev(changeid)
388 self._rev = repo.changelog.rev(changeid)
389 return
389 return
390 except error.FilteredRepoLookupError:
390 except error.FilteredRepoLookupError:
391 raise
391 raise
392 except LookupError:
392 except LookupError:
393 pass
393 pass
394
394
395 try:
395 try:
396 r = int(changeid)
396 r = int(changeid)
397 if str(r) != changeid:
397 if str(r) != changeid:
398 raise ValueError
398 raise ValueError
399 l = len(repo.changelog)
399 l = len(repo.changelog)
400 if r < 0:
400 if r < 0:
401 r += l
401 r += l
402 if r < 0 or r >= l:
402 if r < 0 or r >= l:
403 raise ValueError
403 raise ValueError
404 self._rev = r
404 self._rev = r
405 self._node = repo.changelog.node(r)
405 self._node = repo.changelog.node(r)
406 return
406 return
407 except error.FilteredIndexError:
407 except error.FilteredIndexError:
408 raise
408 raise
409 except (ValueError, OverflowError, IndexError):
409 except (ValueError, OverflowError, IndexError):
410 pass
410 pass
411
411
412 if len(changeid) == 40:
412 if len(changeid) == 40:
413 try:
413 try:
414 self._node = bin(changeid)
414 self._node = bin(changeid)
415 self._rev = repo.changelog.rev(self._node)
415 self._rev = repo.changelog.rev(self._node)
416 return
416 return
417 except error.FilteredLookupError:
417 except error.FilteredLookupError:
418 raise
418 raise
419 except (TypeError, LookupError):
419 except (TypeError, LookupError):
420 pass
420 pass
421
421
422 if changeid in repo._bookmarks:
422 if changeid in repo._bookmarks:
423 self._node = repo._bookmarks[changeid]
423 self._node = repo._bookmarks[changeid]
424 self._rev = repo.changelog.rev(self._node)
424 self._rev = repo.changelog.rev(self._node)
425 return
425 return
426 if changeid in repo._tagscache.tags:
426 if changeid in repo._tagscache.tags:
427 self._node = repo._tagscache.tags[changeid]
427 self._node = repo._tagscache.tags[changeid]
428 self._rev = repo.changelog.rev(self._node)
428 self._rev = repo.changelog.rev(self._node)
429 return
429 return
430 try:
430 try:
431 self._node = repo.branchtip(changeid)
431 self._node = repo.branchtip(changeid)
432 self._rev = repo.changelog.rev(self._node)
432 self._rev = repo.changelog.rev(self._node)
433 return
433 return
434 except error.FilteredRepoLookupError:
434 except error.FilteredRepoLookupError:
435 raise
435 raise
436 except error.RepoLookupError:
436 except error.RepoLookupError:
437 pass
437 pass
438
438
439 self._node = repo.unfiltered().changelog._partialmatch(changeid)
439 self._node = repo.unfiltered().changelog._partialmatch(changeid)
440 if self._node is not None:
440 if self._node is not None:
441 self._rev = repo.changelog.rev(self._node)
441 self._rev = repo.changelog.rev(self._node)
442 return
442 return
443
443
444 # lookup failed
444 # lookup failed
445 # check if it might have come from damaged dirstate
445 # check if it might have come from damaged dirstate
446 #
446 #
447 # XXX we could avoid the unfiltered if we had a recognizable
447 # XXX we could avoid the unfiltered if we had a recognizable
448 # exception for filtered changeset access
448 # exception for filtered changeset access
449 if changeid in repo.unfiltered().dirstate.parents():
449 if changeid in repo.unfiltered().dirstate.parents():
450 msg = _("working directory has unknown parent '%s'!")
450 msg = _("working directory has unknown parent '%s'!")
451 raise error.Abort(msg % short(changeid))
451 raise error.Abort(msg % short(changeid))
452 try:
452 try:
453 if len(changeid) == 20:
453 if len(changeid) == 20:
454 changeid = hex(changeid)
454 changeid = hex(changeid)
455 except TypeError:
455 except TypeError:
456 pass
456 pass
457 except (error.FilteredIndexError, error.FilteredLookupError,
457 except (error.FilteredIndexError, error.FilteredLookupError,
458 error.FilteredRepoLookupError):
458 error.FilteredRepoLookupError):
459 if repo.filtername == 'visible':
459 if repo.filtername == 'visible':
460 msg = _("hidden revision '%s'") % changeid
460 msg = _("hidden revision '%s'") % changeid
461 hint = _('use --hidden to access hidden revisions')
461 hint = _('use --hidden to access hidden revisions')
462 raise error.FilteredRepoLookupError(msg, hint=hint)
462 raise error.FilteredRepoLookupError(msg, hint=hint)
463 msg = _("filtered revision '%s' (not in '%s' subset)")
463 msg = _("filtered revision '%s' (not in '%s' subset)")
464 msg %= (changeid, repo.filtername)
464 msg %= (changeid, repo.filtername)
465 raise error.FilteredRepoLookupError(msg)
465 raise error.FilteredRepoLookupError(msg)
466 except IndexError:
466 except IndexError:
467 pass
467 pass
468 raise error.RepoLookupError(
468 raise error.RepoLookupError(
469 _("unknown revision '%s'") % changeid)
469 _("unknown revision '%s'") % changeid)
470
470
471 def __hash__(self):
471 def __hash__(self):
472 try:
472 try:
473 return hash(self._rev)
473 return hash(self._rev)
474 except AttributeError:
474 except AttributeError:
475 return id(self)
475 return id(self)
476
476
477 def __nonzero__(self):
477 def __nonzero__(self):
478 return self._rev != nullrev
478 return self._rev != nullrev
479
479
480 @propertycache
480 @propertycache
481 def _changeset(self):
481 def _changeset(self):
482 return self._repo.changelog.read(self.rev())
482 return self._repo.changelog.read(self.rev())
483
483
484 @propertycache
484 @propertycache
485 def _manifest(self):
485 def _manifest(self):
486 return self._repo.manifest.read(self._changeset[0])
486 return self._repo.manifest.read(self._changeset[0])
487
487
488 @propertycache
488 @propertycache
489 def _manifestdelta(self):
489 def _manifestdelta(self):
490 return self._repo.manifest.readdelta(self._changeset[0])
490 return self._repo.manifest.readdelta(self._changeset[0])
491
491
492 @propertycache
492 @propertycache
493 def _parents(self):
493 def _parents(self):
494 p = self._repo.changelog.parentrevs(self._rev)
494 p = self._repo.changelog.parentrevs(self._rev)
495 if p[1] == nullrev:
495 if p[1] == nullrev:
496 p = p[:-1]
496 p = p[:-1]
497 return [changectx(self._repo, x) for x in p]
497 return [changectx(self._repo, x) for x in p]
498
498
499 def changeset(self):
499 def changeset(self):
500 return self._changeset
500 return self._changeset
501 def manifestnode(self):
501 def manifestnode(self):
502 return self._changeset[0]
502 return self._changeset[0]
503
503
504 def user(self):
504 def user(self):
505 return self._changeset[1]
505 return self._changeset[1]
506 def date(self):
506 def date(self):
507 return self._changeset[2]
507 return self._changeset[2]
508 def files(self):
508 def files(self):
509 return self._changeset[3]
509 return self._changeset[3]
510 def description(self):
510 def description(self):
511 return self._changeset[4]
511 return self._changeset[4]
512 def branch(self):
512 def branch(self):
513 return encoding.tolocal(self._changeset[5].get("branch"))
513 return encoding.tolocal(self._changeset[5].get("branch"))
514 def closesbranch(self):
514 def closesbranch(self):
515 return 'close' in self._changeset[5]
515 return 'close' in self._changeset[5]
516 def extra(self):
516 def extra(self):
517 return self._changeset[5]
517 return self._changeset[5]
518 def tags(self):
518 def tags(self):
519 return self._repo.nodetags(self._node)
519 return self._repo.nodetags(self._node)
520 def bookmarks(self):
520 def bookmarks(self):
521 return self._repo.nodebookmarks(self._node)
521 return self._repo.nodebookmarks(self._node)
522 def phase(self):
522 def phase(self):
523 return self._repo._phasecache.phase(self._repo, self._rev)
523 return self._repo._phasecache.phase(self._repo, self._rev)
524 def hidden(self):
524 def hidden(self):
525 return self._rev in repoview.filterrevs(self._repo, 'visible')
525 return self._rev in repoview.filterrevs(self._repo, 'visible')
526
526
527 def children(self):
527 def children(self):
528 """return contexts for each child changeset"""
528 """return contexts for each child changeset"""
529 c = self._repo.changelog.children(self._node)
529 c = self._repo.changelog.children(self._node)
530 return [changectx(self._repo, x) for x in c]
530 return [changectx(self._repo, x) for x in c]
531
531
532 def ancestors(self):
532 def ancestors(self):
533 for a in self._repo.changelog.ancestors([self._rev]):
533 for a in self._repo.changelog.ancestors([self._rev]):
534 yield changectx(self._repo, a)
534 yield changectx(self._repo, a)
535
535
536 def descendants(self):
536 def descendants(self):
537 for d in self._repo.changelog.descendants([self._rev]):
537 for d in self._repo.changelog.descendants([self._rev]):
538 yield changectx(self._repo, d)
538 yield changectx(self._repo, d)
539
539
540 def filectx(self, path, fileid=None, filelog=None):
540 def filectx(self, path, fileid=None, filelog=None):
541 """get a file context from this changeset"""
541 """get a file context from this changeset"""
542 if fileid is None:
542 if fileid is None:
543 fileid = self.filenode(path)
543 fileid = self.filenode(path)
544 return filectx(self._repo, path, fileid=fileid,
544 return filectx(self._repo, path, fileid=fileid,
545 changectx=self, filelog=filelog)
545 changectx=self, filelog=filelog)
546
546
547 def ancestor(self, c2, warn=False):
547 def ancestor(self, c2, warn=False):
548 """return the "best" ancestor context of self and c2
548 """return the "best" ancestor context of self and c2
549
549
550 If there are multiple candidates, it will show a message and check
550 If there are multiple candidates, it will show a message and check
551 merge.preferancestor configuration before falling back to the
551 merge.preferancestor configuration before falling back to the
552 revlog ancestor."""
552 revlog ancestor."""
553 # deal with workingctxs
553 # deal with workingctxs
554 n2 = c2._node
554 n2 = c2._node
555 if n2 is None:
555 if n2 is None:
556 n2 = c2._parents[0]._node
556 n2 = c2._parents[0]._node
557 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
557 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
558 if not cahs:
558 if not cahs:
559 anc = nullid
559 anc = nullid
560 elif len(cahs) == 1:
560 elif len(cahs) == 1:
561 anc = cahs[0]
561 anc = cahs[0]
562 else:
562 else:
563 for r in self._repo.ui.configlist('merge', 'preferancestor'):
563 for r in self._repo.ui.configlist('merge', 'preferancestor'):
564 try:
564 try:
565 ctx = changectx(self._repo, r)
565 ctx = changectx(self._repo, r)
566 except error.RepoLookupError:
566 except error.RepoLookupError:
567 continue
567 continue
568 anc = ctx.node()
568 anc = ctx.node()
569 if anc in cahs:
569 if anc in cahs:
570 break
570 break
571 else:
571 else:
572 anc = self._repo.changelog.ancestor(self._node, n2)
572 anc = self._repo.changelog.ancestor(self._node, n2)
573 if warn:
573 if warn:
574 self._repo.ui.status(
574 self._repo.ui.status(
575 (_("note: using %s as ancestor of %s and %s\n") %
575 (_("note: using %s as ancestor of %s and %s\n") %
576 (short(anc), short(self._node), short(n2))) +
576 (short(anc), short(self._node), short(n2))) +
577 ''.join(_(" alternatively, use --config "
577 ''.join(_(" alternatively, use --config "
578 "merge.preferancestor=%s\n") %
578 "merge.preferancestor=%s\n") %
579 short(n) for n in sorted(cahs) if n != anc))
579 short(n) for n in sorted(cahs) if n != anc))
580 return changectx(self._repo, anc)
580 return changectx(self._repo, anc)
581
581
582 def descendant(self, other):
582 def descendant(self, other):
583 """True if other is descendant of this changeset"""
583 """True if other is descendant of this changeset"""
584 return self._repo.changelog.descendant(self._rev, other._rev)
584 return self._repo.changelog.descendant(self._rev, other._rev)
585
585
586 def walk(self, match):
586 def walk(self, match):
587 fset = set(match.files())
587 fset = set(match.files())
588 # for dirstate.walk, files=['.'] means "walk the whole tree".
588 # for dirstate.walk, files=['.'] means "walk the whole tree".
589 # follow that here, too
589 # follow that here, too
590 fset.discard('.')
590 fset.discard('.')
591
591
592 # avoid the entire walk if we're only looking for specific files
592 # avoid the entire walk if we're only looking for specific files
593 if fset and not match.anypats():
593 if fset and not match.anypats():
594 if util.all([fn in self for fn in fset]):
594 if util.all([fn in self for fn in fset]):
595 for fn in sorted(fset):
595 for fn in sorted(fset):
596 if match(fn):
596 if match(fn):
597 yield fn
597 yield fn
598 raise StopIteration
598 raise StopIteration
599
599
600 for fn in self:
600 for fn in self:
601 if fn in fset:
601 if fn in fset:
602 # specified pattern is the exact name
602 # specified pattern is the exact name
603 fset.remove(fn)
603 fset.remove(fn)
604 if match(fn):
604 if match(fn):
605 yield fn
605 yield fn
606 for fn in sorted(fset):
606 for fn in sorted(fset):
607 if fn in self._dirs:
607 if fn in self._dirs:
608 # specified pattern is a directory
608 # specified pattern is a directory
609 continue
609 continue
610 match.bad(fn, _('no such file in rev %s') % self)
610 match.bad(fn, _('no such file in rev %s') % self)
611
611
612 def matches(self, match):
612 def matches(self, match):
613 return self.walk(match)
613 return self.walk(match)
614
614
615 class basefilectx(object):
615 class basefilectx(object):
616 """A filecontext object represents the common logic for its children:
616 """A filecontext object represents the common logic for its children:
617 filectx: read-only access to a filerevision that is already present
617 filectx: read-only access to a filerevision that is already present
618 in the repo,
618 in the repo,
619 workingfilectx: a filecontext that represents files from the working
619 workingfilectx: a filecontext that represents files from the working
620 directory,
620 directory,
621 memfilectx: a filecontext that represents files in-memory."""
621 memfilectx: a filecontext that represents files in-memory."""
622 def __new__(cls, repo, path, *args, **kwargs):
622 def __new__(cls, repo, path, *args, **kwargs):
623 return super(basefilectx, cls).__new__(cls)
623 return super(basefilectx, cls).__new__(cls)
624
624
625 @propertycache
625 @propertycache
626 def _filelog(self):
626 def _filelog(self):
627 return self._repo.file(self._path)
627 return self._repo.file(self._path)
628
628
629 @propertycache
629 @propertycache
630 def _changeid(self):
630 def _changeid(self):
631 if '_changeid' in self.__dict__:
631 if '_changeid' in self.__dict__:
632 return self._changeid
632 return self._changeid
633 elif '_changectx' in self.__dict__:
633 elif '_changectx' in self.__dict__:
634 return self._changectx.rev()
634 return self._changectx.rev()
635 else:
635 else:
636 return self._filelog.linkrev(self._filerev)
636 return self._filelog.linkrev(self._filerev)
637
637
638 @propertycache
638 @propertycache
639 def _filenode(self):
639 def _filenode(self):
640 if '_fileid' in self.__dict__:
640 if '_fileid' in self.__dict__:
641 return self._filelog.lookup(self._fileid)
641 return self._filelog.lookup(self._fileid)
642 else:
642 else:
643 return self._changectx.filenode(self._path)
643 return self._changectx.filenode(self._path)
644
644
645 @propertycache
645 @propertycache
646 def _filerev(self):
646 def _filerev(self):
647 return self._filelog.rev(self._filenode)
647 return self._filelog.rev(self._filenode)
648
648
649 @propertycache
649 @propertycache
650 def _repopath(self):
650 def _repopath(self):
651 return self._path
651 return self._path
652
652
653 def __nonzero__(self):
653 def __nonzero__(self):
654 try:
654 try:
655 self._filenode
655 self._filenode
656 return True
656 return True
657 except error.LookupError:
657 except error.LookupError:
658 # file is missing
658 # file is missing
659 return False
659 return False
660
660
661 def __str__(self):
661 def __str__(self):
662 return "%s@%s" % (self.path(), self._changectx)
662 return "%s@%s" % (self.path(), self._changectx)
663
663
664 def __repr__(self):
664 def __repr__(self):
665 return "<%s %s>" % (type(self).__name__, str(self))
665 return "<%s %s>" % (type(self).__name__, str(self))
666
666
667 def __hash__(self):
667 def __hash__(self):
668 try:
668 try:
669 return hash((self._path, self._filenode))
669 return hash((self._path, self._filenode))
670 except AttributeError:
670 except AttributeError:
671 return id(self)
671 return id(self)
672
672
673 def __eq__(self, other):
673 def __eq__(self, other):
674 try:
674 try:
675 return (type(self) == type(other) and self._path == other._path
675 return (type(self) == type(other) and self._path == other._path
676 and self._filenode == other._filenode)
676 and self._filenode == other._filenode)
677 except AttributeError:
677 except AttributeError:
678 return False
678 return False
679
679
680 def __ne__(self, other):
680 def __ne__(self, other):
681 return not (self == other)
681 return not (self == other)
682
682
683 def filerev(self):
683 def filerev(self):
684 return self._filerev
684 return self._filerev
685 def filenode(self):
685 def filenode(self):
686 return self._filenode
686 return self._filenode
687 def flags(self):
687 def flags(self):
688 return self._changectx.flags(self._path)
688 return self._changectx.flags(self._path)
689 def filelog(self):
689 def filelog(self):
690 return self._filelog
690 return self._filelog
691 def rev(self):
691 def rev(self):
692 return self._changeid
692 return self._changeid
693 def linkrev(self):
693 def linkrev(self):
694 return self._filelog.linkrev(self._filerev)
694 return self._filelog.linkrev(self._filerev)
695 def node(self):
695 def node(self):
696 return self._changectx.node()
696 return self._changectx.node()
697 def hex(self):
697 def hex(self):
698 return self._changectx.hex()
698 return self._changectx.hex()
699 def user(self):
699 def user(self):
700 return self._changectx.user()
700 return self._changectx.user()
701 def date(self):
701 def date(self):
702 return self._changectx.date()
702 return self._changectx.date()
703 def files(self):
703 def files(self):
704 return self._changectx.files()
704 return self._changectx.files()
705 def description(self):
705 def description(self):
706 return self._changectx.description()
706 return self._changectx.description()
707 def branch(self):
707 def branch(self):
708 return self._changectx.branch()
708 return self._changectx.branch()
709 def extra(self):
709 def extra(self):
710 return self._changectx.extra()
710 return self._changectx.extra()
711 def phase(self):
711 def phase(self):
712 return self._changectx.phase()
712 return self._changectx.phase()
713 def phasestr(self):
713 def phasestr(self):
714 return self._changectx.phasestr()
714 return self._changectx.phasestr()
715 def manifest(self):
715 def manifest(self):
716 return self._changectx.manifest()
716 return self._changectx.manifest()
717 def changectx(self):
717 def changectx(self):
718 return self._changectx
718 return self._changectx
719
719
720 def path(self):
720 def path(self):
721 return self._path
721 return self._path
722
722
723 def isbinary(self):
723 def isbinary(self):
724 try:
724 try:
725 return util.binary(self.data())
725 return util.binary(self.data())
726 except IOError:
726 except IOError:
727 return False
727 return False
728 def isexec(self):
728 def isexec(self):
729 return 'x' in self.flags()
729 return 'x' in self.flags()
730 def islink(self):
730 def islink(self):
731 return 'l' in self.flags()
731 return 'l' in self.flags()
732
732
733 def cmp(self, fctx):
733 def cmp(self, fctx):
734 """compare with other file context
734 """compare with other file context
735
735
736 returns True if different than fctx.
736 returns True if different than fctx.
737 """
737 """
738 if (fctx._filerev is None
738 if (fctx._filerev is None
739 and (self._repo._encodefilterpats
739 and (self._repo._encodefilterpats
740 # if file data starts with '\1\n', empty metadata block is
740 # if file data starts with '\1\n', empty metadata block is
741 # prepended, which adds 4 bytes to filelog.size().
741 # prepended, which adds 4 bytes to filelog.size().
742 or self.size() - 4 == fctx.size())
742 or self.size() - 4 == fctx.size())
743 or self.size() == fctx.size()):
743 or self.size() == fctx.size()):
744 return self._filelog.cmp(self._filenode, fctx.data())
744 return self._filelog.cmp(self._filenode, fctx.data())
745
745
746 return True
746 return True
747
747
748 def parents(self):
748 def parents(self):
749 _path = self._path
749 _path = self._path
750 fl = self._filelog
750 fl = self._filelog
751 pl = [(_path, n, fl) for n in self._filelog.parents(self._filenode)]
751 pl = [(_path, n, fl) for n in self._filelog.parents(self._filenode)]
752
752
753 r = self._filelog.renamed(self._filenode)
753 r = self._filelog.renamed(self._filenode)
754 if r:
754 if r:
755 pl[0] = (r[0], r[1], None)
755 pl[0] = (r[0], r[1], None)
756
756
757 return [filectx(self._repo, p, fileid=n, filelog=l)
757 return [filectx(self._repo, p, fileid=n, filelog=l)
758 for p, n, l in pl if n != nullid]
758 for p, n, l in pl if n != nullid]
759
759
760 def p1(self):
760 def p1(self):
761 return self.parents()[0]
761 return self.parents()[0]
762
762
763 def p2(self):
763 def p2(self):
764 p = self.parents()
764 p = self.parents()
765 if len(p) == 2:
765 if len(p) == 2:
766 return p[1]
766 return p[1]
767 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
767 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
768
768
769 def annotate(self, follow=False, linenumber=None, diffopts=None):
769 def annotate(self, follow=False, linenumber=None, diffopts=None):
770 '''returns a list of tuples of (ctx, line) for each line
770 '''returns a list of tuples of (ctx, line) for each line
771 in the file, where ctx is the filectx of the node where
771 in the file, where ctx is the filectx of the node where
772 that line was last changed.
772 that line was last changed.
773 This returns tuples of ((ctx, linenumber), line) for each line,
773 This returns tuples of ((ctx, linenumber), line) for each line,
774 if "linenumber" parameter is NOT "None".
774 if "linenumber" parameter is NOT "None".
775 In such tuples, linenumber means one at the first appearance
775 In such tuples, linenumber means one at the first appearance
776 in the managed file.
776 in the managed file.
777 To reduce annotation cost,
777 To reduce annotation cost,
778 this returns fixed value(False is used) as linenumber,
778 this returns fixed value(False is used) as linenumber,
779 if "linenumber" parameter is "False".'''
779 if "linenumber" parameter is "False".'''
780
780
781 if linenumber is None:
781 if linenumber is None:
782 def decorate(text, rev):
782 def decorate(text, rev):
783 return ([rev] * len(text.splitlines()), text)
783 return ([rev] * len(text.splitlines()), text)
784 elif linenumber:
784 elif linenumber:
785 def decorate(text, rev):
785 def decorate(text, rev):
786 size = len(text.splitlines())
786 size = len(text.splitlines())
787 return ([(rev, i) for i in xrange(1, size + 1)], text)
787 return ([(rev, i) for i in xrange(1, size + 1)], text)
788 else:
788 else:
789 def decorate(text, rev):
789 def decorate(text, rev):
790 return ([(rev, False)] * len(text.splitlines()), text)
790 return ([(rev, False)] * len(text.splitlines()), text)
791
791
792 def pair(parent, child):
792 def pair(parent, child):
793 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts,
793 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts,
794 refine=True)
794 refine=True)
795 for (a1, a2, b1, b2), t in blocks:
795 for (a1, a2, b1, b2), t in blocks:
796 # Changed blocks ('!') or blocks made only of blank lines ('~')
796 # Changed blocks ('!') or blocks made only of blank lines ('~')
797 # belong to the child.
797 # belong to the child.
798 if t == '=':
798 if t == '=':
799 child[0][b1:b2] = parent[0][a1:a2]
799 child[0][b1:b2] = parent[0][a1:a2]
800 return child
800 return child
801
801
802 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
802 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
803
803
804 def parents(f):
804 def parents(f):
805 pl = f.parents()
805 pl = f.parents()
806
806
807 # Don't return renamed parents if we aren't following.
807 # Don't return renamed parents if we aren't following.
808 if not follow:
808 if not follow:
809 pl = [p for p in pl if p.path() == f.path()]
809 pl = [p for p in pl if p.path() == f.path()]
810
810
811 # renamed filectx won't have a filelog yet, so set it
811 # renamed filectx won't have a filelog yet, so set it
812 # from the cache to save time
812 # from the cache to save time
813 for p in pl:
813 for p in pl:
814 if not '_filelog' in p.__dict__:
814 if not '_filelog' in p.__dict__:
815 p._filelog = getlog(p.path())
815 p._filelog = getlog(p.path())
816
816
817 return pl
817 return pl
818
818
819 # use linkrev to find the first changeset where self appeared
819 # use linkrev to find the first changeset where self appeared
820 if self.rev() != self.linkrev():
820 if self.rev() != self.linkrev():
821 base = self.filectx(self.filenode())
821 base = self.filectx(self.filenode())
822 else:
822 else:
823 base = self
823 base = self
824
824
825 # This algorithm would prefer to be recursive, but Python is a
825 # This algorithm would prefer to be recursive, but Python is a
826 # bit recursion-hostile. Instead we do an iterative
826 # bit recursion-hostile. Instead we do an iterative
827 # depth-first search.
827 # depth-first search.
828
828
829 visit = [base]
829 visit = [base]
830 hist = {}
830 hist = {}
831 pcache = {}
831 pcache = {}
832 needed = {base: 1}
832 needed = {base: 1}
833 while visit:
833 while visit:
834 f = visit[-1]
834 f = visit[-1]
835 pcached = f in pcache
835 pcached = f in pcache
836 if not pcached:
836 if not pcached:
837 pcache[f] = parents(f)
837 pcache[f] = parents(f)
838
838
839 ready = True
839 ready = True
840 pl = pcache[f]
840 pl = pcache[f]
841 for p in pl:
841 for p in pl:
842 if p not in hist:
842 if p not in hist:
843 ready = False
843 ready = False
844 visit.append(p)
844 visit.append(p)
845 if not pcached:
845 if not pcached:
846 needed[p] = needed.get(p, 0) + 1
846 needed[p] = needed.get(p, 0) + 1
847 if ready:
847 if ready:
848 visit.pop()
848 visit.pop()
849 reusable = f in hist
849 reusable = f in hist
850 if reusable:
850 if reusable:
851 curr = hist[f]
851 curr = hist[f]
852 else:
852 else:
853 curr = decorate(f.data(), f)
853 curr = decorate(f.data(), f)
854 for p in pl:
854 for p in pl:
855 if not reusable:
855 if not reusable:
856 curr = pair(hist[p], curr)
856 curr = pair(hist[p], curr)
857 if needed[p] == 1:
857 if needed[p] == 1:
858 del hist[p]
858 del hist[p]
859 del needed[p]
859 del needed[p]
860 else:
860 else:
861 needed[p] -= 1
861 needed[p] -= 1
862
862
863 hist[f] = curr
863 hist[f] = curr
864 pcache[f] = []
864 pcache[f] = []
865
865
866 return zip(hist[base][0], hist[base][1].splitlines(True))
866 return zip(hist[base][0], hist[base][1].splitlines(True))
867
867
868 def ancestors(self, followfirst=False):
868 def ancestors(self, followfirst=False):
869 visit = {}
869 visit = {}
870 c = self
870 c = self
871 cut = followfirst and 1 or None
871 cut = followfirst and 1 or None
872 while True:
872 while True:
873 for parent in c.parents()[:cut]:
873 for parent in c.parents()[:cut]:
874 visit[(parent.rev(), parent.node())] = parent
874 visit[(parent.rev(), parent.node())] = parent
875 if not visit:
875 if not visit:
876 break
876 break
877 c = visit.pop(max(visit))
877 c = visit.pop(max(visit))
878 yield c
878 yield c
879
879
880 class filectx(basefilectx):
880 class filectx(basefilectx):
881 """A filecontext object makes access to data related to a particular
881 """A filecontext object makes access to data related to a particular
882 filerevision convenient."""
882 filerevision convenient."""
883 def __init__(self, repo, path, changeid=None, fileid=None,
883 def __init__(self, repo, path, changeid=None, fileid=None,
884 filelog=None, changectx=None):
884 filelog=None, changectx=None):
885 """changeid can be a changeset revision, node, or tag.
885 """changeid can be a changeset revision, node, or tag.
886 fileid can be a file revision or node."""
886 fileid can be a file revision or node."""
887 self._repo = repo
887 self._repo = repo
888 self._path = path
888 self._path = path
889
889
890 assert (changeid is not None
890 assert (changeid is not None
891 or fileid is not None
891 or fileid is not None
892 or changectx is not None), \
892 or changectx is not None), \
893 ("bad args: changeid=%r, fileid=%r, changectx=%r"
893 ("bad args: changeid=%r, fileid=%r, changectx=%r"
894 % (changeid, fileid, changectx))
894 % (changeid, fileid, changectx))
895
895
896 if filelog is not None:
896 if filelog is not None:
897 self._filelog = filelog
897 self._filelog = filelog
898
898
899 if changeid is not None:
899 if changeid is not None:
900 self._changeid = changeid
900 self._changeid = changeid
901 if changectx is not None:
901 if changectx is not None:
902 self._changectx = changectx
902 self._changectx = changectx
903 if fileid is not None:
903 if fileid is not None:
904 self._fileid = fileid
904 self._fileid = fileid
905
905
906 @propertycache
906 @propertycache
907 def _changectx(self):
907 def _changectx(self):
908 try:
908 try:
909 return changectx(self._repo, self._changeid)
909 return changectx(self._repo, self._changeid)
910 except error.RepoLookupError:
910 except error.RepoLookupError:
911 # Linkrev may point to any revision in the repository. When the
911 # Linkrev may point to any revision in the repository. When the
912 # repository is filtered this may lead to `filectx` trying to build
912 # repository is filtered this may lead to `filectx` trying to build
913 # `changectx` for filtered revision. In such case we fallback to
913 # `changectx` for filtered revision. In such case we fallback to
914 # creating `changectx` on the unfiltered version of the reposition.
914 # creating `changectx` on the unfiltered version of the reposition.
915 # This fallback should not be an issue because `changectx` from
915 # This fallback should not be an issue because `changectx` from
916 # `filectx` are not used in complex operations that care about
916 # `filectx` are not used in complex operations that care about
917 # filtering.
917 # filtering.
918 #
918 #
919 # This fallback is a cheap and dirty fix that prevent several
919 # This fallback is a cheap and dirty fix that prevent several
920 # crashes. It does not ensure the behavior is correct. However the
920 # crashes. It does not ensure the behavior is correct. However the
921 # behavior was not correct before filtering either and "incorrect
921 # behavior was not correct before filtering either and "incorrect
922 # behavior" is seen as better as "crash"
922 # behavior" is seen as better as "crash"
923 #
923 #
924 # Linkrevs have several serious troubles with filtering that are
924 # Linkrevs have several serious troubles with filtering that are
925 # complicated to solve. Proper handling of the issue here should be
925 # complicated to solve. Proper handling of the issue here should be
926 # considered when solving linkrev issue are on the table.
926 # considered when solving linkrev issue are on the table.
927 return changectx(self._repo.unfiltered(), self._changeid)
927 return changectx(self._repo.unfiltered(), self._changeid)
928
928
929 def filectx(self, fileid):
929 def filectx(self, fileid):
930 '''opens an arbitrary revision of the file without
930 '''opens an arbitrary revision of the file without
931 opening a new filelog'''
931 opening a new filelog'''
932 return filectx(self._repo, self._path, fileid=fileid,
932 return filectx(self._repo, self._path, fileid=fileid,
933 filelog=self._filelog)
933 filelog=self._filelog)
934
934
935 def data(self):
935 def data(self):
936 try:
936 try:
937 return self._filelog.read(self._filenode)
937 return self._filelog.read(self._filenode)
938 except error.CensoredNodeError:
938 except error.CensoredNodeError:
939 if self._repo.ui.config("censor", "policy", "abort") == "ignore":
939 if self._repo.ui.config("censor", "policy", "abort") == "ignore":
940 return ""
940 return ""
941 raise util.Abort(_("censored node: %s") % short(self._filenode),
941 raise util.Abort(_("censored node: %s") % short(self._filenode),
942 hint=_("set censor.policy to ignore errors"))
942 hint=_("set censor.policy to ignore errors"))
943
943
944 def size(self):
944 def size(self):
945 return self._filelog.size(self._filerev)
945 return self._filelog.size(self._filerev)
946
946
947 def renamed(self):
947 def renamed(self):
948 """check if file was actually renamed in this changeset revision
948 """check if file was actually renamed in this changeset revision
949
949
950 If the rename is logged in the file revision, we report the copy for the
950 If the rename is logged in the file revision, we report the copy for the
951 changeset only if the file revision's linkrev points back to the changeset
951 changeset only if the file revision's linkrev points back to the changeset
952 in question or if both changeset parents contain different file revisions.
952 in question or if both changeset parents contain different file revisions.
953 """
953 """
954
954
955 renamed = self._filelog.renamed(self._filenode)
955 renamed = self._filelog.renamed(self._filenode)
956 if not renamed:
956 if not renamed:
957 return renamed
957 return renamed
958
958
959 if self.rev() == self.linkrev():
959 if self.rev() == self.linkrev():
960 return renamed
960 return renamed
961
961
962 name = self.path()
962 name = self.path()
963 fnode = self._filenode
963 fnode = self._filenode
964 for p in self._changectx.parents():
964 for p in self._changectx.parents():
965 try:
965 try:
966 if fnode == p.filenode(name):
966 if fnode == p.filenode(name):
967 return None
967 return None
968 except error.LookupError:
968 except error.LookupError:
969 pass
969 pass
970 return renamed
970 return renamed
971
971
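# Hedged consumption sketch for renamed(): per the filelog contract used above,
# the return value is either falsy (no copy to report) or a
# (source_path, source_filenode) pair; `fctx` below is an assumed filectx.
#
#   copy = fctx.renamed()
#   if copy:
#       srcpath, srcnode = copy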
972 def children(self):
972 def children(self):
973 # hard for renames
973 # hard for renames
974 c = self._filelog.children(self._filenode)
974 c = self._filelog.children(self._filenode)
975 return [filectx(self._repo, self._path, fileid=x,
975 return [filectx(self._repo, self._path, fileid=x,
976 filelog=self._filelog) for x in c]
976 filelog=self._filelog) for x in c]
977
977
978 class committablectx(basectx):
978 class committablectx(basectx):
979 """A committablectx object provides common functionality for a context that
979 """A committablectx object provides common functionality for a context that
980 wants the ability to commit, e.g. workingctx or memctx."""
980 wants the ability to commit, e.g. workingctx or memctx."""
981 def __init__(self, repo, text="", user=None, date=None, extra=None,
981 def __init__(self, repo, text="", user=None, date=None, extra=None,
982 changes=None):
982 changes=None):
983 self._repo = repo
983 self._repo = repo
984 self._rev = None
984 self._rev = None
985 self._node = None
985 self._node = None
986 self._text = text
986 self._text = text
987 if date:
987 if date:
988 self._date = util.parsedate(date)
988 self._date = util.parsedate(date)
989 if user:
989 if user:
990 self._user = user
990 self._user = user
991 if changes:
991 if changes:
992 self._status = changes
992 self._status = changes
993
993
994 self._extra = {}
994 self._extra = {}
995 if extra:
995 if extra:
996 self._extra = extra.copy()
996 self._extra = extra.copy()
997 if 'branch' not in self._extra:
997 if 'branch' not in self._extra:
998 try:
998 try:
999 branch = encoding.fromlocal(self._repo.dirstate.branch())
999 branch = encoding.fromlocal(self._repo.dirstate.branch())
1000 except UnicodeDecodeError:
1000 except UnicodeDecodeError:
1001 raise util.Abort(_('branch name not in UTF-8!'))
1001 raise util.Abort(_('branch name not in UTF-8!'))
1002 self._extra['branch'] = branch
1002 self._extra['branch'] = branch
1003 if self._extra['branch'] == '':
1003 if self._extra['branch'] == '':
1004 self._extra['branch'] = 'default'
1004 self._extra['branch'] = 'default'
1005
1005
1006 def __str__(self):
1006 def __str__(self):
1007 return str(self._parents[0]) + "+"
1007 return str(self._parents[0]) + "+"
1008
1008
1009 def __nonzero__(self):
1009 def __nonzero__(self):
1010 return True
1010 return True
1011
1011
1012 def _buildflagfunc(self):
1012 def _buildflagfunc(self):
1013 # Create a fallback function for getting file flags when the
1013 # Create a fallback function for getting file flags when the
1014 # filesystem doesn't support them
1014 # filesystem doesn't support them
1015
1015
1016 copiesget = self._repo.dirstate.copies().get
1016 copiesget = self._repo.dirstate.copies().get
1017
1017
1018 if len(self._parents) < 2:
1018 if len(self._parents) < 2:
1019 # when we have one parent, it's easy: copy from parent
1019 # when we have one parent, it's easy: copy from parent
1020 man = self._parents[0].manifest()
1020 man = self._parents[0].manifest()
1021 def func(f):
1021 def func(f):
1022 f = copiesget(f, f)
1022 f = copiesget(f, f)
1023 return man.flags(f)
1023 return man.flags(f)
1024 else:
1024 else:
1025 # merges are tricky: we try to reconstruct the unstored
1025 # merges are tricky: we try to reconstruct the unstored
1026 # result from the merge (issue1802)
1026 # result from the merge (issue1802)
1027 p1, p2 = self._parents
1027 p1, p2 = self._parents
1028 pa = p1.ancestor(p2)
1028 pa = p1.ancestor(p2)
1029 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1029 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1030
1030
1031 def func(f):
1031 def func(f):
1032 f = copiesget(f, f) # may be wrong for merges with copies
1032 f = copiesget(f, f) # may be wrong for merges with copies
1033 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1033 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1034 if fl1 == fl2:
1034 if fl1 == fl2:
1035 return fl1
1035 return fl1
1036 if fl1 == fla:
1036 if fl1 == fla:
1037 return fl2
1037 return fl2
1038 if fl2 == fla:
1038 if fl2 == fla:
1039 return fl1
1039 return fl1
1040 return '' # punt for conflicts
1040 return '' # punt for conflicts
1041
1041
1042 return func
1042 return func
1043
1043
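# Worked example of the three-way flag resolution above (a sketch; `resolve`
# is a hypothetical stand-in for the inner func with the manifests inlined):
#
#   def resolve(fl1, fl2, fla):
#       if fl1 == fl2:
#           return fl1
#       if fl1 == fla:
#           return fl2
#       if fl2 == fla:
#           return fl1
#       return ''          # punt for conflicts
#
#   resolve('x', '', '')   # -> 'x'  (only p1 changed the flag)
#   resolve('x', 'l', '')  # -> ''   (conflicting changes, punt)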
1044 @propertycache
1044 @propertycache
1045 def _flagfunc(self):
1045 def _flagfunc(self):
1046 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1046 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1047
1047
1048 @propertycache
1048 @propertycache
1049 def _manifest(self):
1049 def _manifest(self):
1050 """generate a manifest corresponding to the values in self._status"""
1050 """generate a manifest corresponding to the values in self._status"""
1051
1051
1052 man = self._parents[0].manifest().copy()
1052 man = self._parents[0].manifest().copy()
1053 if len(self._parents) > 1:
1053 if len(self._parents) > 1:
1054 man2 = self.p2().manifest()
1054 man2 = self.p2().manifest()
1055 def getman(f):
1055 def getman(f):
1056 if f in man:
1056 if f in man:
1057 return man
1057 return man
1058 return man2
1058 return man2
1059 else:
1059 else:
1060 getman = lambda f: man
1060 getman = lambda f: man
1061
1061
1062 copied = self._repo.dirstate.copies()
1062 copied = self._repo.dirstate.copies()
1063 ff = self._flagfunc
1063 ff = self._flagfunc
1064 for i, l in (("a", self._status.added), ("m", self._status.modified)):
1064 for i, l in (("a", self._status.added), ("m", self._status.modified)):
1065 for f in l:
1065 for f in l:
1066 orig = copied.get(f, f)
1066 orig = copied.get(f, f)
1067 man[f] = getman(orig).get(orig, nullid) + i
1067 man[f] = getman(orig).get(orig, nullid) + i
1068 try:
1068 try:
1069 man.setflag(f, ff(f))
1069 man.setflag(f, ff(f))
1070 except OSError:
1070 except OSError:
1071 pass
1071 pass
1072
1072
1073 for f in self._status.deleted + self._status.removed:
1073 for f in self._status.deleted + self._status.removed:
1074 if f in man:
1074 if f in man:
1075 del man[f]
1075 del man[f]
1076
1076
1077 return man
1077 return man
1078
1078
1079 @propertycache
1079 @propertycache
1080 def _status(self):
1080 def _status(self):
1081 return self._repo.status()
1081 return self._repo.status()
1082
1082
1083 @propertycache
1083 @propertycache
1084 def _user(self):
1084 def _user(self):
1085 return self._repo.ui.username()
1085 return self._repo.ui.username()
1086
1086
1087 @propertycache
1087 @propertycache
1088 def _date(self):
1088 def _date(self):
1089 return util.makedate()
1089 return util.makedate()
1090
1090
1091 def subrev(self, subpath):
1091 def subrev(self, subpath):
1092 return None
1092 return None
1093
1093
1094 def user(self):
1094 def user(self):
1095 return self._user or self._repo.ui.username()
1095 return self._user or self._repo.ui.username()
1096 def date(self):
1096 def date(self):
1097 return self._date
1097 return self._date
1098 def description(self):
1098 def description(self):
1099 return self._text
1099 return self._text
1100 def files(self):
1100 def files(self):
1101 return sorted(self._status.modified + self._status.added +
1101 return sorted(self._status.modified + self._status.added +
1102 self._status.removed)
1102 self._status.removed)
1103
1103
1104 def modified(self):
1104 def modified(self):
1105 return self._status.modified
1105 return self._status.modified
1106 def added(self):
1106 def added(self):
1107 return self._status.added
1107 return self._status.added
1108 def removed(self):
1108 def removed(self):
1109 return self._status.removed
1109 return self._status.removed
1110 def deleted(self):
1110 def deleted(self):
1111 return self._status.deleted
1111 return self._status.deleted
1112 def unknown(self):
1112 def unknown(self):
1113 return self._status.unknown
1113 return self._status.unknown
1114 def ignored(self):
1114 def ignored(self):
1115 return self._status.ignored
1115 return self._status.ignored
1116 def clean(self):
1116 def clean(self):
1117 return self._status.clean
1117 return self._status.clean
1118 def branch(self):
1118 def branch(self):
1119 return encoding.tolocal(self._extra['branch'])
1119 return encoding.tolocal(self._extra['branch'])
1120 def closesbranch(self):
1120 def closesbranch(self):
1121 return 'close' in self._extra
1121 return 'close' in self._extra
1122 def extra(self):
1122 def extra(self):
1123 return self._extra
1123 return self._extra
1124
1124
1125 def tags(self):
1125 def tags(self):
1126 t = []
1126 t = []
1127 for p in self.parents():
1127 for p in self.parents():
1128 t.extend(p.tags())
1128 t.extend(p.tags())
1129 return t
1129 return t
1130
1130
1131 def bookmarks(self):
1131 def bookmarks(self):
1132 b = []
1132 b = []
1133 for p in self.parents():
1133 for p in self.parents():
1134 b.extend(p.bookmarks())
1134 b.extend(p.bookmarks())
1135 return b
1135 return b
1136
1136
1137 def phase(self):
1137 def phase(self):
1138 phase = phases.draft # default phase to draft
1138 phase = phases.draft # default phase to draft
1139 for p in self.parents():
1139 for p in self.parents():
1140 phase = max(phase, p.phase())
1140 phase = max(phase, p.phase())
1141 return phase
1141 return phase
1142
1142
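# Hedged note on phase(): phase values are ordered public < draft < secret
# (0 < 1 < 2 in the phases module), so the max() above means, for example:
#
#   parents public + secret  -> secret
#   parents public + public  -> draft   (the default above still applies)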
1143 def hidden(self):
1143 def hidden(self):
1144 return False
1144 return False
1145
1145
1146 def children(self):
1146 def children(self):
1147 return []
1147 return []
1148
1148
1149 def flags(self, path):
1149 def flags(self, path):
1150 if '_manifest' in self.__dict__:
1150 if '_manifest' in self.__dict__:
1151 try:
1151 try:
1152 return self._manifest.flags(path)
1152 return self._manifest.flags(path)
1153 except KeyError:
1153 except KeyError:
1154 return ''
1154 return ''
1155
1155
1156 try:
1156 try:
1157 return self._flagfunc(path)
1157 return self._flagfunc(path)
1158 except OSError:
1158 except OSError:
1159 return ''
1159 return ''
1160
1160
1161 def ancestor(self, c2):
1161 def ancestor(self, c2):
1162 """return the "best" ancestor context of self and c2"""
1162 """return the "best" ancestor context of self and c2"""
1163 return self._parents[0].ancestor(c2) # punt on two parents for now
1163 return self._parents[0].ancestor(c2) # punt on two parents for now
1164
1164
1165 def walk(self, match):
1165 def walk(self, match):
1166 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1166 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1167 True, False))
1167 True, False))
1168
1168
1169 def matches(self, match):
1169 def matches(self, match):
1170 return sorted(self._repo.dirstate.matches(match))
1170 return sorted(self._repo.dirstate.matches(match))
1171
1171
1172 def ancestors(self):
1172 def ancestors(self):
1173 for a in self._repo.changelog.ancestors(
1173 for a in self._repo.changelog.ancestors(
1174 [p.rev() for p in self._parents]):
1174 [p.rev() for p in self._parents]):
1175 yield changectx(self._repo, a)
1175 yield changectx(self._repo, a)
1176
1176
1177 def markcommitted(self, node):
1177 def markcommitted(self, node):
1178 """Perform post-commit cleanup necessary after committing this ctx
1178 """Perform post-commit cleanup necessary after committing this ctx
1179
1179
1180 Specifically, this updates the backing stores that this working context
1180 Specifically, this updates the backing stores that this working context
1181 wraps so that they reflect the fact that the changes represented by this
1181 wraps so that they reflect the fact that the changes represented by this
1182 workingctx have been committed. For example, it marks
1182 workingctx have been committed. For example, it marks
1183 modified and added files as normal in the dirstate.
1183 modified and added files as normal in the dirstate.
1184
1184
1185 """
1185 """
1186
1186
1187 self._repo.dirstate.beginparentchange()
1187 self._repo.dirstate.beginparentchange()
1188 for f in self.modified() + self.added():
1188 for f in self.modified() + self.added():
1189 self._repo.dirstate.normal(f)
1189 self._repo.dirstate.normal(f)
1190 for f in self.removed():
1190 for f in self.removed():
1191 self._repo.dirstate.drop(f)
1191 self._repo.dirstate.drop(f)
1192 self._repo.dirstate.setparents(node)
1192 self._repo.dirstate.setparents(node)
1193 self._repo.dirstate.endparentchange()
1193 self._repo.dirstate.endparentchange()
1194
1194
1195 def dirs(self):
1195 def dirs(self):
1196 return self._repo.dirstate.dirs()
1196 return self._repo.dirstate.dirs()
1197
1197
1198 class workingctx(committablectx):
1198 class workingctx(committablectx):
1199 """A workingctx object makes access to data related to
1199 """A workingctx object makes access to data related to
1200 the current working directory convenient.
1200 the current working directory convenient.
1201 date - any valid date string or (unixtime, offset), or None.
1201 date - any valid date string or (unixtime, offset), or None.
1202 user - username string, or None.
1202 user - username string, or None.
1203 extra - a dictionary of extra values, or None.
1203 extra - a dictionary of extra values, or None.
1204 changes - a list of file lists as returned by localrepo.status()
1204 changes - a list of file lists as returned by localrepo.status()
1205 or None to use the repository status.
1205 or None to use the repository status.
1206 """
1206 """
1207 def __init__(self, repo, text="", user=None, date=None, extra=None,
1207 def __init__(self, repo, text="", user=None, date=None, extra=None,
1208 changes=None):
1208 changes=None):
1209 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1209 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1210
1210
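# Hedged usage sketch: callers normally do not instantiate workingctx directly;
# the working directory context is usually obtained via repo[None] (an
# assumption about localrepo.__getitem__, which is not shown in this file),
# after which the accessors below apply:
#
#   wctx = repo[None]
#   wctx.branch(), wctx.modified(), wctx.dirty()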
1211 def __iter__(self):
1211 def __iter__(self):
1212 d = self._repo.dirstate
1212 d = self._repo.dirstate
1213 for f in d:
1213 for f in d:
1214 if d[f] != 'r':
1214 if d[f] != 'r':
1215 yield f
1215 yield f
1216
1216
1217 def __contains__(self, key):
1217 def __contains__(self, key):
1218 return self._repo.dirstate[key] not in "?r"
1218 return self._repo.dirstate[key] not in "?r"
1219
1219
1220 @propertycache
1220 @propertycache
1221 def _parents(self):
1221 def _parents(self):
1222 p = self._repo.dirstate.parents()
1222 p = self._repo.dirstate.parents()
1223 if p[1] == nullid:
1223 if p[1] == nullid:
1224 p = p[:-1]
1224 p = p[:-1]
1225 return [changectx(self._repo, x) for x in p]
1225 return [changectx(self._repo, x) for x in p]
1226
1226
1227 def filectx(self, path, filelog=None):
1227 def filectx(self, path, filelog=None):
1228 """get a file context from the working directory"""
1228 """get a file context from the working directory"""
1229 return workingfilectx(self._repo, path, workingctx=self,
1229 return workingfilectx(self._repo, path, workingctx=self,
1230 filelog=filelog)
1230 filelog=filelog)
1231
1231
1232 def dirty(self, missing=False, merge=True, branch=True):
1232 def dirty(self, missing=False, merge=True, branch=True):
1233 "check whether a working directory is modified"
1233 "check whether a working directory is modified"
1234 # check subrepos first
1234 # check subrepos first
1235 for s in sorted(self.substate):
1235 for s in sorted(self.substate):
1236 if self.sub(s).dirty():
1236 if self.sub(s).dirty():
1237 return True
1237 return True
1238 # check current working dir
1238 # check current working dir
1239 return ((merge and self.p2()) or
1239 return ((merge and self.p2()) or
1240 (branch and self.branch() != self.p1().branch()) or
1240 (branch and self.branch() != self.p1().branch()) or
1241 self.modified() or self.added() or self.removed() or
1241 self.modified() or self.added() or self.removed() or
1242 (missing and self.deleted()))
1242 (missing and self.deleted()))
1243
1243
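# Hedged sketch of dirty(): per the expression above it reports an uncommitted
# merge, a branch change relative to p1, or pending file changes; `repo` is an
# assumed localrepo instance, and the abort is only an illustrative caller.
#
#   if repo[None].dirty(missing=True):
#       raise util.Abort(_('uncommitted changes'))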
1244 def add(self, list, prefix=""):
1244 def add(self, list, prefix=""):
1245 join = lambda f: os.path.join(prefix, f)
1245 join = lambda f: os.path.join(prefix, f)
1246 wlock = self._repo.wlock()
1246 wlock = self._repo.wlock()
1247 ui, ds = self._repo.ui, self._repo.dirstate
1247 ui, ds = self._repo.ui, self._repo.dirstate
1248 try:
1248 try:
1249 rejected = []
1249 rejected = []
1250 lstat = self._repo.wvfs.lstat
1250 lstat = self._repo.wvfs.lstat
1251 for f in list:
1251 for f in list:
1252 scmutil.checkportable(ui, join(f))
1252 scmutil.checkportable(ui, join(f))
1253 try:
1253 try:
1254 st = lstat(f)
1254 st = lstat(f)
1255 except OSError:
1255 except OSError:
1256 ui.warn(_("%s does not exist!\n") % join(f))
1256 ui.warn(_("%s does not exist!\n") % join(f))
1257 rejected.append(f)
1257 rejected.append(f)
1258 continue
1258 continue
1259 if st.st_size > 10000000:
1259 if st.st_size > 10000000:
1260 ui.warn(_("%s: up to %d MB of RAM may be required "
1260 ui.warn(_("%s: up to %d MB of RAM may be required "
1261 "to manage this file\n"
1261 "to manage this file\n"
1262 "(use 'hg revert %s' to cancel the "
1262 "(use 'hg revert %s' to cancel the "
1263 "pending addition)\n")
1263 "pending addition)\n")
1264 % (f, 3 * st.st_size // 1000000, join(f)))
1264 % (f, 3 * st.st_size // 1000000, join(f)))
1265 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1265 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1266 ui.warn(_("%s not added: only files and symlinks "
1266 ui.warn(_("%s not added: only files and symlinks "
1267 "supported currently\n") % join(f))
1267 "supported currently\n") % join(f))
1268 rejected.append(f)
1268 rejected.append(f)
1269 elif ds[f] in 'amn':
1269 elif ds[f] in 'amn':
1270 ui.warn(_("%s already tracked!\n") % join(f))
1270 ui.warn(_("%s already tracked!\n") % join(f))
1271 elif ds[f] == 'r':
1271 elif ds[f] == 'r':
1272 ds.normallookup(f)
1272 ds.normallookup(f)
1273 else:
1273 else:
1274 ds.add(f)
1274 ds.add(f)
1275 return rejected
1275 return rejected
1276 finally:
1276 finally:
1277 wlock.release()
1277 wlock.release()
1278
1278
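# Hedged note on the size warning above: the estimate printed is
# 3 * st.st_size // 1000000, i.e. roughly three times the file size in MB.
#
#   3 * 50000000 // 1000000   # a 50 MB file warns about "up to 150 MB of RAM"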
1279 def forget(self, files, prefix=""):
1279 def forget(self, files, prefix=""):
1280 join = lambda f: os.path.join(prefix, f)
1280 join = lambda f: os.path.join(prefix, f)
1281 wlock = self._repo.wlock()
1281 wlock = self._repo.wlock()
1282 try:
1282 try:
1283 rejected = []
1283 rejected = []
1284 for f in files:
1284 for f in files:
1285 if f not in self._repo.dirstate:
1285 if f not in self._repo.dirstate:
1286 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1286 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1287 rejected.append(f)
1287 rejected.append(f)
1288 elif self._repo.dirstate[f] != 'a':
1288 elif self._repo.dirstate[f] != 'a':
1289 self._repo.dirstate.remove(f)
1289 self._repo.dirstate.remove(f)
1290 else:
1290 else:
1291 self._repo.dirstate.drop(f)
1291 self._repo.dirstate.drop(f)
1292 return rejected
1292 return rejected
1293 finally:
1293 finally:
1294 wlock.release()
1294 wlock.release()
1295
1295
1296 def undelete(self, list):
1296 def undelete(self, list):
1297 pctxs = self.parents()
1297 pctxs = self.parents()
1298 wlock = self._repo.wlock()
1298 wlock = self._repo.wlock()
1299 try:
1299 try:
1300 for f in list:
1300 for f in list:
1301 if self._repo.dirstate[f] != 'r':
1301 if self._repo.dirstate[f] != 'r':
1302 self._repo.ui.warn(_("%s not removed!\n") % f)
1302 self._repo.ui.warn(_("%s not removed!\n") % f)
1303 else:
1303 else:
1304 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1304 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1305 t = fctx.data()
1305 t = fctx.data()
1306 self._repo.wwrite(f, t, fctx.flags())
1306 self._repo.wwrite(f, t, fctx.flags())
1307 self._repo.dirstate.normal(f)
1307 self._repo.dirstate.normal(f)
1308 finally:
1308 finally:
1309 wlock.release()
1309 wlock.release()
1310
1310
1311 def copy(self, source, dest):
1311 def copy(self, source, dest):
1312 try:
1312 try:
1313 st = self._repo.wvfs.lstat(dest)
1313 st = self._repo.wvfs.lstat(dest)
1314 except OSError, err:
1314 except OSError, err:
1315 if err.errno != errno.ENOENT:
1315 if err.errno != errno.ENOENT:
1316 raise
1316 raise
1317 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1317 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1318 return
1318 return
1319 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1319 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1320 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1320 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1321 "symbolic link\n") % dest)
1321 "symbolic link\n") % dest)
1322 else:
1322 else:
1323 wlock = self._repo.wlock()
1323 wlock = self._repo.wlock()
1324 try:
1324 try:
1325 if self._repo.dirstate[dest] in '?r':
1325 if self._repo.dirstate[dest] in '?r':
1326 self._repo.dirstate.add(dest)
1326 self._repo.dirstate.add(dest)
1327 self._repo.dirstate.copy(source, dest)
1327 self._repo.dirstate.copy(source, dest)
1328 finally:
1328 finally:
1329 wlock.release()
1329 wlock.release()
1330
1330
1331 def _filtersuspectsymlink(self, files):
1331 def _filtersuspectsymlink(self, files):
1332 if not files or self._repo.dirstate._checklink:
1332 if not files or self._repo.dirstate._checklink:
1333 return files
1333 return files
1334
1334
1335 # Symlink placeholders may get non-symlink-like contents
1335 # Symlink placeholders may get non-symlink-like contents
1336 # via user error or dereferencing by NFS or Samba servers,
1336 # via user error or dereferencing by NFS or Samba servers,
1337 # so we filter out any placeholders that don't look like a
1337 # so we filter out any placeholders that don't look like a
1338 # symlink
1338 # symlink
1339 sane = []
1339 sane = []
1340 for f in files:
1340 for f in files:
1341 if self.flags(f) == 'l':
1341 if self.flags(f) == 'l':
1342 d = self[f].data()
1342 d = self[f].data()
1343 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1343 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1344 self._repo.ui.debug('ignoring suspect symlink placeholder'
1344 self._repo.ui.debug('ignoring suspect symlink placeholder'
1345 ' "%s"\n' % f)
1345 ' "%s"\n' % f)
1346 continue
1346 continue
1347 sane.append(f)
1347 sane.append(f)
1348 return sane
1348 return sane
1349
1349
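# Hedged note on the placeholder test above: a symlink placeholder is kept only
# when its content looks like a link target -- non-empty, under 1024 bytes,
# single line, and not binary. For example (a sketch):
#
#   'target/file'            # plausible placeholder, kept
#   'line one\nline two\n'   # contains a newline, filtered out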
1350 def _checklookup(self, files):
1350 def _checklookup(self, files):
1351 # check for any possibly clean files
1351 # check for any possibly clean files
1352 if not files:
1352 if not files:
1353 return [], []
1353 return [], []
1354
1354
1355 modified = []
1355 modified = []
1356 fixup = []
1356 fixup = []
1357 pctx = self._parents[0]
1357 pctx = self._parents[0]
1358 # do a full compare of any files that might have changed
1358 # do a full compare of any files that might have changed
1359 for f in sorted(files):
1359 for f in sorted(files):
1360 if (f not in pctx or self.flags(f) != pctx.flags(f)
1360 if (f not in pctx or self.flags(f) != pctx.flags(f)
1361 or pctx[f].cmp(self[f])):
1361 or pctx[f].cmp(self[f])):
1362 modified.append(f)
1362 modified.append(f)
1363 else:
1363 else:
1364 fixup.append(f)
1364 fixup.append(f)
1365
1365
1366 # update dirstate for files that are actually clean
1366 # update dirstate for files that are actually clean
1367 if fixup:
1367 if fixup:
1368 try:
1368 try:
1369 # updating the dirstate is optional
1369 # updating the dirstate is optional
1370 # so we don't wait on the lock
1370 # so we don't wait on the lock
1371 # wlock can invalidate the dirstate, so cache normal _after_
1371 # wlock can invalidate the dirstate, so cache normal _after_
1372 # taking the lock
1372 # taking the lock
1373 wlock = self._repo.wlock(False)
1373 wlock = self._repo.wlock(False)
1374 normal = self._repo.dirstate.normal
1374 normal = self._repo.dirstate.normal
1375 try:
1375 try:
1376 for f in fixup:
1376 for f in fixup:
1377 normal(f)
1377 normal(f)
1378 finally:
1378 finally:
1379 wlock.release()
1379 wlock.release()
1380 except error.LockError:
1380 except error.LockError:
1381 pass
1381 pass
1382 return modified, fixup
1382 return modified, fixup
1383
1383
1384 def _manifestmatches(self, match, s):
1384 def _manifestmatches(self, match, s):
1385 """Slow path for workingctx
1385 """Slow path for workingctx
1386
1386
1387 The fast path is when we compare the working directory to its parent
1387 The fast path is when we compare the working directory to its parent
1388 which means this function is comparing with a non-parent; therefore we
1388 which means this function is comparing with a non-parent; therefore we
1389 need to build a manifest and return what matches.
1389 need to build a manifest and return what matches.
1390 """
1390 """
1391 mf = self._repo['.']._manifestmatches(match, s)
1391 mf = self._repo['.']._manifestmatches(match, s)
1392 modified, added, removed = s[0:3]
1392 for f in s.modified + s.added:
1393 for f in modified + added:
1394 mf[f] = None
1393 mf[f] = None
1395 mf.setflag(f, self.flags(f))
1394 mf.setflag(f, self.flags(f))
1396 for f in removed:
1395 for f in s.removed:
1397 if f in mf:
1396 if f in mf:
1398 del mf[f]
1397 del mf[f]
1399 return mf
1398 return mf
1400
1399
1401 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1400 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1402 unknown=False):
1401 unknown=False):
1403 '''Gets the status from the dirstate -- internal use only.'''
1402 '''Gets the status from the dirstate -- internal use only.'''
1404 listignored, listclean, listunknown = ignored, clean, unknown
1403 listignored, listclean, listunknown = ignored, clean, unknown
1405 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1404 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1406 subrepos = []
1405 subrepos = []
1407 if '.hgsub' in self:
1406 if '.hgsub' in self:
1408 subrepos = sorted(self.substate)
1407 subrepos = sorted(self.substate)
1409 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1408 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1410 listclean, listunknown)
1409 listclean, listunknown)
1411
1410
1412 # check for any possibly clean files
1411 # check for any possibly clean files
1413 if cmp:
1412 if cmp:
1414 modified2, fixup = self._checklookup(cmp)
1413 modified2, fixup = self._checklookup(cmp)
1415 s.modified.extend(modified2)
1414 s.modified.extend(modified2)
1416
1415
1417 # update dirstate for files that are actually clean
1416 # update dirstate for files that are actually clean
1418 if fixup and listclean:
1417 if fixup and listclean:
1419 s.clean.extend(fixup)
1418 s.clean.extend(fixup)
1420
1419
1421 return s
1420 return s
1422
1421
1423 def _buildstatus(self, other, s, match, listignored, listclean,
1422 def _buildstatus(self, other, s, match, listignored, listclean,
1424 listunknown):
1423 listunknown):
1425 """build a status with respect to another context
1424 """build a status with respect to another context
1426
1425
1427 This includes logic for maintaining the fast path of status when
1426 This includes logic for maintaining the fast path of status when
1428 comparing the working directory against its parent: a new manifest is
1427 comparing the working directory against its parent: a new manifest is
1429 built only when self (the working directory) is compared against
1428 built only when self (the working directory) is compared against
1430 something other than its parent (repo['.']).
1429 something other than its parent (repo['.']).
1431 """
1430 """
1432 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1431 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1433 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1432 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1434 # might have accidentally ended up with the entire contents of the file
1433 # might have accidentally ended up with the entire contents of the file
1435 # they are supposed to be linking to.
1434 # they are supposed to be linking to.
1436 s.modified[:] = self._filtersuspectsymlink(s.modified)
1435 s.modified[:] = self._filtersuspectsymlink(s.modified)
1437 if other != self._repo['.']:
1436 if other != self._repo['.']:
1438 s = super(workingctx, self)._buildstatus(other, s, match,
1437 s = super(workingctx, self)._buildstatus(other, s, match,
1439 listignored, listclean,
1438 listignored, listclean,
1440 listunknown)
1439 listunknown)
1441 self._status = s
1440 self._status = s
1442 return s
1441 return s
1443
1442
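# Hedged sketch of the fast/slow path above: comparing the working directory
# with its parent stays on the dirstate-only path, while any other base falls
# through to the manifest-based superclass logic. Using localrepo.status()
# (an assumption about its defaults, not shown in this file):
#
#   repo.status()                     # '.' vs working dir: fast path
#   repo.status(repo['tip'].node())   # non-parent base: manifests are built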
1444 def _matchstatus(self, other, match):
1443 def _matchstatus(self, other, match):
1445 """override the match method with a filter for directory patterns
1444 """override the match method with a filter for directory patterns
1446
1445
1447 We use inheritance to customize the match.bad method only in cases of
1446 We use inheritance to customize the match.bad method only in cases of
1448 workingctx since it belongs only to the working directory when
1447 workingctx since it belongs only to the working directory when
1449 comparing against the parent changeset.
1448 comparing against the parent changeset.
1450
1449
1451 If we aren't comparing against the working directory's parent, then we
1450 If we aren't comparing against the working directory's parent, then we
1452 just use the default match object sent to us.
1451 just use the default match object sent to us.
1453 """
1452 """
1454 superself = super(workingctx, self)
1453 superself = super(workingctx, self)
1455 match = superself._matchstatus(other, match)
1454 match = superself._matchstatus(other, match)
1456 if other != self._repo['.']:
1455 if other != self._repo['.']:
1457 def bad(f, msg):
1456 def bad(f, msg):
1458 # 'f' may be a directory pattern from 'match.files()',
1457 # 'f' may be a directory pattern from 'match.files()',
1459 # so 'f not in ctx1' is not enough
1458 # so 'f not in ctx1' is not enough
1460 if f not in other and f not in other.dirs():
1459 if f not in other and f not in other.dirs():
1461 self._repo.ui.warn('%s: %s\n' %
1460 self._repo.ui.warn('%s: %s\n' %
1462 (self._repo.dirstate.pathto(f), msg))
1461 (self._repo.dirstate.pathto(f), msg))
1463 match.bad = bad
1462 match.bad = bad
1464 return match
1463 return match
1465
1464
1466 class committablefilectx(basefilectx):
1465 class committablefilectx(basefilectx):
1467 """A committablefilectx provides common functionality for a file context
1466 """A committablefilectx provides common functionality for a file context
1468 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1467 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1469 def __init__(self, repo, path, filelog=None, ctx=None):
1468 def __init__(self, repo, path, filelog=None, ctx=None):
1470 self._repo = repo
1469 self._repo = repo
1471 self._path = path
1470 self._path = path
1472 self._changeid = None
1471 self._changeid = None
1473 self._filerev = self._filenode = None
1472 self._filerev = self._filenode = None
1474
1473
1475 if filelog is not None:
1474 if filelog is not None:
1476 self._filelog = filelog
1475 self._filelog = filelog
1477 if ctx:
1476 if ctx:
1478 self._changectx = ctx
1477 self._changectx = ctx
1479
1478
1480 def __nonzero__(self):
1479 def __nonzero__(self):
1481 return True
1480 return True
1482
1481
1483 def parents(self):
1482 def parents(self):
1484 '''return parent filectxs, following copies if necessary'''
1483 '''return parent filectxs, following copies if necessary'''
1485 def filenode(ctx, path):
1484 def filenode(ctx, path):
1486 return ctx._manifest.get(path, nullid)
1485 return ctx._manifest.get(path, nullid)
1487
1486
1488 path = self._path
1487 path = self._path
1489 fl = self._filelog
1488 fl = self._filelog
1490 pcl = self._changectx._parents
1489 pcl = self._changectx._parents
1491 renamed = self.renamed()
1490 renamed = self.renamed()
1492
1491
1493 if renamed:
1492 if renamed:
1494 pl = [renamed + (None,)]
1493 pl = [renamed + (None,)]
1495 else:
1494 else:
1496 pl = [(path, filenode(pcl[0], path), fl)]
1495 pl = [(path, filenode(pcl[0], path), fl)]
1497
1496
1498 for pc in pcl[1:]:
1497 for pc in pcl[1:]:
1499 pl.append((path, filenode(pc, path), fl))
1498 pl.append((path, filenode(pc, path), fl))
1500
1499
1501 return [filectx(self._repo, p, fileid=n, filelog=l)
1500 return [filectx(self._repo, p, fileid=n, filelog=l)
1502 for p, n, l in pl if n != nullid]
1501 for p, n, l in pl if n != nullid]
1503
1502
1504 def children(self):
1503 def children(self):
1505 return []
1504 return []
1506
1505
1507 class workingfilectx(committablefilectx):
1506 class workingfilectx(committablefilectx):
1508 """A workingfilectx object makes access to data related to a particular
1507 """A workingfilectx object makes access to data related to a particular
1509 file in the working directory convenient."""
1508 file in the working directory convenient."""
1510 def __init__(self, repo, path, filelog=None, workingctx=None):
1509 def __init__(self, repo, path, filelog=None, workingctx=None):
1511 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1510 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1512
1511
1513 @propertycache
1512 @propertycache
1514 def _changectx(self):
1513 def _changectx(self):
1515 return workingctx(self._repo)
1514 return workingctx(self._repo)
1516
1515
1517 def data(self):
1516 def data(self):
1518 return self._repo.wread(self._path)
1517 return self._repo.wread(self._path)
1519 def renamed(self):
1518 def renamed(self):
1520 rp = self._repo.dirstate.copied(self._path)
1519 rp = self._repo.dirstate.copied(self._path)
1521 if not rp:
1520 if not rp:
1522 return None
1521 return None
1523 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1522 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1524
1523
1525 def size(self):
1524 def size(self):
1526 return self._repo.wvfs.lstat(self._path).st_size
1525 return self._repo.wvfs.lstat(self._path).st_size
1527 def date(self):
1526 def date(self):
1528 t, tz = self._changectx.date()
1527 t, tz = self._changectx.date()
1529 try:
1528 try:
1530 return (int(self._repo.wvfs.lstat(self._path).st_mtime), tz)
1529 return (int(self._repo.wvfs.lstat(self._path).st_mtime), tz)
1531 except OSError, err:
1530 except OSError, err:
1532 if err.errno != errno.ENOENT:
1531 if err.errno != errno.ENOENT:
1533 raise
1532 raise
1534 return (t, tz)
1533 return (t, tz)
1535
1534
1536 def cmp(self, fctx):
1535 def cmp(self, fctx):
1537 """compare with other file context
1536 """compare with other file context
1538
1537
1539 returns True if different than fctx.
1538 returns True if different than fctx.
1540 """
1539 """
1541 # fctx should be a filectx (not a workingfilectx)
1540 # fctx should be a filectx (not a workingfilectx)
1542 # invert comparison to reuse the same code path
1541 # invert comparison to reuse the same code path
1543 return fctx.cmp(self)
1542 return fctx.cmp(self)
1544
1543
1545 def remove(self, ignoremissing=False):
1544 def remove(self, ignoremissing=False):
1546 """wraps unlink for a repo's working directory"""
1545 """wraps unlink for a repo's working directory"""
1547 util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)
1546 util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)
1548
1547
1549 def write(self, data, flags):
1548 def write(self, data, flags):
1550 """wraps repo.wwrite"""
1549 """wraps repo.wwrite"""
1551 self._repo.wwrite(self._path, data, flags)
1550 self._repo.wwrite(self._path, data, flags)
1552
1551
1553 class memctx(committablectx):
1552 class memctx(committablectx):
1554 """Use memctx to perform in-memory commits via localrepo.commitctx().
1553 """Use memctx to perform in-memory commits via localrepo.commitctx().
1555
1554
1556 Revision information is supplied at initialization time, while
1555 Revision information is supplied at initialization time, while
1557 related file data is made available through a callback
1556 related file data is made available through a callback
1558 mechanism. 'repo' is the current localrepo, 'parents' is a
1557 mechanism. 'repo' is the current localrepo, 'parents' is a
1559 sequence of two parent revisions identifiers (pass None for every
1558 sequence of two parent revisions identifiers (pass None for every
1560 missing parent), 'text' is the commit message and 'files' lists
1559 missing parent), 'text' is the commit message and 'files' lists
1561 names of files touched by the revision (normalized and relative to
1560 names of files touched by the revision (normalized and relative to
1562 repository root).
1561 repository root).
1563
1562
1564 filectxfn(repo, memctx, path) is a callable receiving the
1563 filectxfn(repo, memctx, path) is a callable receiving the
1565 repository, the current memctx object and the normalized path of
1564 repository, the current memctx object and the normalized path of
1566 requested file, relative to repository root. It is fired by the
1565 requested file, relative to repository root. It is fired by the
1567 commit function for every file in 'files', but call order is
1566 commit function for every file in 'files', but call order is
1568 undefined. If the file is available in the revision being
1567 undefined. If the file is available in the revision being
1569 committed (updated or added), filectxfn returns a memfilectx
1568 committed (updated or added), filectxfn returns a memfilectx
1570 object. If the file was removed, filectxfn raises an
1569 object. If the file was removed, filectxfn raises an
1571 IOError. Moved files are represented by marking the source file
1570 IOError. Moved files are represented by marking the source file
1572 removed and the new file added with copy information (see
1571 removed and the new file added with copy information (see
1573 memfilectx).
1572 memfilectx).
1574
1573
1575 'user' is the committer name and defaults to the current
1574 'user' is the committer name and defaults to the current
1576 repository username, 'date' is the commit date in any format
1575 repository username, 'date' is the commit date in any format
1577 supported by util.parsedate() and defaults to the current date, and 'extra'
1576 supported by util.parsedate() and defaults to the current date, and 'extra'
1578 is a dictionary of metadata or is left empty.
1577 is a dictionary of metadata or is left empty.
1579 """
1578 """
1580
1579
1581 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
1580 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
1582 # Extensions that need to retain compatibility across Mercurial 3.1 can use
1581 # Extensions that need to retain compatibility across Mercurial 3.1 can use
1583 # this field to determine what to do in filectxfn.
1582 # this field to determine what to do in filectxfn.
1584 _returnnoneformissingfiles = True
1583 _returnnoneformissingfiles = True
1585
1584
1586 def __init__(self, repo, parents, text, files, filectxfn, user=None,
1585 def __init__(self, repo, parents, text, files, filectxfn, user=None,
1587 date=None, extra=None, editor=False):
1586 date=None, extra=None, editor=False):
1588 super(memctx, self).__init__(repo, text, user, date, extra)
1587 super(memctx, self).__init__(repo, text, user, date, extra)
1589 self._rev = None
1588 self._rev = None
1590 self._node = None
1589 self._node = None
1591 parents = [(p or nullid) for p in parents]
1590 parents = [(p or nullid) for p in parents]
1592 p1, p2 = parents
1591 p1, p2 = parents
1593 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
1592 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
1594 files = sorted(set(files))
1593 files = sorted(set(files))
1595 self._status = scmutil.status(files, [], [], [], [], [], [])
1594 self._status = scmutil.status(files, [], [], [], [], [], [])
1596 self._filectxfn = filectxfn
1595 self._filectxfn = filectxfn
1597 self.substate = {}
1596 self.substate = {}
1598
1597
1599 # if store is not callable, wrap it in a function
1598 # if store is not callable, wrap it in a function
1600 if not callable(filectxfn):
1599 if not callable(filectxfn):
1601 def getfilectx(repo, memctx, path):
1600 def getfilectx(repo, memctx, path):
1602 fctx = filectxfn[path]
1601 fctx = filectxfn[path]
1603 # this is weird but apparently we only keep track of one parent
1602 # this is weird but apparently we only keep track of one parent
1604 # (why not only store that instead of a tuple?)
1603 # (why not only store that instead of a tuple?)
1605 copied = fctx.renamed()
1604 copied = fctx.renamed()
1606 if copied:
1605 if copied:
1607 copied = copied[0]
1606 copied = copied[0]
1608 return memfilectx(repo, path, fctx.data(),
1607 return memfilectx(repo, path, fctx.data(),
1609 islink=fctx.islink(), isexec=fctx.isexec(),
1608 islink=fctx.islink(), isexec=fctx.isexec(),
1610 copied=copied, memctx=memctx)
1609 copied=copied, memctx=memctx)
1611 self._filectxfn = getfilectx
1610 self._filectxfn = getfilectx
1612
1611
1613 self._extra = extra and extra.copy() or {}
1612 self._extra = extra and extra.copy() or {}
1614 if self._extra.get('branch', '') == '':
1613 if self._extra.get('branch', '') == '':
1615 self._extra['branch'] = 'default'
1614 self._extra['branch'] = 'default'
1616
1615
1617 if editor:
1616 if editor:
1618 self._text = editor(self._repo, self, [])
1617 self._text = editor(self._repo, self, [])
1619 self._repo.savecommitmessage(self._text)
1618 self._repo.savecommitmessage(self._text)
1620
1619
1621 def filectx(self, path, filelog=None):
1620 def filectx(self, path, filelog=None):
1622 """get a file context from the working directory
1621 """get a file context from the working directory
1623
1622
1624 Returns None if file doesn't exist and should be removed."""
1623 Returns None if file doesn't exist and should be removed."""
1625 return self._filectxfn(self._repo, self, path)
1624 return self._filectxfn(self._repo, self, path)
1626
1625
1627 def commit(self):
1626 def commit(self):
1628 """commit context to the repo"""
1627 """commit context to the repo"""
1629 return self._repo.commitctx(self)
1628 return self._repo.commitctx(self)
1630
1629
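# Hedged usage sketch of memctx (names such as `repo` and the file content are
# assumptions): an in-memory commit built from the signatures above, written
# out via commit(), which wraps localrepo.commitctx().
#
#   def getfilectx(repo, mctx, path):
#       return memfilectx(repo, path, 'hello\n')
#
#   mctx = memctx(repo, [repo['.'].node(), None], 'example commit',
#                 ['hello.txt'], getfilectx, user='someone@example.com')
#   newnode = mctx.commit()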
1631 @propertycache
1630 @propertycache
1632 def _manifest(self):
1631 def _manifest(self):
1633 """generate a manifest based on the return values of filectxfn"""
1632 """generate a manifest based on the return values of filectxfn"""
1634
1633
1635 # keep this simple for now; just worry about p1
1634 # keep this simple for now; just worry about p1
1636 pctx = self._parents[0]
1635 pctx = self._parents[0]
1637 man = pctx.manifest().copy()
1636 man = pctx.manifest().copy()
1638
1637
1639 for f, fnode in man.iteritems():
1638 for f, fnode in man.iteritems():
1640 p1node = nullid
1639 p1node = nullid
1641 p2node = nullid
1640 p2node = nullid
1642 p = pctx[f].parents() # if file isn't in pctx, check p2?
1641 p = pctx[f].parents() # if file isn't in pctx, check p2?
1643 if len(p) > 0:
1642 if len(p) > 0:
1644 p1node = p[0].node()
1643 p1node = p[0].node()
1645 if len(p) > 1:
1644 if len(p) > 1:
1646 p2node = p[1].node()
1645 p2node = p[1].node()
1647 man[f] = revlog.hash(self[f].data(), p1node, p2node)
1646 man[f] = revlog.hash(self[f].data(), p1node, p2node)
1648
1647
1649 return man
1648 return man
1650
1649
1651
1650
1652 class memfilectx(committablefilectx):
1651 class memfilectx(committablefilectx):
1653 """memfilectx represents an in-memory file to commit.
1652 """memfilectx represents an in-memory file to commit.
1654
1653
1655 See memctx and committablefilectx for more details.
1654 See memctx and committablefilectx for more details.
1656 """
1655 """
1657 def __init__(self, repo, path, data, islink=False,
1656 def __init__(self, repo, path, data, islink=False,
1658 isexec=False, copied=None, memctx=None):
1657 isexec=False, copied=None, memctx=None):
1659 """
1658 """
1660 path is the normalized file path relative to repository root.
1659 path is the normalized file path relative to repository root.
1661 data is the file content as a string.
1660 data is the file content as a string.
1662 islink is True if the file is a symbolic link.
1661 islink is True if the file is a symbolic link.
1663 isexec is True if the file is executable.
1662 isexec is True if the file is executable.
1664 copied is the source file path if current file was copied in the
1663 copied is the source file path if current file was copied in the
1665 revision being committed, or None."""
1664 revision being committed, or None."""
1666 super(memfilectx, self).__init__(repo, path, None, memctx)
1665 super(memfilectx, self).__init__(repo, path, None, memctx)
1667 self._data = data
1666 self._data = data
1668 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
1667 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
1669 self._copied = None
1668 self._copied = None
1670 if copied:
1669 if copied:
1671 self._copied = (copied, nullid)
1670 self._copied = (copied, nullid)
1672
1671
1673 def data(self):
1672 def data(self):
1674 return self._data
1673 return self._data
1675 def size(self):
1674 def size(self):
1676 return len(self.data())
1675 return len(self.data())
1677 def flags(self):
1676 def flags(self):
1678 return self._flags
1677 return self._flags
1679 def renamed(self):
1678 def renamed(self):
1680 return self._copied
1679 return self._copied
1681
1680
1682 def remove(self, ignoremissing=False):
1681 def remove(self, ignoremissing=False):
1683 """wraps unlink for a repo's working directory"""
1682 """wraps unlink for a repo's working directory"""
1684 # need to figure out what to do here
1683 # need to figure out what to do here
1685 del self._changectx[self._path]
1684 del self._changectx[self._path]
1686
1685
1687 def write(self, data, flags):
1686 def write(self, data, flags):
1688 """wraps repo.wwrite"""
1687 """wraps repo.wwrite"""
1689 self._data = data
1688 self._data = data