context.status: explain "caching reasons" more fully...
Martin von Zweigbergk
r23257:37c57a7c default
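The comment added in this changeset explains why `_buildstatus` reads the older manifest first: if revision 1001 is stored as a delta against revision 1000, reading 1000 first leaves a reconstructed copy in the cache, so 1001 then costs only one delta application. The toy model below illustrates that cost argument; `ToyDeltaLog` and its counters are invented for illustration and are not Mercurial's actual revlog/manifest API.

```python
# Toy sketch (not Mercurial's revlog): rev 0 is stored in full, every later
# rev as a delta on its predecessor, and only the last reconstructed rev is
# cached. Reading the earlier rev first makes the later read one delta cheap.

class ToyDeltaLog(object):
    def __init__(self):
        self._cache = None      # (rev, reconstructed data) of the last read
        self.fullreads = 0      # reconstructions started from the rev-0 base
        self.deltareads = 0     # individual delta applications performed

    def read(self, rev):
        if self._cache is not None and self._cache[0] <= rev:
            start = self._cache[0]          # extend what is already cached
        else:
            start = 0                       # cache useless: rebuild from base
            self.fullreads += 1
        self.deltareads += rev - start
        self._cache = (rev, "manifest-for-rev-%d" % rev)
        return self._cache[1]

# Earliest first: one long reconstruction, then a single delta for rev 1001.
log = ToyDeltaLog()
log.read(1000)
log.read(1001)
print("%d full reads, %d delta applications" % (log.fullreads, log.deltareads))
# -> 1 full read, 1001 delta applications

# Reversed order: the cache holds rev 1001, which cannot help us get back to
# rev 1000, so the second read rebuilds the whole chain again.
log = ToyDeltaLog()
log.read(1001)
log.read(1000)
print("%d full reads, %d delta applications" % (log.fullreads, log.deltareads))
# -> 2 full reads, 2001 delta applications
```

Under this (simplified) cost model, comparing `self` and `other` in ascending revision order is the cheap path, which is exactly what the `self.rev() < other.rev()` guard in `_buildstatus` arranges before the manifests are matched.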
@@ -1,1683 +1,1688
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import nullid, nullrev, short, hex, bin
8 from node import nullid, nullrev, short, hex, bin
9 from i18n import _
9 from i18n import _
10 import mdiff, error, util, scmutil, subrepo, patch, encoding, phases
10 import mdiff, error, util, scmutil, subrepo, patch, encoding, phases
11 import match as matchmod
11 import match as matchmod
12 import os, errno, stat
12 import os, errno, stat
13 import obsolete as obsmod
13 import obsolete as obsmod
14 import repoview
14 import repoview
15 import fileset
15 import fileset
16 import revlog
16 import revlog
17
17
18 propertycache = util.propertycache
18 propertycache = util.propertycache
19
19
20 class basectx(object):
20 class basectx(object):
21 """A basectx object represents the common logic for its children:
21 """A basectx object represents the common logic for its children:
22 changectx: read-only context that is already present in the repo,
22 changectx: read-only context that is already present in the repo,
23 workingctx: a context that represents the working directory and can
23 workingctx: a context that represents the working directory and can
24 be committed,
24 be committed,
25 memctx: a context that represents changes in-memory and can also
25 memctx: a context that represents changes in-memory and can also
26 be committed."""
26 be committed."""
27 def __new__(cls, repo, changeid='', *args, **kwargs):
27 def __new__(cls, repo, changeid='', *args, **kwargs):
28 if isinstance(changeid, basectx):
28 if isinstance(changeid, basectx):
29 return changeid
29 return changeid
30
30
31 o = super(basectx, cls).__new__(cls)
31 o = super(basectx, cls).__new__(cls)
32
32
33 o._repo = repo
33 o._repo = repo
34 o._rev = nullrev
34 o._rev = nullrev
35 o._node = nullid
35 o._node = nullid
36
36
37 return o
37 return o
38
38
39 def __str__(self):
39 def __str__(self):
40 return short(self.node())
40 return short(self.node())
41
41
42 def __int__(self):
42 def __int__(self):
43 return self.rev()
43 return self.rev()
44
44
45 def __repr__(self):
45 def __repr__(self):
46 return "<%s %s>" % (type(self).__name__, str(self))
46 return "<%s %s>" % (type(self).__name__, str(self))
47
47
48 def __eq__(self, other):
48 def __eq__(self, other):
49 try:
49 try:
50 return type(self) == type(other) and self._rev == other._rev
50 return type(self) == type(other) and self._rev == other._rev
51 except AttributeError:
51 except AttributeError:
52 return False
52 return False
53
53
54 def __ne__(self, other):
54 def __ne__(self, other):
55 return not (self == other)
55 return not (self == other)
56
56
57 def __contains__(self, key):
57 def __contains__(self, key):
58 return key in self._manifest
58 return key in self._manifest
59
59
60 def __getitem__(self, key):
60 def __getitem__(self, key):
61 return self.filectx(key)
61 return self.filectx(key)
62
62
63 def __iter__(self):
63 def __iter__(self):
64 for f in sorted(self._manifest):
64 for f in sorted(self._manifest):
65 yield f
65 yield f
66
66
67 def _manifestmatches(self, match, s):
67 def _manifestmatches(self, match, s):
68 """generate a new manifest filtered by the match argument
68 """generate a new manifest filtered by the match argument
69
69
70 This method is for internal use only and mainly exists to provide an
70 This method is for internal use only and mainly exists to provide an
71 object oriented way for other contexts to customize the manifest
71 object oriented way for other contexts to customize the manifest
72 generation.
72 generation.
73 """
73 """
74 if match.always():
74 if match.always():
75 return self.manifest().copy()
75 return self.manifest().copy()
76
76
77 files = match.files()
77 files = match.files()
78 if (match.matchfn == match.exact or
78 if (match.matchfn == match.exact or
79 (not match.anypats() and util.all(fn in self for fn in files))):
79 (not match.anypats() and util.all(fn in self for fn in files))):
80 return self.manifest().intersectfiles(files)
80 return self.manifest().intersectfiles(files)
81
81
82 mf = self.manifest().copy()
82 mf = self.manifest().copy()
83 for fn in mf.keys():
83 for fn in mf.keys():
84 if not match(fn):
84 if not match(fn):
85 del mf[fn]
85 del mf[fn]
86 return mf
86 return mf
87
87
88 def _matchstatus(self, other, match):
88 def _matchstatus(self, other, match):
89 """return match.always if match is none
89 """return match.always if match is none
90
90
91 This internal method provides a way for child objects to override the
91 This internal method provides a way for child objects to override the
92 match operator.
92 match operator.
93 """
93 """
94 return match or matchmod.always(self._repo.root, self._repo.getcwd())
94 return match or matchmod.always(self._repo.root, self._repo.getcwd())
95
95
96 def _buildstatus(self, other, s, match, listignored, listclean,
96 def _buildstatus(self, other, s, match, listignored, listclean,
97 listunknown):
97 listunknown):
98 """build a status with respect to another context"""
98 """build a status with respect to another context"""
99 # load earliest manifest first for caching reasons
99 # Load earliest manifest first for caching reasons. More specifically,
100 # if you have revisions 1000 and 1001, 1001 is probably stored as a
101 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
102 # 1000 and cache it so that when you read 1001, we just need to apply a
103 # delta to what's in the cache. So that's one full reconstruction + one
104 # delta application.
100 if self.rev() is not None and self.rev() < other.rev():
105 if self.rev() is not None and self.rev() < other.rev():
101 self.manifest()
106 self.manifest()
102 mf1 = other._manifestmatches(match, s)
107 mf1 = other._manifestmatches(match, s)
103 mf2 = self._manifestmatches(match, s)
108 mf2 = self._manifestmatches(match, s)
104
109
105 modified, added, clean = [], [], []
110 modified, added, clean = [], [], []
106 deleted, unknown, ignored = s[3], s[4], s[5]
111 deleted, unknown, ignored = s[3], s[4], s[5]
107 deletedset = set(deleted)
112 deletedset = set(deleted)
108 withflags = mf1.withflags() | mf2.withflags()
113 withflags = mf1.withflags() | mf2.withflags()
109 for fn, mf2node in mf2.iteritems():
114 for fn, mf2node in mf2.iteritems():
110 if fn in mf1:
115 if fn in mf1:
111 if (fn not in deletedset and
116 if (fn not in deletedset and
112 ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
117 ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
113 (mf1[fn] != mf2node and
118 (mf1[fn] != mf2node and
114 (mf2node or self[fn].cmp(other[fn]))))):
119 (mf2node or self[fn].cmp(other[fn]))))):
115 modified.append(fn)
120 modified.append(fn)
116 elif listclean:
121 elif listclean:
117 clean.append(fn)
122 clean.append(fn)
118 del mf1[fn]
123 del mf1[fn]
119 elif fn not in deletedset:
124 elif fn not in deletedset:
120 added.append(fn)
125 added.append(fn)
121 removed = mf1.keys()
126 removed = mf1.keys()
122 if removed:
127 if removed:
123 # need to filter files if they are already reported as removed
128 # need to filter files if they are already reported as removed
124 unknown = [fn for fn in unknown if fn not in mf1]
129 unknown = [fn for fn in unknown if fn not in mf1]
125 ignored = [fn for fn in ignored if fn not in mf1]
130 ignored = [fn for fn in ignored if fn not in mf1]
126
131
127 return [modified, added, removed, deleted, unknown, ignored, clean]
132 return [modified, added, removed, deleted, unknown, ignored, clean]
128
133
129 @propertycache
134 @propertycache
130 def substate(self):
135 def substate(self):
131 return subrepo.state(self, self._repo.ui)
136 return subrepo.state(self, self._repo.ui)
132
137
133 def subrev(self, subpath):
138 def subrev(self, subpath):
134 return self.substate[subpath][1]
139 return self.substate[subpath][1]
135
140
136 def rev(self):
141 def rev(self):
137 return self._rev
142 return self._rev
138 def node(self):
143 def node(self):
139 return self._node
144 return self._node
140 def hex(self):
145 def hex(self):
141 return hex(self.node())
146 return hex(self.node())
142 def manifest(self):
147 def manifest(self):
143 return self._manifest
148 return self._manifest
144 def phasestr(self):
149 def phasestr(self):
145 return phases.phasenames[self.phase()]
150 return phases.phasenames[self.phase()]
146 def mutable(self):
151 def mutable(self):
147 return self.phase() > phases.public
152 return self.phase() > phases.public
148
153
149 def getfileset(self, expr):
154 def getfileset(self, expr):
150 return fileset.getfileset(self, expr)
155 return fileset.getfileset(self, expr)
151
156
152 def obsolete(self):
157 def obsolete(self):
153 """True if the changeset is obsolete"""
158 """True if the changeset is obsolete"""
154 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
159 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
155
160
156 def extinct(self):
161 def extinct(self):
157 """True if the changeset is extinct"""
162 """True if the changeset is extinct"""
158 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
163 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
159
164
160 def unstable(self):
165 def unstable(self):
161 """True if the changeset is not obsolete but it's ancestor are"""
166 """True if the changeset is not obsolete but it's ancestor are"""
162 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
167 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
163
168
164 def bumped(self):
169 def bumped(self):
165 """True if the changeset try to be a successor of a public changeset
170 """True if the changeset try to be a successor of a public changeset
166
171
167 Only non-public and non-obsolete changesets may be bumped.
172 Only non-public and non-obsolete changesets may be bumped.
168 """
173 """
169 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
174 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
170
175
171 def divergent(self):
176 def divergent(self):
172 """Is a successors of a changeset with multiple possible successors set
177 """Is a successors of a changeset with multiple possible successors set
173
178
174 Only non-public and non-obsolete changesets may be divergent.
179 Only non-public and non-obsolete changesets may be divergent.
175 """
180 """
176 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
181 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
177
182
178 def troubled(self):
183 def troubled(self):
179 """True if the changeset is either unstable, bumped or divergent"""
184 """True if the changeset is either unstable, bumped or divergent"""
180 return self.unstable() or self.bumped() or self.divergent()
185 return self.unstable() or self.bumped() or self.divergent()
181
186
182 def troubles(self):
187 def troubles(self):
183 """return the list of troubles affecting this changesets.
188 """return the list of troubles affecting this changesets.
184
189
185 Troubles are returned as strings. possible values are:
190 Troubles are returned as strings. possible values are:
186 - unstable,
191 - unstable,
187 - bumped,
192 - bumped,
188 - divergent.
193 - divergent.
189 """
194 """
190 troubles = []
195 troubles = []
191 if self.unstable():
196 if self.unstable():
192 troubles.append('unstable')
197 troubles.append('unstable')
193 if self.bumped():
198 if self.bumped():
194 troubles.append('bumped')
199 troubles.append('bumped')
195 if self.divergent():
200 if self.divergent():
196 troubles.append('divergent')
201 troubles.append('divergent')
197 return troubles
202 return troubles
198
203
199 def parents(self):
204 def parents(self):
200 """return contexts for each parent changeset"""
205 """return contexts for each parent changeset"""
201 return self._parents
206 return self._parents
202
207
203 def p1(self):
208 def p1(self):
204 return self._parents[0]
209 return self._parents[0]
205
210
206 def p2(self):
211 def p2(self):
207 if len(self._parents) == 2:
212 if len(self._parents) == 2:
208 return self._parents[1]
213 return self._parents[1]
209 return changectx(self._repo, -1)
214 return changectx(self._repo, -1)
210
215
211 def _fileinfo(self, path):
216 def _fileinfo(self, path):
212 if '_manifest' in self.__dict__:
217 if '_manifest' in self.__dict__:
213 try:
218 try:
214 return self._manifest[path], self._manifest.flags(path)
219 return self._manifest[path], self._manifest.flags(path)
215 except KeyError:
220 except KeyError:
216 raise error.ManifestLookupError(self._node, path,
221 raise error.ManifestLookupError(self._node, path,
217 _('not found in manifest'))
222 _('not found in manifest'))
218 if '_manifestdelta' in self.__dict__ or path in self.files():
223 if '_manifestdelta' in self.__dict__ or path in self.files():
219 if path in self._manifestdelta:
224 if path in self._manifestdelta:
220 return (self._manifestdelta[path],
225 return (self._manifestdelta[path],
221 self._manifestdelta.flags(path))
226 self._manifestdelta.flags(path))
222 node, flag = self._repo.manifest.find(self._changeset[0], path)
227 node, flag = self._repo.manifest.find(self._changeset[0], path)
223 if not node:
228 if not node:
224 raise error.ManifestLookupError(self._node, path,
229 raise error.ManifestLookupError(self._node, path,
225 _('not found in manifest'))
230 _('not found in manifest'))
226
231
227 return node, flag
232 return node, flag
228
233
229 def filenode(self, path):
234 def filenode(self, path):
230 return self._fileinfo(path)[0]
235 return self._fileinfo(path)[0]
231
236
232 def flags(self, path):
237 def flags(self, path):
233 try:
238 try:
234 return self._fileinfo(path)[1]
239 return self._fileinfo(path)[1]
235 except error.LookupError:
240 except error.LookupError:
236 return ''
241 return ''
237
242
238 def sub(self, path):
243 def sub(self, path):
239 return subrepo.subrepo(self, path)
244 return subrepo.subrepo(self, path)
240
245
241 def match(self, pats=[], include=None, exclude=None, default='glob'):
246 def match(self, pats=[], include=None, exclude=None, default='glob'):
242 r = self._repo
247 r = self._repo
243 return matchmod.match(r.root, r.getcwd(), pats,
248 return matchmod.match(r.root, r.getcwd(), pats,
244 include, exclude, default,
249 include, exclude, default,
245 auditor=r.auditor, ctx=self)
250 auditor=r.auditor, ctx=self)
246
251
247 def diff(self, ctx2=None, match=None, **opts):
252 def diff(self, ctx2=None, match=None, **opts):
248 """Returns a diff generator for the given contexts and matcher"""
253 """Returns a diff generator for the given contexts and matcher"""
249 if ctx2 is None:
254 if ctx2 is None:
250 ctx2 = self.p1()
255 ctx2 = self.p1()
251 if ctx2 is not None:
256 if ctx2 is not None:
252 ctx2 = self._repo[ctx2]
257 ctx2 = self._repo[ctx2]
253 diffopts = patch.diffopts(self._repo.ui, opts)
258 diffopts = patch.diffopts(self._repo.ui, opts)
254 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
259 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
255
260
256 @propertycache
261 @propertycache
257 def _dirs(self):
262 def _dirs(self):
258 return scmutil.dirs(self._manifest)
263 return scmutil.dirs(self._manifest)
259
264
260 def dirs(self):
265 def dirs(self):
261 return self._dirs
266 return self._dirs
262
267
263 def dirty(self, missing=False, merge=True, branch=True):
268 def dirty(self, missing=False, merge=True, branch=True):
264 return False
269 return False
265
270
266 def status(self, other=None, match=None, listignored=False,
271 def status(self, other=None, match=None, listignored=False,
267 listclean=False, listunknown=False, listsubrepos=False):
272 listclean=False, listunknown=False, listsubrepos=False):
268 """return status of files between two nodes or node and working
273 """return status of files between two nodes or node and working
269 directory.
274 directory.
270
275
271 If other is None, compare this node with working directory.
276 If other is None, compare this node with working directory.
272
277
273 returns (modified, added, removed, deleted, unknown, ignored, clean)
278 returns (modified, added, removed, deleted, unknown, ignored, clean)
274 """
279 """
275
280
276 ctx1 = self
281 ctx1 = self
277 ctx2 = self._repo[other]
282 ctx2 = self._repo[other]
278
283
279 # This next code block is, admittedly, fragile logic that tests for
284 # This next code block is, admittedly, fragile logic that tests for
280 # reversing the contexts and wouldn't need to exist if it weren't for
285 # reversing the contexts and wouldn't need to exist if it weren't for
281 # the fast (and common) code path of comparing the working directory
286 # the fast (and common) code path of comparing the working directory
282 # with its first parent.
287 # with its first parent.
283 #
288 #
284 # What we're aiming for here is the ability to call:
289 # What we're aiming for here is the ability to call:
285 #
290 #
286 # workingctx.status(parentctx)
291 # workingctx.status(parentctx)
287 #
292 #
288 # If we always built the manifest for each context and compared those,
293 # If we always built the manifest for each context and compared those,
289 # then we'd be done. But the special case of the above call means we
294 # then we'd be done. But the special case of the above call means we
290 # just copy the manifest of the parent.
295 # just copy the manifest of the parent.
291 reversed = False
296 reversed = False
292 if (not isinstance(ctx1, changectx)
297 if (not isinstance(ctx1, changectx)
293 and isinstance(ctx2, changectx)):
298 and isinstance(ctx2, changectx)):
294 reversed = True
299 reversed = True
295 ctx1, ctx2 = ctx2, ctx1
300 ctx1, ctx2 = ctx2, ctx1
296
301
297 match = ctx2._matchstatus(ctx1, match)
302 match = ctx2._matchstatus(ctx1, match)
298 r = [[], [], [], [], [], [], []]
303 r = [[], [], [], [], [], [], []]
299 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
304 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
300 listunknown)
305 listunknown)
301
306
302 if reversed:
307 if reversed:
303 # reverse added and removed
308 # reverse added and removed
304 r[1], r[2] = r[2], r[1]
309 r[1], r[2] = r[2], r[1]
305
310
306 if listsubrepos:
311 if listsubrepos:
307 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
312 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
308 rev2 = ctx2.subrev(subpath)
313 rev2 = ctx2.subrev(subpath)
309 try:
314 try:
310 submatch = matchmod.narrowmatcher(subpath, match)
315 submatch = matchmod.narrowmatcher(subpath, match)
311 s = sub.status(rev2, match=submatch, ignored=listignored,
316 s = sub.status(rev2, match=submatch, ignored=listignored,
312 clean=listclean, unknown=listunknown,
317 clean=listclean, unknown=listunknown,
313 listsubrepos=True)
318 listsubrepos=True)
314 for rfiles, sfiles in zip(r, s):
319 for rfiles, sfiles in zip(r, s):
315 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
320 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
316 except error.LookupError:
321 except error.LookupError:
317 self._repo.ui.status(_("skipping missing "
322 self._repo.ui.status(_("skipping missing "
318 "subrepository: %s\n") % subpath)
323 "subrepository: %s\n") % subpath)
319
324
320 for l in r:
325 for l in r:
321 l.sort()
326 l.sort()
322
327
323 # we return a tuple to signify that this list isn't changing
328 # we return a tuple to signify that this list isn't changing
324 return scmutil.status(*r)
329 return scmutil.status(*r)
325
330
326
331
327 def makememctx(repo, parents, text, user, date, branch, files, store,
332 def makememctx(repo, parents, text, user, date, branch, files, store,
328 editor=None):
333 editor=None):
329 def getfilectx(repo, memctx, path):
334 def getfilectx(repo, memctx, path):
330 data, mode, copied = store.getfile(path)
335 data, mode, copied = store.getfile(path)
331 if data is None:
336 if data is None:
332 return None
337 return None
333 islink, isexec = mode
338 islink, isexec = mode
334 return memfilectx(repo, path, data, islink=islink, isexec=isexec,
339 return memfilectx(repo, path, data, islink=islink, isexec=isexec,
335 copied=copied, memctx=memctx)
340 copied=copied, memctx=memctx)
336 extra = {}
341 extra = {}
337 if branch:
342 if branch:
338 extra['branch'] = encoding.fromlocal(branch)
343 extra['branch'] = encoding.fromlocal(branch)
339 ctx = memctx(repo, parents, text, files, getfilectx, user,
344 ctx = memctx(repo, parents, text, files, getfilectx, user,
340 date, extra, editor)
345 date, extra, editor)
341 return ctx
346 return ctx
342
347
343 class changectx(basectx):
348 class changectx(basectx):
344 """A changecontext object makes access to data related to a particular
349 """A changecontext object makes access to data related to a particular
345 changeset convenient. It represents a read-only context already present in
350 changeset convenient. It represents a read-only context already present in
346 the repo."""
351 the repo."""
347 def __init__(self, repo, changeid=''):
352 def __init__(self, repo, changeid=''):
348 """changeid is a revision number, node, or tag"""
353 """changeid is a revision number, node, or tag"""
349
354
350 # since basectx.__new__ already took care of copying the object, we
355 # since basectx.__new__ already took care of copying the object, we
351 # don't need to do anything in __init__, so we just exit here
356 # don't need to do anything in __init__, so we just exit here
352 if isinstance(changeid, basectx):
357 if isinstance(changeid, basectx):
353 return
358 return
354
359
355 if changeid == '':
360 if changeid == '':
356 changeid = '.'
361 changeid = '.'
357 self._repo = repo
362 self._repo = repo
358
363
359 try:
364 try:
360 if isinstance(changeid, int):
365 if isinstance(changeid, int):
361 self._node = repo.changelog.node(changeid)
366 self._node = repo.changelog.node(changeid)
362 self._rev = changeid
367 self._rev = changeid
363 return
368 return
364 if isinstance(changeid, long):
369 if isinstance(changeid, long):
365 changeid = str(changeid)
370 changeid = str(changeid)
366 if changeid == '.':
371 if changeid == '.':
367 self._node = repo.dirstate.p1()
372 self._node = repo.dirstate.p1()
368 self._rev = repo.changelog.rev(self._node)
373 self._rev = repo.changelog.rev(self._node)
369 return
374 return
370 if changeid == 'null':
375 if changeid == 'null':
371 self._node = nullid
376 self._node = nullid
372 self._rev = nullrev
377 self._rev = nullrev
373 return
378 return
374 if changeid == 'tip':
379 if changeid == 'tip':
375 self._node = repo.changelog.tip()
380 self._node = repo.changelog.tip()
376 self._rev = repo.changelog.rev(self._node)
381 self._rev = repo.changelog.rev(self._node)
377 return
382 return
378 if len(changeid) == 20:
383 if len(changeid) == 20:
379 try:
384 try:
380 self._node = changeid
385 self._node = changeid
381 self._rev = repo.changelog.rev(changeid)
386 self._rev = repo.changelog.rev(changeid)
382 return
387 return
383 except error.FilteredRepoLookupError:
388 except error.FilteredRepoLookupError:
384 raise
389 raise
385 except LookupError:
390 except LookupError:
386 pass
391 pass
387
392
388 try:
393 try:
389 r = int(changeid)
394 r = int(changeid)
390 if str(r) != changeid:
395 if str(r) != changeid:
391 raise ValueError
396 raise ValueError
392 l = len(repo.changelog)
397 l = len(repo.changelog)
393 if r < 0:
398 if r < 0:
394 r += l
399 r += l
395 if r < 0 or r >= l:
400 if r < 0 or r >= l:
396 raise ValueError
401 raise ValueError
397 self._rev = r
402 self._rev = r
398 self._node = repo.changelog.node(r)
403 self._node = repo.changelog.node(r)
399 return
404 return
400 except error.FilteredIndexError:
405 except error.FilteredIndexError:
401 raise
406 raise
402 except (ValueError, OverflowError, IndexError):
407 except (ValueError, OverflowError, IndexError):
403 pass
408 pass
404
409
405 if len(changeid) == 40:
410 if len(changeid) == 40:
406 try:
411 try:
407 self._node = bin(changeid)
412 self._node = bin(changeid)
408 self._rev = repo.changelog.rev(self._node)
413 self._rev = repo.changelog.rev(self._node)
409 return
414 return
410 except error.FilteredLookupError:
415 except error.FilteredLookupError:
411 raise
416 raise
412 except (TypeError, LookupError):
417 except (TypeError, LookupError):
413 pass
418 pass
414
419
415 if changeid in repo._bookmarks:
420 if changeid in repo._bookmarks:
416 self._node = repo._bookmarks[changeid]
421 self._node = repo._bookmarks[changeid]
417 self._rev = repo.changelog.rev(self._node)
422 self._rev = repo.changelog.rev(self._node)
418 return
423 return
419 if changeid in repo._tagscache.tags:
424 if changeid in repo._tagscache.tags:
420 self._node = repo._tagscache.tags[changeid]
425 self._node = repo._tagscache.tags[changeid]
421 self._rev = repo.changelog.rev(self._node)
426 self._rev = repo.changelog.rev(self._node)
422 return
427 return
423 try:
428 try:
424 self._node = repo.branchtip(changeid)
429 self._node = repo.branchtip(changeid)
425 self._rev = repo.changelog.rev(self._node)
430 self._rev = repo.changelog.rev(self._node)
426 return
431 return
427 except error.FilteredRepoLookupError:
432 except error.FilteredRepoLookupError:
428 raise
433 raise
429 except error.RepoLookupError:
434 except error.RepoLookupError:
430 pass
435 pass
431
436
432 self._node = repo.unfiltered().changelog._partialmatch(changeid)
437 self._node = repo.unfiltered().changelog._partialmatch(changeid)
433 if self._node is not None:
438 if self._node is not None:
434 self._rev = repo.changelog.rev(self._node)
439 self._rev = repo.changelog.rev(self._node)
435 return
440 return
436
441
437 # lookup failed
442 # lookup failed
438 # check if it might have come from damaged dirstate
443 # check if it might have come from damaged dirstate
439 #
444 #
440 # XXX we could avoid the unfiltered if we had a recognizable
445 # XXX we could avoid the unfiltered if we had a recognizable
441 # exception for filtered changeset access
446 # exception for filtered changeset access
442 if changeid in repo.unfiltered().dirstate.parents():
447 if changeid in repo.unfiltered().dirstate.parents():
443 msg = _("working directory has unknown parent '%s'!")
448 msg = _("working directory has unknown parent '%s'!")
444 raise error.Abort(msg % short(changeid))
449 raise error.Abort(msg % short(changeid))
445 try:
450 try:
446 if len(changeid) == 20:
451 if len(changeid) == 20:
447 changeid = hex(changeid)
452 changeid = hex(changeid)
448 except TypeError:
453 except TypeError:
449 pass
454 pass
450 except (error.FilteredIndexError, error.FilteredLookupError,
455 except (error.FilteredIndexError, error.FilteredLookupError,
451 error.FilteredRepoLookupError):
456 error.FilteredRepoLookupError):
452 if repo.filtername == 'visible':
457 if repo.filtername == 'visible':
453 msg = _("hidden revision '%s'") % changeid
458 msg = _("hidden revision '%s'") % changeid
454 hint = _('use --hidden to access hidden revisions')
459 hint = _('use --hidden to access hidden revisions')
455 raise error.FilteredRepoLookupError(msg, hint=hint)
460 raise error.FilteredRepoLookupError(msg, hint=hint)
456 msg = _("filtered revision '%s' (not in '%s' subset)")
461 msg = _("filtered revision '%s' (not in '%s' subset)")
457 msg %= (changeid, repo.filtername)
462 msg %= (changeid, repo.filtername)
458 raise error.FilteredRepoLookupError(msg)
463 raise error.FilteredRepoLookupError(msg)
459 except IndexError:
464 except IndexError:
460 pass
465 pass
461 raise error.RepoLookupError(
466 raise error.RepoLookupError(
462 _("unknown revision '%s'") % changeid)
467 _("unknown revision '%s'") % changeid)
463
468
464 def __hash__(self):
469 def __hash__(self):
465 try:
470 try:
466 return hash(self._rev)
471 return hash(self._rev)
467 except AttributeError:
472 except AttributeError:
468 return id(self)
473 return id(self)
469
474
470 def __nonzero__(self):
475 def __nonzero__(self):
471 return self._rev != nullrev
476 return self._rev != nullrev
472
477
473 @propertycache
478 @propertycache
474 def _changeset(self):
479 def _changeset(self):
475 return self._repo.changelog.read(self.rev())
480 return self._repo.changelog.read(self.rev())
476
481
477 @propertycache
482 @propertycache
478 def _manifest(self):
483 def _manifest(self):
479 return self._repo.manifest.read(self._changeset[0])
484 return self._repo.manifest.read(self._changeset[0])
480
485
481 @propertycache
486 @propertycache
482 def _manifestdelta(self):
487 def _manifestdelta(self):
483 return self._repo.manifest.readdelta(self._changeset[0])
488 return self._repo.manifest.readdelta(self._changeset[0])
484
489
485 @propertycache
490 @propertycache
486 def _parents(self):
491 def _parents(self):
487 p = self._repo.changelog.parentrevs(self._rev)
492 p = self._repo.changelog.parentrevs(self._rev)
488 if p[1] == nullrev:
493 if p[1] == nullrev:
489 p = p[:-1]
494 p = p[:-1]
490 return [changectx(self._repo, x) for x in p]
495 return [changectx(self._repo, x) for x in p]
491
496
492 def changeset(self):
497 def changeset(self):
493 return self._changeset
498 return self._changeset
494 def manifestnode(self):
499 def manifestnode(self):
495 return self._changeset[0]
500 return self._changeset[0]
496
501
497 def user(self):
502 def user(self):
498 return self._changeset[1]
503 return self._changeset[1]
499 def date(self):
504 def date(self):
500 return self._changeset[2]
505 return self._changeset[2]
501 def files(self):
506 def files(self):
502 return self._changeset[3]
507 return self._changeset[3]
503 def description(self):
508 def description(self):
504 return self._changeset[4]
509 return self._changeset[4]
505 def branch(self):
510 def branch(self):
506 return encoding.tolocal(self._changeset[5].get("branch"))
511 return encoding.tolocal(self._changeset[5].get("branch"))
507 def closesbranch(self):
512 def closesbranch(self):
508 return 'close' in self._changeset[5]
513 return 'close' in self._changeset[5]
509 def extra(self):
514 def extra(self):
510 return self._changeset[5]
515 return self._changeset[5]
511 def tags(self):
516 def tags(self):
512 return self._repo.nodetags(self._node)
517 return self._repo.nodetags(self._node)
513 def bookmarks(self):
518 def bookmarks(self):
514 return self._repo.nodebookmarks(self._node)
519 return self._repo.nodebookmarks(self._node)
515 def phase(self):
520 def phase(self):
516 return self._repo._phasecache.phase(self._repo, self._rev)
521 return self._repo._phasecache.phase(self._repo, self._rev)
517 def hidden(self):
522 def hidden(self):
518 return self._rev in repoview.filterrevs(self._repo, 'visible')
523 return self._rev in repoview.filterrevs(self._repo, 'visible')
519
524
520 def children(self):
525 def children(self):
521 """return contexts for each child changeset"""
526 """return contexts for each child changeset"""
522 c = self._repo.changelog.children(self._node)
527 c = self._repo.changelog.children(self._node)
523 return [changectx(self._repo, x) for x in c]
528 return [changectx(self._repo, x) for x in c]
524
529
525 def ancestors(self):
530 def ancestors(self):
526 for a in self._repo.changelog.ancestors([self._rev]):
531 for a in self._repo.changelog.ancestors([self._rev]):
527 yield changectx(self._repo, a)
532 yield changectx(self._repo, a)
528
533
529 def descendants(self):
534 def descendants(self):
530 for d in self._repo.changelog.descendants([self._rev]):
535 for d in self._repo.changelog.descendants([self._rev]):
531 yield changectx(self._repo, d)
536 yield changectx(self._repo, d)
532
537
533 def filectx(self, path, fileid=None, filelog=None):
538 def filectx(self, path, fileid=None, filelog=None):
534 """get a file context from this changeset"""
539 """get a file context from this changeset"""
535 if fileid is None:
540 if fileid is None:
536 fileid = self.filenode(path)
541 fileid = self.filenode(path)
537 return filectx(self._repo, path, fileid=fileid,
542 return filectx(self._repo, path, fileid=fileid,
538 changectx=self, filelog=filelog)
543 changectx=self, filelog=filelog)
539
544
540 def ancestor(self, c2, warn=False):
545 def ancestor(self, c2, warn=False):
541 """return the "best" ancestor context of self and c2
546 """return the "best" ancestor context of self and c2
542
547
543 If there are multiple candidates, it will show a message and check
548 If there are multiple candidates, it will show a message and check
544 merge.preferancestor configuration before falling back to the
549 merge.preferancestor configuration before falling back to the
545 revlog ancestor."""
550 revlog ancestor."""
546 # deal with workingctxs
551 # deal with workingctxs
547 n2 = c2._node
552 n2 = c2._node
548 if n2 is None:
553 if n2 is None:
549 n2 = c2._parents[0]._node
554 n2 = c2._parents[0]._node
550 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
555 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
551 if not cahs:
556 if not cahs:
552 anc = nullid
557 anc = nullid
553 elif len(cahs) == 1:
558 elif len(cahs) == 1:
554 anc = cahs[0]
559 anc = cahs[0]
555 else:
560 else:
556 for r in self._repo.ui.configlist('merge', 'preferancestor'):
561 for r in self._repo.ui.configlist('merge', 'preferancestor'):
557 try:
562 try:
558 ctx = changectx(self._repo, r)
563 ctx = changectx(self._repo, r)
559 except error.RepoLookupError:
564 except error.RepoLookupError:
560 continue
565 continue
561 anc = ctx.node()
566 anc = ctx.node()
562 if anc in cahs:
567 if anc in cahs:
563 break
568 break
564 else:
569 else:
565 anc = self._repo.changelog.ancestor(self._node, n2)
570 anc = self._repo.changelog.ancestor(self._node, n2)
566 if warn:
571 if warn:
567 self._repo.ui.status(
572 self._repo.ui.status(
568 (_("note: using %s as ancestor of %s and %s\n") %
573 (_("note: using %s as ancestor of %s and %s\n") %
569 (short(anc), short(self._node), short(n2))) +
574 (short(anc), short(self._node), short(n2))) +
570 ''.join(_(" alternatively, use --config "
575 ''.join(_(" alternatively, use --config "
571 "merge.preferancestor=%s\n") %
576 "merge.preferancestor=%s\n") %
572 short(n) for n in sorted(cahs) if n != anc))
577 short(n) for n in sorted(cahs) if n != anc))
573 return changectx(self._repo, anc)
578 return changectx(self._repo, anc)
574
579
575 def descendant(self, other):
580 def descendant(self, other):
576 """True if other is descendant of this changeset"""
581 """True if other is descendant of this changeset"""
577 return self._repo.changelog.descendant(self._rev, other._rev)
582 return self._repo.changelog.descendant(self._rev, other._rev)
578
583
579 def walk(self, match):
584 def walk(self, match):
580 fset = set(match.files())
585 fset = set(match.files())
581 # for dirstate.walk, files=['.'] means "walk the whole tree".
586 # for dirstate.walk, files=['.'] means "walk the whole tree".
582 # follow that here, too
587 # follow that here, too
583 fset.discard('.')
588 fset.discard('.')
584
589
585 # avoid the entire walk if we're only looking for specific files
590 # avoid the entire walk if we're only looking for specific files
586 if fset and not match.anypats():
591 if fset and not match.anypats():
587 if util.all([fn in self for fn in fset]):
592 if util.all([fn in self for fn in fset]):
588 for fn in sorted(fset):
593 for fn in sorted(fset):
589 if match(fn):
594 if match(fn):
590 yield fn
595 yield fn
591 raise StopIteration
596 raise StopIteration
592
597
593 for fn in self:
598 for fn in self:
594 if fn in fset:
599 if fn in fset:
595 # specified pattern is the exact name
600 # specified pattern is the exact name
596 fset.remove(fn)
601 fset.remove(fn)
597 if match(fn):
602 if match(fn):
598 yield fn
603 yield fn
599 for fn in sorted(fset):
604 for fn in sorted(fset):
600 if fn in self._dirs:
605 if fn in self._dirs:
601 # specified pattern is a directory
606 # specified pattern is a directory
602 continue
607 continue
603 match.bad(fn, _('no such file in rev %s') % self)
608 match.bad(fn, _('no such file in rev %s') % self)
604
609
605 def matches(self, match):
610 def matches(self, match):
606 return self.walk(match)
611 return self.walk(match)
607
612
608 class basefilectx(object):
613 class basefilectx(object):
609 """A filecontext object represents the common logic for its children:
614 """A filecontext object represents the common logic for its children:
610 filectx: read-only access to a filerevision that is already present
615 filectx: read-only access to a filerevision that is already present
611 in the repo,
616 in the repo,
612 workingfilectx: a filecontext that represents files from the working
617 workingfilectx: a filecontext that represents files from the working
613 directory,
618 directory,
614 memfilectx: a filecontext that represents files in-memory."""
619 memfilectx: a filecontext that represents files in-memory."""
615 def __new__(cls, repo, path, *args, **kwargs):
620 def __new__(cls, repo, path, *args, **kwargs):
616 return super(basefilectx, cls).__new__(cls)
621 return super(basefilectx, cls).__new__(cls)
617
622
618 @propertycache
623 @propertycache
619 def _filelog(self):
624 def _filelog(self):
620 return self._repo.file(self._path)
625 return self._repo.file(self._path)
621
626
622 @propertycache
627 @propertycache
623 def _changeid(self):
628 def _changeid(self):
624 if '_changeid' in self.__dict__:
629 if '_changeid' in self.__dict__:
625 return self._changeid
630 return self._changeid
626 elif '_changectx' in self.__dict__:
631 elif '_changectx' in self.__dict__:
627 return self._changectx.rev()
632 return self._changectx.rev()
628 else:
633 else:
629 return self._filelog.linkrev(self._filerev)
634 return self._filelog.linkrev(self._filerev)
630
635
631 @propertycache
636 @propertycache
632 def _filenode(self):
637 def _filenode(self):
633 if '_fileid' in self.__dict__:
638 if '_fileid' in self.__dict__:
634 return self._filelog.lookup(self._fileid)
639 return self._filelog.lookup(self._fileid)
635 else:
640 else:
636 return self._changectx.filenode(self._path)
641 return self._changectx.filenode(self._path)
637
642
638 @propertycache
643 @propertycache
639 def _filerev(self):
644 def _filerev(self):
640 return self._filelog.rev(self._filenode)
645 return self._filelog.rev(self._filenode)
641
646
642 @propertycache
647 @propertycache
643 def _repopath(self):
648 def _repopath(self):
644 return self._path
649 return self._path
645
650
646 def __nonzero__(self):
651 def __nonzero__(self):
647 try:
652 try:
648 self._filenode
653 self._filenode
649 return True
654 return True
650 except error.LookupError:
655 except error.LookupError:
651 # file is missing
656 # file is missing
652 return False
657 return False
653
658
654 def __str__(self):
659 def __str__(self):
655 return "%s@%s" % (self.path(), self._changectx)
660 return "%s@%s" % (self.path(), self._changectx)
656
661
657 def __repr__(self):
662 def __repr__(self):
658 return "<%s %s>" % (type(self).__name__, str(self))
663 return "<%s %s>" % (type(self).__name__, str(self))
659
664
660 def __hash__(self):
665 def __hash__(self):
661 try:
666 try:
662 return hash((self._path, self._filenode))
667 return hash((self._path, self._filenode))
663 except AttributeError:
668 except AttributeError:
664 return id(self)
669 return id(self)
665
670
666 def __eq__(self, other):
671 def __eq__(self, other):
667 try:
672 try:
668 return (type(self) == type(other) and self._path == other._path
673 return (type(self) == type(other) and self._path == other._path
669 and self._filenode == other._filenode)
674 and self._filenode == other._filenode)
670 except AttributeError:
675 except AttributeError:
671 return False
676 return False
672
677
673 def __ne__(self, other):
678 def __ne__(self, other):
674 return not (self == other)
679 return not (self == other)
675
680
676 def filerev(self):
681 def filerev(self):
677 return self._filerev
682 return self._filerev
678 def filenode(self):
683 def filenode(self):
679 return self._filenode
684 return self._filenode
680 def flags(self):
685 def flags(self):
681 return self._changectx.flags(self._path)
686 return self._changectx.flags(self._path)
682 def filelog(self):
687 def filelog(self):
683 return self._filelog
688 return self._filelog
684 def rev(self):
689 def rev(self):
685 return self._changeid
690 return self._changeid
686 def linkrev(self):
691 def linkrev(self):
687 return self._filelog.linkrev(self._filerev)
692 return self._filelog.linkrev(self._filerev)
688 def node(self):
693 def node(self):
689 return self._changectx.node()
694 return self._changectx.node()
690 def hex(self):
695 def hex(self):
691 return self._changectx.hex()
696 return self._changectx.hex()
692 def user(self):
697 def user(self):
693 return self._changectx.user()
698 return self._changectx.user()
694 def date(self):
699 def date(self):
695 return self._changectx.date()
700 return self._changectx.date()
696 def files(self):
701 def files(self):
697 return self._changectx.files()
702 return self._changectx.files()
698 def description(self):
703 def description(self):
699 return self._changectx.description()
704 return self._changectx.description()
700 def branch(self):
705 def branch(self):
701 return self._changectx.branch()
706 return self._changectx.branch()
702 def extra(self):
707 def extra(self):
703 return self._changectx.extra()
708 return self._changectx.extra()
704 def phase(self):
709 def phase(self):
705 return self._changectx.phase()
710 return self._changectx.phase()
706 def phasestr(self):
711 def phasestr(self):
707 return self._changectx.phasestr()
712 return self._changectx.phasestr()
708 def manifest(self):
713 def manifest(self):
709 return self._changectx.manifest()
714 return self._changectx.manifest()
710 def changectx(self):
715 def changectx(self):
711 return self._changectx
716 return self._changectx
712
717
713 def path(self):
718 def path(self):
714 return self._path
719 return self._path
715
720
716 def isbinary(self):
721 def isbinary(self):
717 try:
722 try:
718 return util.binary(self.data())
723 return util.binary(self.data())
719 except IOError:
724 except IOError:
720 return False
725 return False
721 def isexec(self):
726 def isexec(self):
722 return 'x' in self.flags()
727 return 'x' in self.flags()
723 def islink(self):
728 def islink(self):
724 return 'l' in self.flags()
729 return 'l' in self.flags()
725
730
726 def cmp(self, fctx):
731 def cmp(self, fctx):
727 """compare with other file context
732 """compare with other file context
728
733
729 returns True if different than fctx.
734 returns True if different than fctx.
730 """
735 """
731 if (fctx._filerev is None
736 if (fctx._filerev is None
732 and (self._repo._encodefilterpats
737 and (self._repo._encodefilterpats
733 # if file data starts with '\1\n', empty metadata block is
738 # if file data starts with '\1\n', empty metadata block is
734 # prepended, which adds 4 bytes to filelog.size().
739 # prepended, which adds 4 bytes to filelog.size().
735 or self.size() - 4 == fctx.size())
740 or self.size() - 4 == fctx.size())
736 or self.size() == fctx.size()):
741 or self.size() == fctx.size()):
737 return self._filelog.cmp(self._filenode, fctx.data())
742 return self._filelog.cmp(self._filenode, fctx.data())
738
743
739 return True
744 return True
740
745
741 def parents(self):
746 def parents(self):
742 _path = self._path
747 _path = self._path
743 fl = self._filelog
748 fl = self._filelog
744 pl = [(_path, n, fl) for n in self._filelog.parents(self._filenode)]
749 pl = [(_path, n, fl) for n in self._filelog.parents(self._filenode)]
745
750
746 r = self._filelog.renamed(self._filenode)
751 r = self._filelog.renamed(self._filenode)
747 if r:
752 if r:
748 pl[0] = (r[0], r[1], None)
753 pl[0] = (r[0], r[1], None)
749
754
750 return [filectx(self._repo, p, fileid=n, filelog=l)
755 return [filectx(self._repo, p, fileid=n, filelog=l)
751 for p, n, l in pl if n != nullid]
756 for p, n, l in pl if n != nullid]
752
757
753 def p1(self):
758 def p1(self):
754 return self.parents()[0]
759 return self.parents()[0]
755
760
756 def p2(self):
761 def p2(self):
757 p = self.parents()
762 p = self.parents()
758 if len(p) == 2:
763 if len(p) == 2:
759 return p[1]
764 return p[1]
760 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
765 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
761
766
762 def annotate(self, follow=False, linenumber=None, diffopts=None):
767 def annotate(self, follow=False, linenumber=None, diffopts=None):
763 '''returns a list of tuples of (ctx, line) for each line
768 '''returns a list of tuples of (ctx, line) for each line
764 in the file, where ctx is the filectx of the node where
769 in the file, where ctx is the filectx of the node where
765 that line was last changed.
770 that line was last changed.
766 This returns tuples of ((ctx, linenumber), line) for each line,
771 This returns tuples of ((ctx, linenumber), line) for each line,
767 if "linenumber" parameter is NOT "None".
772 if "linenumber" parameter is NOT "None".
768 In such tuples, linenumber means one at the first appearance
773 In such tuples, linenumber means one at the first appearance
769 in the managed file.
774 in the managed file.
770 To reduce annotation cost,
775 To reduce annotation cost,
771 this returns fixed value(False is used) as linenumber,
776 this returns fixed value(False is used) as linenumber,
772 if "linenumber" parameter is "False".'''
777 if "linenumber" parameter is "False".'''
773
778
774 if linenumber is None:
779 if linenumber is None:
775 def decorate(text, rev):
780 def decorate(text, rev):
776 return ([rev] * len(text.splitlines()), text)
781 return ([rev] * len(text.splitlines()), text)
777 elif linenumber:
782 elif linenumber:
778 def decorate(text, rev):
783 def decorate(text, rev):
779 size = len(text.splitlines())
784 size = len(text.splitlines())
780 return ([(rev, i) for i in xrange(1, size + 1)], text)
785 return ([(rev, i) for i in xrange(1, size + 1)], text)
781 else:
786 else:
782 def decorate(text, rev):
787 def decorate(text, rev):
783 return ([(rev, False)] * len(text.splitlines()), text)
788 return ([(rev, False)] * len(text.splitlines()), text)
784
789
785 def pair(parent, child):
790 def pair(parent, child):
786 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts,
791 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts,
787 refine=True)
792 refine=True)
788 for (a1, a2, b1, b2), t in blocks:
793 for (a1, a2, b1, b2), t in blocks:
789 # Changed blocks ('!') or blocks made only of blank lines ('~')
794 # Changed blocks ('!') or blocks made only of blank lines ('~')
790 # belong to the child.
795 # belong to the child.
791 if t == '=':
796 if t == '=':
792 child[0][b1:b2] = parent[0][a1:a2]
797 child[0][b1:b2] = parent[0][a1:a2]
793 return child
798 return child
794
799
795 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
800 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
796
801
797 def parents(f):
802 def parents(f):
798 pl = f.parents()
803 pl = f.parents()
799
804
800 # Don't return renamed parents if we aren't following.
805 # Don't return renamed parents if we aren't following.
801 if not follow:
806 if not follow:
802 pl = [p for p in pl if p.path() == f.path()]
807 pl = [p for p in pl if p.path() == f.path()]
803
808
804 # renamed filectx won't have a filelog yet, so set it
809 # renamed filectx won't have a filelog yet, so set it
805 # from the cache to save time
810 # from the cache to save time
806 for p in pl:
811 for p in pl:
807 if not '_filelog' in p.__dict__:
812 if not '_filelog' in p.__dict__:
808 p._filelog = getlog(p.path())
813 p._filelog = getlog(p.path())
809
814
810 return pl
815 return pl
811
816
812 # use linkrev to find the first changeset where self appeared
817 # use linkrev to find the first changeset where self appeared
813 if self.rev() != self.linkrev():
818 if self.rev() != self.linkrev():
814 base = self.filectx(self.filenode())
819 base = self.filectx(self.filenode())
815 else:
820 else:
816 base = self
821 base = self
817
822
818 # This algorithm would prefer to be recursive, but Python is a
823 # This algorithm would prefer to be recursive, but Python is a
819 # bit recursion-hostile. Instead we do an iterative
824 # bit recursion-hostile. Instead we do an iterative
820 # depth-first search.
825 # depth-first search.
821
826
822 visit = [base]
827 visit = [base]
823 hist = {}
828 hist = {}
824 pcache = {}
829 pcache = {}
825 needed = {base: 1}
830 needed = {base: 1}
826 while visit:
831 while visit:
827 f = visit[-1]
832 f = visit[-1]
828 pcached = f in pcache
833 pcached = f in pcache
829 if not pcached:
834 if not pcached:
830 pcache[f] = parents(f)
835 pcache[f] = parents(f)
831
836
832 ready = True
837 ready = True
833 pl = pcache[f]
838 pl = pcache[f]
834 for p in pl:
839 for p in pl:
835 if p not in hist:
840 if p not in hist:
836 ready = False
841 ready = False
837 visit.append(p)
842 visit.append(p)
838 if not pcached:
843 if not pcached:
839 needed[p] = needed.get(p, 0) + 1
844 needed[p] = needed.get(p, 0) + 1
840 if ready:
845 if ready:
841 visit.pop()
846 visit.pop()
842 reusable = f in hist
847 reusable = f in hist
843 if reusable:
848 if reusable:
844 curr = hist[f]
849 curr = hist[f]
845 else:
850 else:
846 curr = decorate(f.data(), f)
851 curr = decorate(f.data(), f)
847 for p in pl:
852 for p in pl:
848 if not reusable:
853 if not reusable:
849 curr = pair(hist[p], curr)
854 curr = pair(hist[p], curr)
850 if needed[p] == 1:
855 if needed[p] == 1:
851 del hist[p]
856 del hist[p]
852 del needed[p]
857 del needed[p]
853 else:
858 else:
854 needed[p] -= 1
859 needed[p] -= 1
855
860
856 hist[f] = curr
861 hist[f] = curr
857 pcache[f] = []
862 pcache[f] = []
858
863
859 return zip(hist[base][0], hist[base][1].splitlines(True))
864 return zip(hist[base][0], hist[base][1].splitlines(True))
860
865
861 def ancestors(self, followfirst=False):
866 def ancestors(self, followfirst=False):
862 visit = {}
867 visit = {}
863 c = self
868 c = self
864 cut = followfirst and 1 or None
869 cut = followfirst and 1 or None
865 while True:
870 while True:
866 for parent in c.parents()[:cut]:
871 for parent in c.parents()[:cut]:
867 visit[(parent.rev(), parent.node())] = parent
872 visit[(parent.rev(), parent.node())] = parent
868 if not visit:
873 if not visit:
869 break
874 break
870 c = visit.pop(max(visit))
875 c = visit.pop(max(visit))
871 yield c
876 yield c
872
877
873 class filectx(basefilectx):
878 class filectx(basefilectx):
874 """A filecontext object makes access to data related to a particular
879 """A filecontext object makes access to data related to a particular
875 filerevision convenient."""
880 filerevision convenient."""
876 def __init__(self, repo, path, changeid=None, fileid=None,
881 def __init__(self, repo, path, changeid=None, fileid=None,
877 filelog=None, changectx=None):
882 filelog=None, changectx=None):
878 """changeid can be a changeset revision, node, or tag.
883 """changeid can be a changeset revision, node, or tag.
879 fileid can be a file revision or node."""
884 fileid can be a file revision or node."""
880 self._repo = repo
885 self._repo = repo
881 self._path = path
886 self._path = path
882
887
883 assert (changeid is not None
888 assert (changeid is not None
884 or fileid is not None
889 or fileid is not None
885 or changectx is not None), \
890 or changectx is not None), \
886 ("bad args: changeid=%r, fileid=%r, changectx=%r"
891 ("bad args: changeid=%r, fileid=%r, changectx=%r"
887 % (changeid, fileid, changectx))
892 % (changeid, fileid, changectx))
888
893
889 if filelog is not None:
894 if filelog is not None:
890 self._filelog = filelog
895 self._filelog = filelog
891
896
892 if changeid is not None:
897 if changeid is not None:
893 self._changeid = changeid
898 self._changeid = changeid
894 if changectx is not None:
899 if changectx is not None:
895 self._changectx = changectx
900 self._changectx = changectx
896 if fileid is not None:
901 if fileid is not None:
897 self._fileid = fileid
902 self._fileid = fileid
898
903
899 @propertycache
904 @propertycache
900 def _changectx(self):
905 def _changectx(self):
901 try:
906 try:
902 return changectx(self._repo, self._changeid)
907 return changectx(self._repo, self._changeid)
903 except error.RepoLookupError:
908 except error.RepoLookupError:
904 # Linkrev may point to any revision in the repository. When the
909 # Linkrev may point to any revision in the repository. When the
905 # repository is filtered this may lead to `filectx` trying to build
910 # repository is filtered this may lead to `filectx` trying to build
906 # `changectx` for filtered revision. In such case we fallback to
911 # `changectx` for filtered revision. In such case we fallback to
907 # creating `changectx` on the unfiltered version of the reposition.
912 # creating `changectx` on the unfiltered version of the reposition.
908 # This fallback should not be an issue because `changectx` from
913 # This fallback should not be an issue because `changectx` from
909 # `filectx` are not used in complex operations that care about
914 # `filectx` are not used in complex operations that care about
910 # filtering.
915 # filtering.
911 #
916 #
912 # This fallback is a cheap and dirty fix that prevent several
917 # This fallback is a cheap and dirty fix that prevent several
913 # crashes. It does not ensure the behavior is correct. However the
918 # crashes. It does not ensure the behavior is correct. However the
914 # behavior was not correct before filtering either and "incorrect
919 # behavior was not correct before filtering either and "incorrect
915 # behavior" is seen as better as "crash"
920 # behavior" is seen as better as "crash"
916 #
921 #
917 # Linkrevs have several serious troubles with filtering that are
922 # Linkrevs have several serious troubles with filtering that are
918 # complicated to solve. Proper handling of the issue here should be
923 # complicated to solve. Proper handling of the issue here should be
919 # considered when solving linkrev issue are on the table.
924 # considered when solving linkrev issue are on the table.
920 return changectx(self._repo.unfiltered(), self._changeid)
925 return changectx(self._repo.unfiltered(), self._changeid)
921
926
922 def filectx(self, fileid):
927 def filectx(self, fileid):
923 '''opens an arbitrary revision of the file without
928 '''opens an arbitrary revision of the file without
924 opening a new filelog'''
929 opening a new filelog'''
925 return filectx(self._repo, self._path, fileid=fileid,
930 return filectx(self._repo, self._path, fileid=fileid,
926 filelog=self._filelog)
931 filelog=self._filelog)
927
932
928 def data(self):
933 def data(self):
929 try:
934 try:
930 return self._filelog.read(self._filenode)
935 return self._filelog.read(self._filenode)
931 except error.CensoredNodeError:
936 except error.CensoredNodeError:
932 if self._repo.ui.config("censor", "policy", "abort") == "ignore":
937 if self._repo.ui.config("censor", "policy", "abort") == "ignore":
933 return ""
938 return ""
934 raise util.Abort(_("censored node: %s") % short(self._filenode),
939 raise util.Abort(_("censored node: %s") % short(self._filenode),
935 hint=_("set censor.policy to ignore errors"))
940 hint=_("set censor.policy to ignore errors"))
936
941
937 def size(self):
942 def size(self):
938 return self._filelog.size(self._filerev)
943 return self._filelog.size(self._filerev)
939
944
940 def renamed(self):
945 def renamed(self):
941 """check if file was actually renamed in this changeset revision
946 """check if file was actually renamed in this changeset revision
942
947
943 If rename logged in file revision, we report copy for changeset only
948 If rename logged in file revision, we report copy for changeset only
944 if file revisions linkrev points back to the changeset in question
949 if file revisions linkrev points back to the changeset in question
945 or both changeset parents contain different file revisions.
950 or both changeset parents contain different file revisions.
946 """
951 """
947
952
948 renamed = self._filelog.renamed(self._filenode)
953 renamed = self._filelog.renamed(self._filenode)
949 if not renamed:
954 if not renamed:
950 return renamed
955 return renamed
951
956
952 if self.rev() == self.linkrev():
957 if self.rev() == self.linkrev():
953 return renamed
958 return renamed
954
959
955 name = self.path()
960 name = self.path()
956 fnode = self._filenode
961 fnode = self._filenode
957 for p in self._changectx.parents():
962 for p in self._changectx.parents():
958 try:
963 try:
959 if fnode == p.filenode(name):
964 if fnode == p.filenode(name):
960 return None
965 return None
961 except error.LookupError:
966 except error.LookupError:
962 pass
967 pass
963 return renamed
968 return renamed
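A hedged usage sketch of renamed() (the path is illustrative; `repo` is assumed to be an open localrepo):

fctx = repo['.']['path/to/file']
src = fctx.renamed()          # a false value, or (source path, source filenode)
if src:
    print('copied or renamed from %s' % src[0])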
964
969
965 def children(self):
970 def children(self):
966 # hard for renames
971 # hard for renames
967 c = self._filelog.children(self._filenode)
972 c = self._filelog.children(self._filenode)
968 return [filectx(self._repo, self._path, fileid=x,
973 return [filectx(self._repo, self._path, fileid=x,
969 filelog=self._filelog) for x in c]
974 filelog=self._filelog) for x in c]
970
975
971 class committablectx(basectx):
976 class committablectx(basectx):
972 """A committablectx object provides common functionality for a context that
977 """A committablectx object provides common functionality for a context that
973 wants the ability to commit, e.g. workingctx or memctx."""
978 wants the ability to commit, e.g. workingctx or memctx."""
974 def __init__(self, repo, text="", user=None, date=None, extra=None,
979 def __init__(self, repo, text="", user=None, date=None, extra=None,
975 changes=None):
980 changes=None):
976 self._repo = repo
981 self._repo = repo
977 self._rev = None
982 self._rev = None
978 self._node = None
983 self._node = None
979 self._text = text
984 self._text = text
980 if date:
985 if date:
981 self._date = util.parsedate(date)
986 self._date = util.parsedate(date)
982 if user:
987 if user:
983 self._user = user
988 self._user = user
984 if changes:
989 if changes:
985 self._status = changes
990 self._status = changes
986
991
987 self._extra = {}
992 self._extra = {}
988 if extra:
993 if extra:
989 self._extra = extra.copy()
994 self._extra = extra.copy()
990 if 'branch' not in self._extra:
995 if 'branch' not in self._extra:
991 try:
996 try:
992 branch = encoding.fromlocal(self._repo.dirstate.branch())
997 branch = encoding.fromlocal(self._repo.dirstate.branch())
993 except UnicodeDecodeError:
998 except UnicodeDecodeError:
994 raise util.Abort(_('branch name not in UTF-8!'))
999 raise util.Abort(_('branch name not in UTF-8!'))
995 self._extra['branch'] = branch
1000 self._extra['branch'] = branch
996 if self._extra['branch'] == '':
1001 if self._extra['branch'] == '':
997 self._extra['branch'] = 'default'
1002 self._extra['branch'] = 'default'
998
1003
999 def __str__(self):
1004 def __str__(self):
1000 return str(self._parents[0]) + "+"
1005 return str(self._parents[0]) + "+"
1001
1006
1002 def __nonzero__(self):
1007 def __nonzero__(self):
1003 return True
1008 return True
1004
1009
1005 def _buildflagfunc(self):
1010 def _buildflagfunc(self):
1006 # Create a fallback function for getting file flags when the
1011 # Create a fallback function for getting file flags when the
1007 # filesystem doesn't support them
1012 # filesystem doesn't support them
1008
1013
1009 copiesget = self._repo.dirstate.copies().get
1014 copiesget = self._repo.dirstate.copies().get
1010
1015
1011 if len(self._parents) < 2:
1016 if len(self._parents) < 2:
1012 # when we have one parent, it's easy: copy from parent
1017 # when we have one parent, it's easy: copy from parent
1013 man = self._parents[0].manifest()
1018 man = self._parents[0].manifest()
1014 def func(f):
1019 def func(f):
1015 f = copiesget(f, f)
1020 f = copiesget(f, f)
1016 return man.flags(f)
1021 return man.flags(f)
1017 else:
1022 else:
1018 # merges are tricky: we try to reconstruct the unstored
1023 # merges are tricky: we try to reconstruct the unstored
1019 # result from the merge (issue1802)
1024 # result from the merge (issue1802)
1020 p1, p2 = self._parents
1025 p1, p2 = self._parents
1021 pa = p1.ancestor(p2)
1026 pa = p1.ancestor(p2)
1022 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1027 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1023
1028
1024 def func(f):
1029 def func(f):
1025 f = copiesget(f, f) # may be wrong for merges with copies
1030 f = copiesget(f, f) # may be wrong for merges with copies
1026 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1031 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1027 if fl1 == fl2:
1032 if fl1 == fl2:
1028 return fl1
1033 return fl1
1029 if fl1 == fla:
1034 if fl1 == fla:
1030 return fl2
1035 return fl2
1031 if fl2 == fla:
1036 if fl2 == fla:
1032 return fl1
1037 return fl1
1033 return '' # punt for conflicts
1038 return '' # punt for conflicts
1034
1039
1035 return func
1040 return func
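The merge branch above resolves flags three-way against the common ancestor; the same rule as a standalone sketch:

def resolveflag(fl1, fl2, fla):
    # fl1/fl2 are the parents' flags, fla is the ancestor's
    if fl1 == fl2:
        return fl1          # both sides agree
    if fl1 == fla:
        return fl2          # only the second parent changed the flag
    if fl2 == fla:
        return fl1          # only the first parent changed the flag
    return ''               # conflicting flag changes: punt, as above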
1036
1041
1037 @propertycache
1042 @propertycache
1038 def _flagfunc(self):
1043 def _flagfunc(self):
1039 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1044 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1040
1045
1041 @propertycache
1046 @propertycache
1042 def _manifest(self):
1047 def _manifest(self):
1043 """generate a manifest corresponding to the values in self._status"""
1048 """generate a manifest corresponding to the values in self._status"""
1044
1049
1045 man = self._parents[0].manifest().copy()
1050 man = self._parents[0].manifest().copy()
1046 if len(self._parents) > 1:
1051 if len(self._parents) > 1:
1047 man2 = self.p2().manifest()
1052 man2 = self.p2().manifest()
1048 def getman(f):
1053 def getman(f):
1049 if f in man:
1054 if f in man:
1050 return man
1055 return man
1051 return man2
1056 return man2
1052 else:
1057 else:
1053 getman = lambda f: man
1058 getman = lambda f: man
1054
1059
1055 copied = self._repo.dirstate.copies()
1060 copied = self._repo.dirstate.copies()
1056 ff = self._flagfunc
1061 ff = self._flagfunc
1057 for i, l in (("a", self._status.added), ("m", self._status.modified)):
1062 for i, l in (("a", self._status.added), ("m", self._status.modified)):
1058 for f in l:
1063 for f in l:
1059 orig = copied.get(f, f)
1064 orig = copied.get(f, f)
1060 man[f] = getman(orig).get(orig, nullid) + i
1065 man[f] = getman(orig).get(orig, nullid) + i
1061 try:
1066 try:
1062 man.setflag(f, ff(f))
1067 man.setflag(f, ff(f))
1063 except OSError:
1068 except OSError:
1064 pass
1069 pass
1065
1070
1066 for f in self._status.deleted + self._status.removed:
1071 for f in self._status.deleted + self._status.removed:
1067 if f in man:
1072 if f in man:
1068 del man[f]
1073 del man[f]
1069
1074
1070 return man
1075 return man
1071
1076
1072 @propertycache
1077 @propertycache
1073 def _status(self):
1078 def _status(self):
1074 return self._repo.status()
1079 return self._repo.status()
1075
1080
1076 @propertycache
1081 @propertycache
1077 def _user(self):
1082 def _user(self):
1078 return self._repo.ui.username()
1083 return self._repo.ui.username()
1079
1084
1080 @propertycache
1085 @propertycache
1081 def _date(self):
1086 def _date(self):
1082 return util.makedate()
1087 return util.makedate()
1083
1088
1084 def subrev(self, subpath):
1089 def subrev(self, subpath):
1085 return None
1090 return None
1086
1091
1087 def user(self):
1092 def user(self):
1088 return self._user or self._repo.ui.username()
1093 return self._user or self._repo.ui.username()
1089 def date(self):
1094 def date(self):
1090 return self._date
1095 return self._date
1091 def description(self):
1096 def description(self):
1092 return self._text
1097 return self._text
1093 def files(self):
1098 def files(self):
1094 return sorted(self._status.modified + self._status.added +
1099 return sorted(self._status.modified + self._status.added +
1095 self._status.removed)
1100 self._status.removed)
1096
1101
1097 def modified(self):
1102 def modified(self):
1098 return self._status.modified
1103 return self._status.modified
1099 def added(self):
1104 def added(self):
1100 return self._status.added
1105 return self._status.added
1101 def removed(self):
1106 def removed(self):
1102 return self._status.removed
1107 return self._status.removed
1103 def deleted(self):
1108 def deleted(self):
1104 return self._status.deleted
1109 return self._status.deleted
1105 def unknown(self):
1110 def unknown(self):
1106 return self._status.unknown
1111 return self._status.unknown
1107 def ignored(self):
1112 def ignored(self):
1108 return self._status.ignored
1113 return self._status.ignored
1109 def clean(self):
1114 def clean(self):
1110 return self._status.clean
1115 return self._status.clean
1111 def branch(self):
1116 def branch(self):
1112 return encoding.tolocal(self._extra['branch'])
1117 return encoding.tolocal(self._extra['branch'])
1113 def closesbranch(self):
1118 def closesbranch(self):
1114 return 'close' in self._extra
1119 return 'close' in self._extra
1115 def extra(self):
1120 def extra(self):
1116 return self._extra
1121 return self._extra
1117
1122
1118 def tags(self):
1123 def tags(self):
1119 t = []
1124 t = []
1120 for p in self.parents():
1125 for p in self.parents():
1121 t.extend(p.tags())
1126 t.extend(p.tags())
1122 return t
1127 return t
1123
1128
1124 def bookmarks(self):
1129 def bookmarks(self):
1125 b = []
1130 b = []
1126 for p in self.parents():
1131 for p in self.parents():
1127 b.extend(p.bookmarks())
1132 b.extend(p.bookmarks())
1128 return b
1133 return b
1129
1134
1130 def phase(self):
1135 def phase(self):
1131 phase = phases.draft # default phase to draft
1136 phase = phases.draft # default phase to draft
1132 for p in self.parents():
1137 for p in self.parents():
1133 phase = max(phase, p.phase())
1138 phase = max(phase, p.phase())
1134 return phase
1139 return phase
1135
1140
1136 def hidden(self):
1141 def hidden(self):
1137 return False
1142 return False
1138
1143
1139 def children(self):
1144 def children(self):
1140 return []
1145 return []
1141
1146
1142 def flags(self, path):
1147 def flags(self, path):
1143 if '_manifest' in self.__dict__:
1148 if '_manifest' in self.__dict__:
1144 try:
1149 try:
1145 return self._manifest.flags(path)
1150 return self._manifest.flags(path)
1146 except KeyError:
1151 except KeyError:
1147 return ''
1152 return ''
1148
1153
1149 try:
1154 try:
1150 return self._flagfunc(path)
1155 return self._flagfunc(path)
1151 except OSError:
1156 except OSError:
1152 return ''
1157 return ''
1153
1158
1154 def ancestor(self, c2):
1159 def ancestor(self, c2):
1155 """return the "best" ancestor context of self and c2"""
1160 """return the "best" ancestor context of self and c2"""
1156 return self._parents[0].ancestor(c2) # punt on two parents for now
1161 return self._parents[0].ancestor(c2) # punt on two parents for now
1157
1162
1158 def walk(self, match):
1163 def walk(self, match):
1159 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1164 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1160 True, False))
1165 True, False))
1161
1166
1162 def matches(self, match):
1167 def matches(self, match):
1163 return sorted(self._repo.dirstate.matches(match))
1168 return sorted(self._repo.dirstate.matches(match))
1164
1169
1165 def ancestors(self):
1170 def ancestors(self):
1166 for a in self._repo.changelog.ancestors(
1171 for a in self._repo.changelog.ancestors(
1167 [p.rev() for p in self._parents]):
1172 [p.rev() for p in self._parents]):
1168 yield changectx(self._repo, a)
1173 yield changectx(self._repo, a)
1169
1174
1170 def markcommitted(self, node):
1175 def markcommitted(self, node):
1171 """Perform post-commit cleanup necessary after committing this ctx
1176 """Perform post-commit cleanup necessary after committing this ctx
1172
1177
1173 Specifically, this updates backing stores this working context
1178 Specifically, this updates backing stores this working context
1174 wraps to reflect the fact that the changes reflected by this
1179 wraps to reflect the fact that the changes reflected by this
1175 workingctx have been committed. For example, it marks
1180 workingctx have been committed. For example, it marks
1176 modified and added files as normal in the dirstate.
1181 modified and added files as normal in the dirstate.
1177
1182
1178 """
1183 """
1179
1184
1180 self._repo.dirstate.beginparentchange()
1185 self._repo.dirstate.beginparentchange()
1181 for f in self.modified() + self.added():
1186 for f in self.modified() + self.added():
1182 self._repo.dirstate.normal(f)
1187 self._repo.dirstate.normal(f)
1183 for f in self.removed():
1188 for f in self.removed():
1184 self._repo.dirstate.drop(f)
1189 self._repo.dirstate.drop(f)
1185 self._repo.dirstate.setparents(node)
1190 self._repo.dirstate.setparents(node)
1186 self._repo.dirstate.endparentchange()
1191 self._repo.dirstate.endparentchange()
1187
1192
1188 def dirs(self):
1193 def dirs(self):
1189 return self._repo.dirstate.dirs()
1194 return self._repo.dirstate.dirs()
1190
1195
1191 class workingctx(committablectx):
1196 class workingctx(committablectx):
1192 """A workingctx object makes access to data related to
1197 """A workingctx object makes access to data related to
1193 the current working directory convenient.
1198 the current working directory convenient.
1194 date - any valid date string or (unixtime, offset), or None.
1199 date - any valid date string or (unixtime, offset), or None.
1195 user - username string, or None.
1200 user - username string, or None.
1196 extra - a dictionary of extra values, or None.
1201 extra - a dictionary of extra values, or None.
1197 changes - a list of file lists as returned by localrepo.status()
1202 changes - a list of file lists as returned by localrepo.status()
1198 or None to use the repository status.
1203 or None to use the repository status.
1199 """
1204 """
1200 def __init__(self, repo, text="", user=None, date=None, extra=None,
1205 def __init__(self, repo, text="", user=None, date=None, extra=None,
1201 changes=None):
1206 changes=None):
1202 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1207 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1203
1208
1204 def __iter__(self):
1209 def __iter__(self):
1205 d = self._repo.dirstate
1210 d = self._repo.dirstate
1206 for f in d:
1211 for f in d:
1207 if d[f] != 'r':
1212 if d[f] != 'r':
1208 yield f
1213 yield f
1209
1214
1210 def __contains__(self, key):
1215 def __contains__(self, key):
1211 return self._repo.dirstate[key] not in "?r"
1216 return self._repo.dirstate[key] not in "?r"
1212
1217
1213 @propertycache
1218 @propertycache
1214 def _parents(self):
1219 def _parents(self):
1215 p = self._repo.dirstate.parents()
1220 p = self._repo.dirstate.parents()
1216 if p[1] == nullid:
1221 if p[1] == nullid:
1217 p = p[:-1]
1222 p = p[:-1]
1218 return [changectx(self._repo, x) for x in p]
1223 return [changectx(self._repo, x) for x in p]
1219
1224
1220 def filectx(self, path, filelog=None):
1225 def filectx(self, path, filelog=None):
1221 """get a file context from the working directory"""
1226 """get a file context from the working directory"""
1222 return workingfilectx(self._repo, path, workingctx=self,
1227 return workingfilectx(self._repo, path, workingctx=self,
1223 filelog=filelog)
1228 filelog=filelog)
1224
1229
1225 def dirty(self, missing=False, merge=True, branch=True):
1230 def dirty(self, missing=False, merge=True, branch=True):
1226 "check whether a working directory is modified"
1231 "check whether a working directory is modified"
1227 # check subrepos first
1232 # check subrepos first
1228 for s in sorted(self.substate):
1233 for s in sorted(self.substate):
1229 if self.sub(s).dirty():
1234 if self.sub(s).dirty():
1230 return True
1235 return True
1231 # check current working dir
1236 # check current working dir
1232 return ((merge and self.p2()) or
1237 return ((merge and self.p2()) or
1233 (branch and self.branch() != self.p1().branch()) or
1238 (branch and self.branch() != self.p1().branch()) or
1234 self.modified() or self.added() or self.removed() or
1239 self.modified() or self.added() or self.removed() or
1235 (missing and self.deleted()))
1240 (missing and self.deleted()))
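A hedged usage sketch of the dirtiness check (assuming `repo` is an open localrepo; `util` and `_` are this module's own imports):

wctx = repo[None]                      # the working directory context
if wctx.dirty(missing=True):
    raise util.Abort(_('uncommitted changes in working directory'))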
1236
1241
1237 def add(self, list, prefix=""):
1242 def add(self, list, prefix=""):
1238 join = lambda f: os.path.join(prefix, f)
1243 join = lambda f: os.path.join(prefix, f)
1239 wlock = self._repo.wlock()
1244 wlock = self._repo.wlock()
1240 ui, ds = self._repo.ui, self._repo.dirstate
1245 ui, ds = self._repo.ui, self._repo.dirstate
1241 try:
1246 try:
1242 rejected = []
1247 rejected = []
1243 lstat = self._repo.wvfs.lstat
1248 lstat = self._repo.wvfs.lstat
1244 for f in list:
1249 for f in list:
1245 scmutil.checkportable(ui, join(f))
1250 scmutil.checkportable(ui, join(f))
1246 try:
1251 try:
1247 st = lstat(f)
1252 st = lstat(f)
1248 except OSError:
1253 except OSError:
1249 ui.warn(_("%s does not exist!\n") % join(f))
1254 ui.warn(_("%s does not exist!\n") % join(f))
1250 rejected.append(f)
1255 rejected.append(f)
1251 continue
1256 continue
1252 if st.st_size > 10000000:
1257 if st.st_size > 10000000:
1253 ui.warn(_("%s: up to %d MB of RAM may be required "
1258 ui.warn(_("%s: up to %d MB of RAM may be required "
1254 "to manage this file\n"
1259 "to manage this file\n"
1255 "(use 'hg revert %s' to cancel the "
1260 "(use 'hg revert %s' to cancel the "
1256 "pending addition)\n")
1261 "pending addition)\n")
1257 % (f, 3 * st.st_size // 1000000, join(f)))
1262 % (f, 3 * st.st_size // 1000000, join(f)))
1258 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1263 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1259 ui.warn(_("%s not added: only files and symlinks "
1264 ui.warn(_("%s not added: only files and symlinks "
1260 "supported currently\n") % join(f))
1265 "supported currently\n") % join(f))
1261 rejected.append(f)
1266 rejected.append(f)
1262 elif ds[f] in 'amn':
1267 elif ds[f] in 'amn':
1263 ui.warn(_("%s already tracked!\n") % join(f))
1268 ui.warn(_("%s already tracked!\n") % join(f))
1264 elif ds[f] == 'r':
1269 elif ds[f] == 'r':
1265 ds.normallookup(f)
1270 ds.normallookup(f)
1266 else:
1271 else:
1267 ds.add(f)
1272 ds.add(f)
1268 return rejected
1273 return rejected
1269 finally:
1274 finally:
1270 wlock.release()
1275 wlock.release()
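A hedged usage sketch of scheduling an addition through the working context (the file name is illustrative):

wctx = repo[None]
rejected = wctx.add(['docs/new-note.txt'])   # returns the paths that could not be added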
1271
1276
1272 def forget(self, files, prefix=""):
1277 def forget(self, files, prefix=""):
1273 join = lambda f: os.path.join(prefix, f)
1278 join = lambda f: os.path.join(prefix, f)
1274 wlock = self._repo.wlock()
1279 wlock = self._repo.wlock()
1275 try:
1280 try:
1276 rejected = []
1281 rejected = []
1277 for f in files:
1282 for f in files:
1278 if f not in self._repo.dirstate:
1283 if f not in self._repo.dirstate:
1279 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1284 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1280 rejected.append(f)
1285 rejected.append(f)
1281 elif self._repo.dirstate[f] != 'a':
1286 elif self._repo.dirstate[f] != 'a':
1282 self._repo.dirstate.remove(f)
1287 self._repo.dirstate.remove(f)
1283 else:
1288 else:
1284 self._repo.dirstate.drop(f)
1289 self._repo.dirstate.drop(f)
1285 return rejected
1290 return rejected
1286 finally:
1291 finally:
1287 wlock.release()
1292 wlock.release()
1288
1293
1289 def undelete(self, list):
1294 def undelete(self, list):
1290 pctxs = self.parents()
1295 pctxs = self.parents()
1291 wlock = self._repo.wlock()
1296 wlock = self._repo.wlock()
1292 try:
1297 try:
1293 for f in list:
1298 for f in list:
1294 if self._repo.dirstate[f] != 'r':
1299 if self._repo.dirstate[f] != 'r':
1295 self._repo.ui.warn(_("%s not removed!\n") % f)
1300 self._repo.ui.warn(_("%s not removed!\n") % f)
1296 else:
1301 else:
1297 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1302 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1298 t = fctx.data()
1303 t = fctx.data()
1299 self._repo.wwrite(f, t, fctx.flags())
1304 self._repo.wwrite(f, t, fctx.flags())
1300 self._repo.dirstate.normal(f)
1305 self._repo.dirstate.normal(f)
1301 finally:
1306 finally:
1302 wlock.release()
1307 wlock.release()
1303
1308
1304 def copy(self, source, dest):
1309 def copy(self, source, dest):
1305 try:
1310 try:
1306 st = self._repo.wvfs.lstat(dest)
1311 st = self._repo.wvfs.lstat(dest)
1307 except OSError, err:
1312 except OSError, err:
1308 if err.errno != errno.ENOENT:
1313 if err.errno != errno.ENOENT:
1309 raise
1314 raise
1310 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1315 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1311 return
1316 return
1312 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1317 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1313 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1318 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1314 "symbolic link\n") % dest)
1319 "symbolic link\n") % dest)
1315 else:
1320 else:
1316 wlock = self._repo.wlock()
1321 wlock = self._repo.wlock()
1317 try:
1322 try:
1318 if self._repo.dirstate[dest] in '?r':
1323 if self._repo.dirstate[dest] in '?r':
1319 self._repo.dirstate.add(dest)
1324 self._repo.dirstate.add(dest)
1320 self._repo.dirstate.copy(source, dest)
1325 self._repo.dirstate.copy(source, dest)
1321 finally:
1326 finally:
1322 wlock.release()
1327 wlock.release()
1323
1328
1324 def _filtersuspectsymlink(self, files):
1329 def _filtersuspectsymlink(self, files):
1325 if not files or self._repo.dirstate._checklink:
1330 if not files or self._repo.dirstate._checklink:
1326 return files
1331 return files
1327
1332
1328 # Symlink placeholders may get non-symlink-like contents
1333 # Symlink placeholders may get non-symlink-like contents
1329 # via user error or dereferencing by NFS or Samba servers,
1334 # via user error or dereferencing by NFS or Samba servers,
1330 # so we filter out any placeholders that don't look like a
1335 # so we filter out any placeholders that don't look like a
1331 # symlink
1336 # symlink
1332 sane = []
1337 sane = []
1333 for f in files:
1338 for f in files:
1334 if self.flags(f) == 'l':
1339 if self.flags(f) == 'l':
1335 d = self[f].data()
1340 d = self[f].data()
1336 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1341 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1337 self._repo.ui.debug('ignoring suspect symlink placeholder'
1342 self._repo.ui.debug('ignoring suspect symlink placeholder'
1338 ' "%s"\n' % f)
1343 ' "%s"\n' % f)
1339 continue
1344 continue
1340 sane.append(f)
1345 sane.append(f)
1341 return sane
1346 return sane
1342
1347
1343 def _checklookup(self, files):
1348 def _checklookup(self, files):
1344 # check for any possibly clean files
1349 # check for any possibly clean files
1345 if not files:
1350 if not files:
1346 return [], []
1351 return [], []
1347
1352
1348 modified = []
1353 modified = []
1349 fixup = []
1354 fixup = []
1350 pctx = self._parents[0]
1355 pctx = self._parents[0]
1351 # do a full compare of any files that might have changed
1356 # do a full compare of any files that might have changed
1352 for f in sorted(files):
1357 for f in sorted(files):
1353 if (f not in pctx or self.flags(f) != pctx.flags(f)
1358 if (f not in pctx or self.flags(f) != pctx.flags(f)
1354 or pctx[f].cmp(self[f])):
1359 or pctx[f].cmp(self[f])):
1355 modified.append(f)
1360 modified.append(f)
1356 else:
1361 else:
1357 fixup.append(f)
1362 fixup.append(f)
1358
1363
1359 # update dirstate for files that are actually clean
1364 # update dirstate for files that are actually clean
1360 if fixup:
1365 if fixup:
1361 try:
1366 try:
1362 # updating the dirstate is optional
1367 # updating the dirstate is optional
1363 # so we don't wait on the lock
1368 # so we don't wait on the lock
1364 # wlock can invalidate the dirstate, so cache normal _after_
1369 # wlock can invalidate the dirstate, so cache normal _after_
1365 # taking the lock
1370 # taking the lock
1366 wlock = self._repo.wlock(False)
1371 wlock = self._repo.wlock(False)
1367 normal = self._repo.dirstate.normal
1372 normal = self._repo.dirstate.normal
1368 try:
1373 try:
1369 for f in fixup:
1374 for f in fixup:
1370 normal(f)
1375 normal(f)
1371 finally:
1376 finally:
1372 wlock.release()
1377 wlock.release()
1373 except error.LockError:
1378 except error.LockError:
1374 pass
1379 pass
1375 return modified, fixup
1380 return modified, fixup
1376
1381
1377 def _manifestmatches(self, match, s):
1382 def _manifestmatches(self, match, s):
1378 """Slow path for workingctx
1383 """Slow path for workingctx
1379
1384
1380 The fast path is when we compare the working directory to its parent
1385 The fast path is when we compare the working directory to its parent
1381 which means this function is comparing with a non-parent; therefore we
1386 which means this function is comparing with a non-parent; therefore we
1382 need to build a manifest and return what matches.
1387 need to build a manifest and return what matches.
1383 """
1388 """
1384 mf = self._repo['.']._manifestmatches(match, s)
1389 mf = self._repo['.']._manifestmatches(match, s)
1385 modified, added, removed = s[0:3]
1390 modified, added, removed = s[0:3]
1386 for f in modified + added:
1391 for f in modified + added:
1387 mf[f] = None
1392 mf[f] = None
1388 mf.setflag(f, self.flags(f))
1393 mf.setflag(f, self.flags(f))
1389 for f in removed:
1394 for f in removed:
1390 if f in mf:
1395 if f in mf:
1391 del mf[f]
1396 del mf[f]
1392 return mf
1397 return mf
1393
1398
1394 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1399 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1395 unknown=False):
1400 unknown=False):
1396 '''Gets the status from the dirstate -- internal use only.'''
1401 '''Gets the status from the dirstate -- internal use only.'''
1397 listignored, listclean, listunknown = ignored, clean, unknown
1402 listignored, listclean, listunknown = ignored, clean, unknown
1398 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1403 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1399 subrepos = []
1404 subrepos = []
1400 if '.hgsub' in self:
1405 if '.hgsub' in self:
1401 subrepos = sorted(self.substate)
1406 subrepos = sorted(self.substate)
1402 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1407 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1403 listclean, listunknown)
1408 listclean, listunknown)
1404 modified, added, removed, deleted, unknown, ignored, clean = s
1409 modified, added, removed, deleted, unknown, ignored, clean = s
1405
1410
1406 # check for any possibly clean files
1411 # check for any possibly clean files
1407 if cmp:
1412 if cmp:
1408 modified2, fixup = self._checklookup(cmp)
1413 modified2, fixup = self._checklookup(cmp)
1409 modified += modified2
1414 modified += modified2
1410
1415
1411 # update dirstate for files that are actually clean
1416 # update dirstate for files that are actually clean
1412 if fixup and listclean:
1417 if fixup and listclean:
1413 clean += fixup
1418 clean += fixup
1414
1419
1415 return [modified, added, removed, deleted, unknown, ignored, clean]
1420 return [modified, added, removed, deleted, unknown, ignored, clean]
1416
1421
1417 def _buildstatus(self, other, s, match, listignored, listclean,
1422 def _buildstatus(self, other, s, match, listignored, listclean,
1418 listunknown):
1423 listunknown):
1419 """build a status with respect to another context
1424 """build a status with respect to another context
1420
1425
1421 This includes logic for maintaining the fast path of status when
1426 This includes logic for maintaining the fast path of status when
1422 comparing the working directory against its parent, which is to skip
1427 comparing the working directory against its parent, which is to skip
1423 building a new manifest if self (working directory) is not comparing
1428 building a new manifest if self (working directory) is not comparing
1424 against its parent (repo['.']).
1429 against its parent (repo['.']).
1425 """
1430 """
1426 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1431 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1427 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1432 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1428 # might have accidentally ended up with the entire contents of the file
1433 # might have accidentally ended up with the entire contents of the file
1429 # they are supposed to be linking to.
1434 # they are supposed to be linking to.
1430 s[0] = self._filtersuspectsymlink(s[0])
1435 s[0] = self._filtersuspectsymlink(s[0])
1431 if other != self._repo['.']:
1436 if other != self._repo['.']:
1432 s = super(workingctx, self)._buildstatus(other, s, match,
1437 s = super(workingctx, self)._buildstatus(other, s, match,
1433 listignored, listclean,
1438 listignored, listclean,
1434 listunknown)
1439 listunknown)
1435 self._status = scmutil.status(*s)
1440 self._status = scmutil.status(*s)
1436 return s
1441 return s
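A hedged sketch of the two comparison modes described above, via the repository-level status API (assuming `repo` is an open localrepo):

# Fast path: working directory against its first parent -- served from the dirstate
modified, added, removed, deleted, unknown, ignored, clean = repo.status()
# Slow path: working directory against an arbitrary revision -- a throwaway
# manifest is built for the working directory (see _manifestmatches above)
st = repo.status('tip', None, clean=True)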
1437
1442
1438 def _matchstatus(self, other, match):
1443 def _matchstatus(self, other, match):
1439 """override the match method with a filter for directory patterns
1444 """override the match method with a filter for directory patterns
1440
1445
1441 We use inheritance to customize the match.bad method only in cases of
1446 We use inheritance to customize the match.bad method only in cases of
1442 workingctx since it belongs only to the working directory when
1447 workingctx since it belongs only to the working directory when
1443 comparing against the parent changeset.
1448 comparing against the parent changeset.
1444
1449
1445 If we aren't comparing against the working directory's parent, then we
1450 If we aren't comparing against the working directory's parent, then we
1446 just use the default match object sent to us.
1451 just use the default match object sent to us.
1447 """
1452 """
1448 superself = super(workingctx, self)
1453 superself = super(workingctx, self)
1449 match = superself._matchstatus(other, match)
1454 match = superself._matchstatus(other, match)
1450 if other != self._repo['.']:
1455 if other != self._repo['.']:
1451 def bad(f, msg):
1456 def bad(f, msg):
1452 # 'f' may be a directory pattern from 'match.files()',
1457 # 'f' may be a directory pattern from 'match.files()',
1453 # so 'f not in ctx1' is not enough
1458 # so 'f not in ctx1' is not enough
1454 if f not in other and f not in other.dirs():
1459 if f not in other and f not in other.dirs():
1455 self._repo.ui.warn('%s: %s\n' %
1460 self._repo.ui.warn('%s: %s\n' %
1456 (self._repo.dirstate.pathto(f), msg))
1461 (self._repo.dirstate.pathto(f), msg))
1457 match.bad = bad
1462 match.bad = bad
1458 return match
1463 return match
1459
1464
1460 class committablefilectx(basefilectx):
1465 class committablefilectx(basefilectx):
1461 """A committablefilectx provides common functionality for a file context
1466 """A committablefilectx provides common functionality for a file context
1462 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1467 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1463 def __init__(self, repo, path, filelog=None, ctx=None):
1468 def __init__(self, repo, path, filelog=None, ctx=None):
1464 self._repo = repo
1469 self._repo = repo
1465 self._path = path
1470 self._path = path
1466 self._changeid = None
1471 self._changeid = None
1467 self._filerev = self._filenode = None
1472 self._filerev = self._filenode = None
1468
1473
1469 if filelog is not None:
1474 if filelog is not None:
1470 self._filelog = filelog
1475 self._filelog = filelog
1471 if ctx:
1476 if ctx:
1472 self._changectx = ctx
1477 self._changectx = ctx
1473
1478
1474 def __nonzero__(self):
1479 def __nonzero__(self):
1475 return True
1480 return True
1476
1481
1477 def parents(self):
1482 def parents(self):
1478 '''return parent filectxs, following copies if necessary'''
1483 '''return parent filectxs, following copies if necessary'''
1479 def filenode(ctx, path):
1484 def filenode(ctx, path):
1480 return ctx._manifest.get(path, nullid)
1485 return ctx._manifest.get(path, nullid)
1481
1486
1482 path = self._path
1487 path = self._path
1483 fl = self._filelog
1488 fl = self._filelog
1484 pcl = self._changectx._parents
1489 pcl = self._changectx._parents
1485 renamed = self.renamed()
1490 renamed = self.renamed()
1486
1491
1487 if renamed:
1492 if renamed:
1488 pl = [renamed + (None,)]
1493 pl = [renamed + (None,)]
1489 else:
1494 else:
1490 pl = [(path, filenode(pcl[0], path), fl)]
1495 pl = [(path, filenode(pcl[0], path), fl)]
1491
1496
1492 for pc in pcl[1:]:
1497 for pc in pcl[1:]:
1493 pl.append((path, filenode(pc, path), fl))
1498 pl.append((path, filenode(pc, path), fl))
1494
1499
1495 return [filectx(self._repo, p, fileid=n, filelog=l)
1500 return [filectx(self._repo, p, fileid=n, filelog=l)
1496 for p, n, l in pl if n != nullid]
1501 for p, n, l in pl if n != nullid]
1497
1502
1498 def children(self):
1503 def children(self):
1499 return []
1504 return []
1500
1505
1501 class workingfilectx(committablefilectx):
1506 class workingfilectx(committablefilectx):
1502 """A workingfilectx object makes access to data related to a particular
1507 """A workingfilectx object makes access to data related to a particular
1503 file in the working directory convenient."""
1508 file in the working directory convenient."""
1504 def __init__(self, repo, path, filelog=None, workingctx=None):
1509 def __init__(self, repo, path, filelog=None, workingctx=None):
1505 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1510 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1506
1511
1507 @propertycache
1512 @propertycache
1508 def _changectx(self):
1513 def _changectx(self):
1509 return workingctx(self._repo)
1514 return workingctx(self._repo)
1510
1515
1511 def data(self):
1516 def data(self):
1512 return self._repo.wread(self._path)
1517 return self._repo.wread(self._path)
1513 def renamed(self):
1518 def renamed(self):
1514 rp = self._repo.dirstate.copied(self._path)
1519 rp = self._repo.dirstate.copied(self._path)
1515 if not rp:
1520 if not rp:
1516 return None
1521 return None
1517 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1522 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1518
1523
1519 def size(self):
1524 def size(self):
1520 return self._repo.wvfs.lstat(self._path).st_size
1525 return self._repo.wvfs.lstat(self._path).st_size
1521 def date(self):
1526 def date(self):
1522 t, tz = self._changectx.date()
1527 t, tz = self._changectx.date()
1523 try:
1528 try:
1524 return (int(self._repo.wvfs.lstat(self._path).st_mtime), tz)
1529 return (int(self._repo.wvfs.lstat(self._path).st_mtime), tz)
1525 except OSError, err:
1530 except OSError, err:
1526 if err.errno != errno.ENOENT:
1531 if err.errno != errno.ENOENT:
1527 raise
1532 raise
1528 return (t, tz)
1533 return (t, tz)
1529
1534
1530 def cmp(self, fctx):
1535 def cmp(self, fctx):
1531 """compare with other file context
1536 """compare with other file context
1532
1537
1533 returns True if different than fctx.
1538 returns True if different than fctx.
1534 """
1539 """
1535 # fctx should be a filectx (not a workingfilectx)
1540 # fctx should be a filectx (not a workingfilectx)
1536 # invert comparison to reuse the same code path
1541 # invert comparison to reuse the same code path
1537 return fctx.cmp(self)
1542 return fctx.cmp(self)
1538
1543
1539 def remove(self, ignoremissing=False):
1544 def remove(self, ignoremissing=False):
1540 """wraps unlink for a repo's working directory"""
1545 """wraps unlink for a repo's working directory"""
1541 util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)
1546 util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)
1542
1547
1543 def write(self, data, flags):
1548 def write(self, data, flags):
1544 """wraps repo.wwrite"""
1549 """wraps repo.wwrite"""
1545 self._repo.wwrite(self._path, data, flags)
1550 self._repo.wwrite(self._path, data, flags)
1546
1551
1547 class memctx(committablectx):
1552 class memctx(committablectx):
1548 """Use memctx to perform in-memory commits via localrepo.commitctx().
1553 """Use memctx to perform in-memory commits via localrepo.commitctx().
1549
1554
1550 Revision information is supplied at initialization time, while
1555 Revision information is supplied at initialization time, while
1551 related file data is made available through a callback
1556 related file data is made available through a callback
1552 mechanism. 'repo' is the current localrepo, 'parents' is a
1557 mechanism. 'repo' is the current localrepo, 'parents' is a
1553 sequence of two parent revisions identifiers (pass None for every
1558 sequence of two parent revisions identifiers (pass None for every
1554 missing parent), 'text' is the commit message and 'files' lists
1559 missing parent), 'text' is the commit message and 'files' lists
1555 names of files touched by the revision (normalized and relative to
1560 names of files touched by the revision (normalized and relative to
1556 repository root).
1561 repository root).
1557
1562
1558 filectxfn(repo, memctx, path) is a callable receiving the
1563 filectxfn(repo, memctx, path) is a callable receiving the
1559 repository, the current memctx object and the normalized path of
1564 repository, the current memctx object and the normalized path of
1560 requested file, relative to repository root. It is fired by the
1565 requested file, relative to repository root. It is fired by the
1561 commit function for every file in 'files', but calls order is
1566 commit function for every file in 'files', but calls order is
1562 undefined. If the file is available in the revision being
1567 undefined. If the file is available in the revision being
1563 committed (updated or added), filectxfn returns a memfilectx
1568 committed (updated or added), filectxfn returns a memfilectx
1564 object. If the file was removed, filectxfn raises an
1569 object. If the file was removed, filectxfn raises an
1565 IOError. Moved files are represented by marking the source file
1570 IOError. Moved files are represented by marking the source file
1566 removed and the new file added with copy information (see
1571 removed and the new file added with copy information (see
1567 memfilectx).
1572 memfilectx).
1568
1573
1569 user receives the committer name and defaults to current
1574 user receives the committer name and defaults to current
1570 repository username, date is the commit date in any format
1575 repository username, date is the commit date in any format
1571 supported by util.parsedate() and defaults to current date, extra
1576 supported by util.parsedate() and defaults to current date, extra
1572 is a dictionary of metadata or is left empty.
1577 is a dictionary of metadata or is left empty.
1573 """
1578 """
1574
1579
1575 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
1580 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
1576 # Extensions that need to retain compatibility across Mercurial 3.1 can use
1581 # Extensions that need to retain compatibility across Mercurial 3.1 can use
1577 # this field to determine what to do in filectxfn.
1582 # this field to determine what to do in filectxfn.
1578 _returnnoneformissingfiles = True
1583 _returnnoneformissingfiles = True
1579
1584
1580 def __init__(self, repo, parents, text, files, filectxfn, user=None,
1585 def __init__(self, repo, parents, text, files, filectxfn, user=None,
1581 date=None, extra=None, editor=False):
1586 date=None, extra=None, editor=False):
1582 super(memctx, self).__init__(repo, text, user, date, extra)
1587 super(memctx, self).__init__(repo, text, user, date, extra)
1583 self._rev = None
1588 self._rev = None
1584 self._node = None
1589 self._node = None
1585 parents = [(p or nullid) for p in parents]
1590 parents = [(p or nullid) for p in parents]
1586 p1, p2 = parents
1591 p1, p2 = parents
1587 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
1592 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
1588 files = sorted(set(files))
1593 files = sorted(set(files))
1589 self._status = scmutil.status(files, [], [], [], [], [], [])
1594 self._status = scmutil.status(files, [], [], [], [], [], [])
1590 self._filectxfn = filectxfn
1595 self._filectxfn = filectxfn
1591 self.substate = {}
1596 self.substate = {}
1592
1597
1593 # if store is not callable, wrap it in a function
1598 # if store is not callable, wrap it in a function
1594 if not callable(filectxfn):
1599 if not callable(filectxfn):
1595 def getfilectx(repo, memctx, path):
1600 def getfilectx(repo, memctx, path):
1596 fctx = filectxfn[path]
1601 fctx = filectxfn[path]
1597 # this is weird but apparently we only keep track of one parent
1602 # this is weird but apparently we only keep track of one parent
1598 # (why not only store that instead of a tuple?)
1603 # (why not only store that instead of a tuple?)
1599 copied = fctx.renamed()
1604 copied = fctx.renamed()
1600 if copied:
1605 if copied:
1601 copied = copied[0]
1606 copied = copied[0]
1602 return memfilectx(repo, path, fctx.data(),
1607 return memfilectx(repo, path, fctx.data(),
1603 islink=fctx.islink(), isexec=fctx.isexec(),
1608 islink=fctx.islink(), isexec=fctx.isexec(),
1604 copied=copied, memctx=memctx)
1609 copied=copied, memctx=memctx)
1605 self._filectxfn = getfilectx
1610 self._filectxfn = getfilectx
1606
1611
1607 self._extra = extra and extra.copy() or {}
1612 self._extra = extra and extra.copy() or {}
1608 if self._extra.get('branch', '') == '':
1613 if self._extra.get('branch', '') == '':
1609 self._extra['branch'] = 'default'
1614 self._extra['branch'] = 'default'
1610
1615
1611 if editor:
1616 if editor:
1612 self._text = editor(self._repo, self, [])
1617 self._text = editor(self._repo, self, [])
1613 self._repo.savecommitmessage(self._text)
1618 self._repo.savecommitmessage(self._text)
1614
1619
1615 def filectx(self, path, filelog=None):
1620 def filectx(self, path, filelog=None):
1616 """get a file context from the working directory
1621 """get a file context from the working directory
1617
1622
1618 Returns None if file doesn't exist and should be removed."""
1623 Returns None if file doesn't exist and should be removed."""
1619 return self._filectxfn(self._repo, self, path)
1624 return self._filectxfn(self._repo, self, path)
1620
1625
1621 def commit(self):
1626 def commit(self):
1622 """commit context to the repo"""
1627 """commit context to the repo"""
1623 return self._repo.commitctx(self)
1628 return self._repo.commitctx(self)
1624
1629
1625 @propertycache
1630 @propertycache
1626 def _manifest(self):
1631 def _manifest(self):
1627 """generate a manifest based on the return values of filectxfn"""
1632 """generate a manifest based on the return values of filectxfn"""
1628
1633
1629 # keep this simple for now; just worry about p1
1634 # keep this simple for now; just worry about p1
1630 pctx = self._parents[0]
1635 pctx = self._parents[0]
1631 man = pctx.manifest().copy()
1636 man = pctx.manifest().copy()
1632
1637
1633 for f, fnode in man.iteritems():
1638 for f, fnode in man.iteritems():
1634 p1node = nullid
1639 p1node = nullid
1635 p2node = nullid
1640 p2node = nullid
1636 p = pctx[f].parents() # if file isn't in pctx, check p2?
1641 p = pctx[f].parents() # if file isn't in pctx, check p2?
1637 if len(p) > 0:
1642 if len(p) > 0:
1638 p1node = p[0].node()
1643 p1node = p[0].node()
1639 if len(p) > 1:
1644 if len(p) > 1:
1640 p2node = p[1].node()
1645 p2node = p[1].node()
1641 man[f] = revlog.hash(self[f].data(), p1node, p2node)
1646 man[f] = revlog.hash(self[f].data(), p1node, p2node)
1642
1647
1643 return man
1648 return man
1644
1649
1645
1650
1646 class memfilectx(committablefilectx):
1651 class memfilectx(committablefilectx):
1647 """memfilectx represents an in-memory file to commit.
1652 """memfilectx represents an in-memory file to commit.
1648
1653
1649 See memctx and committablefilectx for more details.
1654 See memctx and committablefilectx for more details.
1650 """
1655 """
1651 def __init__(self, repo, path, data, islink=False,
1656 def __init__(self, repo, path, data, islink=False,
1652 isexec=False, copied=None, memctx=None):
1657 isexec=False, copied=None, memctx=None):
1653 """
1658 """
1654 path is the normalized file path relative to repository root.
1659 path is the normalized file path relative to repository root.
1655 data is the file content as a string.
1660 data is the file content as a string.
1656 islink is True if the file is a symbolic link.
1661 islink is True if the file is a symbolic link.
1657 isexec is True if the file is executable.
1662 isexec is True if the file is executable.
1658 copied is the source file path if current file was copied in the
1663 copied is the source file path if current file was copied in the
1659 revision being committed, or None."""
1664 revision being committed, or None."""
1660 super(memfilectx, self).__init__(repo, path, None, memctx)
1665 super(memfilectx, self).__init__(repo, path, None, memctx)
1661 self._data = data
1666 self._data = data
1662 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
1667 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
1663 self._copied = None
1668 self._copied = None
1664 if copied:
1669 if copied:
1665 self._copied = (copied, nullid)
1670 self._copied = (copied, nullid)
1666
1671
1667 def data(self):
1672 def data(self):
1668 return self._data
1673 return self._data
1669 def size(self):
1674 def size(self):
1670 return len(self.data())
1675 return len(self.data())
1671 def flags(self):
1676 def flags(self):
1672 return self._flags
1677 return self._flags
1673 def renamed(self):
1678 def renamed(self):
1674 return self._copied
1679 return self._copied
1675
1680
1676 def remove(self, ignoremissing=False):
1681 def remove(self, ignoremissing=False):
1677 """wraps unlink for a repo's working directory"""
1682 """wraps unlink for a repo's working directory"""
1678 # need to figure out what to do here
1683 # need to figure out what to do here
1679 del self._changectx[self._path]
1684 del self._changectx[self._path]
1680
1685
1681 def write(self, data, flags):
1686 def write(self, data, flags):
1682 """wraps repo.wwrite"""
1687 """wraps repo.wwrite"""
1683 self._data = data
1688 self._data = data
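Finally, a hedged end-to-end sketch of the in-memory commit API that memctx and memfilectx describe above (file name and content are illustrative; `repo` is assumed to be an open localrepo):

from mercurial import context

def filectxfn(repo, memctx, path):
    # return the in-memory revision of 'path'; raise IOError (or return None,
    # per _returnnoneformissingfiles) to mark the file as removed
    return context.memfilectx(repo, path, 'illustrative contents\n',
                              islink=False, isexec=False,
                              copied=None, memctx=memctx)

p1 = repo['.'].node()
mctx = context.memctx(repo, (p1, None), 'example in-memory commit',
                      ['illustrative.txt'], filectxfn)
newnode = mctx.commit()       # delegates to repo.commitctx(mctx)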