##// END OF EJS Templates
manifest: add matches() method
Author: Martin von Zweigbergk
r23305:0cc283f4 default
parent child Browse files
Show More
@@ -1,1688 +1,1676 b''
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import nullid, nullrev, short, hex, bin
8 from node import nullid, nullrev, short, hex, bin
9 from i18n import _
9 from i18n import _
10 import mdiff, error, util, scmutil, subrepo, patch, encoding, phases
10 import mdiff, error, util, scmutil, subrepo, patch, encoding, phases
11 import match as matchmod
11 import match as matchmod
12 import os, errno, stat
12 import os, errno, stat
13 import obsolete as obsmod
13 import obsolete as obsmod
14 import repoview
14 import repoview
15 import fileset
15 import fileset
16 import revlog
16 import revlog
17
17
18 propertycache = util.propertycache
18 propertycache = util.propertycache
19
19
class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
    be committed,
    memctx: a context that represents changes in-memory and can also
    be committed."""
    def __new__(cls, repo, changeid='', *args, **kwargs):
        # Passing an existing context through is an identity operation:
        # basectx(repo, ctx) returns ctx itself rather than a new object.
        if isinstance(changeid, basectx):
            return changeid

        o = super(basectx, cls).__new__(cls)

        o._repo = repo
        o._rev = nullrev
        o._node = nullid

        return o

    def __str__(self):
        return short(self.node())

    def __int__(self):
        return self.rev()

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        # Contexts of different concrete types never compare equal, even
        # when they point at the same revision.
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        # 'path in ctx' means the file exists in this context's manifest.
        return key in self._manifest

    def __getitem__(self, key):
        return self.filectx(key)

    def __iter__(self):
        # Iterate file names in sorted (manifest) order.
        for f in sorted(self._manifest):
            yield f

    def _manifestmatches(self, match, s):
        """generate a new manifest filtered by the match argument

        This method is for internal use only and mainly exists to provide an
        object oriented way for other contexts to customize the manifest
        generation.
        """
        return self.manifest().matches(match)

    def _matchstatus(self, other, match):
        """return match.always if match is none

        This internal method provides a way for child objects to override the
        match operator.
        """
        return match or matchmod.always(self._repo.root, self._repo.getcwd())

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        if self.rev() is not None and self.rev() < other.rev():
            self.manifest()
        mf1 = other._manifestmatches(match, s)
        mf2 = self._manifestmatches(match, s)

        modified, added, clean = [], [], []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        withflags = mf1.withflags() | mf2.withflags()
        for fn, mf2node in mf2.iteritems():
            if fn in mf1:
                # Present on both sides: modified, clean, or deleted.
                # Flag changes count as modifications; otherwise compare
                # nodeids and fall back to content comparison when the
                # nodeid alone is inconclusive (mf2node is false).
                if (fn not in deletedset and
                    ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
                     (mf1[fn] != mf2node and
                      (mf2node or self[fn].cmp(other[fn]))))):
                    modified.append(fn)
                elif listclean:
                    clean.append(fn)
                # mf1 is consumed as we go; whatever remains afterwards
                # exists only on the other side, i.e. was removed here.
                del mf1[fn]
            elif fn not in deletedset:
                added.append(fn)
        removed = mf1.keys()
        if removed:
            # need to filter files if they are already reported as removed
            unknown = [fn for fn in unknown if fn not in mf1]
            ignored = [fn for fn in ignored if fn not in mf1]

        return scmutil.status(modified, added, removed, deleted, unknown,
                              ignored, clean)

    @propertycache
    def substate(self):
        # Parsed .hgsub/.hgsubstate information for this context.
        return subrepo.state(self, self._repo.ui)

    def subrev(self, subpath):
        # Revision recorded for subrepo at 'subpath' in .hgsubstate.
        return self.substate[subpath][1]

    def rev(self):
        return self._rev
    def node(self):
        return self._node
    def hex(self):
        return hex(self.node())
    def manifest(self):
        return self._manifest
    def phasestr(self):
        return phases.phasenames[self.phase()]
    def mutable(self):
        # Only non-public changesets may be rewritten.
        return self.phase() > phases.public

    def getfileset(self, expr):
        return fileset.getfileset(self, expr)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, 'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, 'extinct')

    def unstable(self):
        """True if the changeset is not obsolete but it's ancestor are"""
        return self.rev() in obsmod.getrevs(self._repo, 'unstable')

    def bumped(self):
        """True if the changeset try to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be bumped.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'bumped')

    def divergent(self):
        """Is a successors of a changeset with multiple possible successors set

        Only non-public and non-obsolete changesets may be divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'divergent')

    def troubled(self):
        """True if the changeset is either unstable, bumped or divergent"""
        return self.unstable() or self.bumped() or self.divergent()

    def troubles(self):
        """return the list of troubles affecting this changesets.

        Troubles are returned as strings. possible values are:
        - unstable,
        - bumped,
        - divergent.
        """
        troubles = []
        if self.unstable():
            troubles.append('unstable')
        if self.bumped():
            troubles.append('bumped')
        if self.divergent():
            troubles.append('divergent')
        return troubles

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        # Synthesize a null context when there is no second parent.
        if len(self._parents) == 2:
            return self._parents[1]
        return changectx(self._repo, -1)

    def _fileinfo(self, path):
        """Return (filenode, flags) for path, raising ManifestLookupError
        when the file is absent from this context's manifest."""
        if '_manifest' in self.__dict__:
            # Full manifest already loaded: use it directly.
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(self._node, path,
                                                _('not found in manifest'))
        if '_manifestdelta' in self.__dict__ or path in self.files():
            # A delta is cheaper than a full manifest read when the file
            # was touched by this changeset.
            if path in self._manifestdelta:
                return (self._manifestdelta[path],
                        self._manifestdelta.flags(path))
        node, flag = self._repo.manifest.find(self._changeset[0], path)
        if not node:
            raise error.ManifestLookupError(self._node, path,
                                            _('not found in manifest'))

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        # Missing files have no flags rather than raising.
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return ''

    def sub(self, path):
        return subrepo.subrepo(self, path)

    def match(self, pats=[], include=None, exclude=None, default='glob'):
        """Build a matcher over this context from pattern lists."""
        r = self._repo
        return matchmod.match(r.root, r.getcwd(), pats,
                              include, exclude, default,
                              auditor=r.auditor, ctx=self)

    def diff(self, ctx2=None, match=None, **opts):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        diffopts = patch.diffopts(self._repo.ui, opts)
        return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)

    @propertycache
    def _dirs(self):
        return scmutil.dirs(self._manifest)

    def dirs(self):
        return self._dirs

    def dirty(self, missing=False, merge=True, branch=True):
        # Committed contexts are never dirty; overridden by workingctx.
        return False

    def status(self, other=None, match=None, listignored=False,
               listclean=False, listunknown=False, listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        reversed = False
        if (not isinstance(ctx1, changectx)
            and isinstance(ctx2, changectx)):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
                              listunknown)

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
                               r.clean)

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                rev2 = ctx2.subrev(subpath)
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    # Prefix subrepo file names into each status category.
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self._repo.ui.status(_("skipping missing "
                                           "subrepository: %s\n") % subpath)

        for l in r:
            l.sort()

        return r
332
320
333
321
def makememctx(repo, parents, text, user, date, branch, files, store,
               editor=None):
    """Build an in-memory commit context (memctx) whose file contents
    come from *store*.

    *store* must provide getfile(path) -> (data, (islink, isexec), copied),
    returning data=None for files to be removed. *branch*, when set, is
    recorded in the changeset extras (encoded from the local encoding).
    """
    def getfilectx(repo, memctx, path):
        # Adapter from the store's getfile() triple to a memfilectx;
        # None data means "file removed" and is propagated as-is.
        data, mode, copied = store.getfile(path)
        if data is None:
            return None
        islink, isexec = mode
        return memfilectx(repo, path, data, islink=islink, isexec=isexec,
                          copied=copied, memctx=memctx)
    extra = {}
    if branch:
        extra['branch'] = encoding.fromlocal(branch)
    ctx = memctx(repo, parents, text, files, getfilectx, user,
                 date, extra, editor)
    return ctx
349
337
350 class changectx(basectx):
338 class changectx(basectx):
351 """A changecontext object makes access to data related to a particular
339 """A changecontext object makes access to data related to a particular
352 changeset convenient. It represents a read-only context already present in
340 changeset convenient. It represents a read-only context already present in
353 the repo."""
341 the repo."""
354 def __init__(self, repo, changeid=''):
342 def __init__(self, repo, changeid=''):
355 """changeid is a revision number, node, or tag"""
343 """changeid is a revision number, node, or tag"""
356
344
357 # since basectx.__new__ already took care of copying the object, we
345 # since basectx.__new__ already took care of copying the object, we
358 # don't need to do anything in __init__, so we just exit here
346 # don't need to do anything in __init__, so we just exit here
359 if isinstance(changeid, basectx):
347 if isinstance(changeid, basectx):
360 return
348 return
361
349
362 if changeid == '':
350 if changeid == '':
363 changeid = '.'
351 changeid = '.'
364 self._repo = repo
352 self._repo = repo
365
353
366 try:
354 try:
367 if isinstance(changeid, int):
355 if isinstance(changeid, int):
368 self._node = repo.changelog.node(changeid)
356 self._node = repo.changelog.node(changeid)
369 self._rev = changeid
357 self._rev = changeid
370 return
358 return
371 if isinstance(changeid, long):
359 if isinstance(changeid, long):
372 changeid = str(changeid)
360 changeid = str(changeid)
373 if changeid == '.':
361 if changeid == '.':
374 self._node = repo.dirstate.p1()
362 self._node = repo.dirstate.p1()
375 self._rev = repo.changelog.rev(self._node)
363 self._rev = repo.changelog.rev(self._node)
376 return
364 return
377 if changeid == 'null':
365 if changeid == 'null':
378 self._node = nullid
366 self._node = nullid
379 self._rev = nullrev
367 self._rev = nullrev
380 return
368 return
381 if changeid == 'tip':
369 if changeid == 'tip':
382 self._node = repo.changelog.tip()
370 self._node = repo.changelog.tip()
383 self._rev = repo.changelog.rev(self._node)
371 self._rev = repo.changelog.rev(self._node)
384 return
372 return
385 if len(changeid) == 20:
373 if len(changeid) == 20:
386 try:
374 try:
387 self._node = changeid
375 self._node = changeid
388 self._rev = repo.changelog.rev(changeid)
376 self._rev = repo.changelog.rev(changeid)
389 return
377 return
390 except error.FilteredRepoLookupError:
378 except error.FilteredRepoLookupError:
391 raise
379 raise
392 except LookupError:
380 except LookupError:
393 pass
381 pass
394
382
395 try:
383 try:
396 r = int(changeid)
384 r = int(changeid)
397 if str(r) != changeid:
385 if str(r) != changeid:
398 raise ValueError
386 raise ValueError
399 l = len(repo.changelog)
387 l = len(repo.changelog)
400 if r < 0:
388 if r < 0:
401 r += l
389 r += l
402 if r < 0 or r >= l:
390 if r < 0 or r >= l:
403 raise ValueError
391 raise ValueError
404 self._rev = r
392 self._rev = r
405 self._node = repo.changelog.node(r)
393 self._node = repo.changelog.node(r)
406 return
394 return
407 except error.FilteredIndexError:
395 except error.FilteredIndexError:
408 raise
396 raise
409 except (ValueError, OverflowError, IndexError):
397 except (ValueError, OverflowError, IndexError):
410 pass
398 pass
411
399
412 if len(changeid) == 40:
400 if len(changeid) == 40:
413 try:
401 try:
414 self._node = bin(changeid)
402 self._node = bin(changeid)
415 self._rev = repo.changelog.rev(self._node)
403 self._rev = repo.changelog.rev(self._node)
416 return
404 return
417 except error.FilteredLookupError:
405 except error.FilteredLookupError:
418 raise
406 raise
419 except (TypeError, LookupError):
407 except (TypeError, LookupError):
420 pass
408 pass
421
409
422 if changeid in repo._bookmarks:
410 if changeid in repo._bookmarks:
423 self._node = repo._bookmarks[changeid]
411 self._node = repo._bookmarks[changeid]
424 self._rev = repo.changelog.rev(self._node)
412 self._rev = repo.changelog.rev(self._node)
425 return
413 return
426 if changeid in repo._tagscache.tags:
414 if changeid in repo._tagscache.tags:
427 self._node = repo._tagscache.tags[changeid]
415 self._node = repo._tagscache.tags[changeid]
428 self._rev = repo.changelog.rev(self._node)
416 self._rev = repo.changelog.rev(self._node)
429 return
417 return
430 try:
418 try:
431 self._node = repo.branchtip(changeid)
419 self._node = repo.branchtip(changeid)
432 self._rev = repo.changelog.rev(self._node)
420 self._rev = repo.changelog.rev(self._node)
433 return
421 return
434 except error.FilteredRepoLookupError:
422 except error.FilteredRepoLookupError:
435 raise
423 raise
436 except error.RepoLookupError:
424 except error.RepoLookupError:
437 pass
425 pass
438
426
439 self._node = repo.unfiltered().changelog._partialmatch(changeid)
427 self._node = repo.unfiltered().changelog._partialmatch(changeid)
440 if self._node is not None:
428 if self._node is not None:
441 self._rev = repo.changelog.rev(self._node)
429 self._rev = repo.changelog.rev(self._node)
442 return
430 return
443
431
444 # lookup failed
432 # lookup failed
445 # check if it might have come from damaged dirstate
433 # check if it might have come from damaged dirstate
446 #
434 #
447 # XXX we could avoid the unfiltered if we had a recognizable
435 # XXX we could avoid the unfiltered if we had a recognizable
448 # exception for filtered changeset access
436 # exception for filtered changeset access
449 if changeid in repo.unfiltered().dirstate.parents():
437 if changeid in repo.unfiltered().dirstate.parents():
450 msg = _("working directory has unknown parent '%s'!")
438 msg = _("working directory has unknown parent '%s'!")
451 raise error.Abort(msg % short(changeid))
439 raise error.Abort(msg % short(changeid))
452 try:
440 try:
453 if len(changeid) == 20:
441 if len(changeid) == 20:
454 changeid = hex(changeid)
442 changeid = hex(changeid)
455 except TypeError:
443 except TypeError:
456 pass
444 pass
457 except (error.FilteredIndexError, error.FilteredLookupError,
445 except (error.FilteredIndexError, error.FilteredLookupError,
458 error.FilteredRepoLookupError):
446 error.FilteredRepoLookupError):
459 if repo.filtername == 'visible':
447 if repo.filtername == 'visible':
460 msg = _("hidden revision '%s'") % changeid
448 msg = _("hidden revision '%s'") % changeid
461 hint = _('use --hidden to access hidden revisions')
449 hint = _('use --hidden to access hidden revisions')
462 raise error.FilteredRepoLookupError(msg, hint=hint)
450 raise error.FilteredRepoLookupError(msg, hint=hint)
463 msg = _("filtered revision '%s' (not in '%s' subset)")
451 msg = _("filtered revision '%s' (not in '%s' subset)")
464 msg %= (changeid, repo.filtername)
452 msg %= (changeid, repo.filtername)
465 raise error.FilteredRepoLookupError(msg)
453 raise error.FilteredRepoLookupError(msg)
466 except IndexError:
454 except IndexError:
467 pass
455 pass
468 raise error.RepoLookupError(
456 raise error.RepoLookupError(
469 _("unknown revision '%s'") % changeid)
457 _("unknown revision '%s'") % changeid)
470
458
471 def __hash__(self):
459 def __hash__(self):
472 try:
460 try:
473 return hash(self._rev)
461 return hash(self._rev)
474 except AttributeError:
462 except AttributeError:
475 return id(self)
463 return id(self)
476
464
477 def __nonzero__(self):
465 def __nonzero__(self):
478 return self._rev != nullrev
466 return self._rev != nullrev
479
467
480 @propertycache
468 @propertycache
481 def _changeset(self):
469 def _changeset(self):
482 return self._repo.changelog.read(self.rev())
470 return self._repo.changelog.read(self.rev())
483
471
484 @propertycache
472 @propertycache
485 def _manifest(self):
473 def _manifest(self):
486 return self._repo.manifest.read(self._changeset[0])
474 return self._repo.manifest.read(self._changeset[0])
487
475
488 @propertycache
476 @propertycache
489 def _manifestdelta(self):
477 def _manifestdelta(self):
490 return self._repo.manifest.readdelta(self._changeset[0])
478 return self._repo.manifest.readdelta(self._changeset[0])
491
479
492 @propertycache
480 @propertycache
493 def _parents(self):
481 def _parents(self):
494 p = self._repo.changelog.parentrevs(self._rev)
482 p = self._repo.changelog.parentrevs(self._rev)
495 if p[1] == nullrev:
483 if p[1] == nullrev:
496 p = p[:-1]
484 p = p[:-1]
497 return [changectx(self._repo, x) for x in p]
485 return [changectx(self._repo, x) for x in p]
498
486
499 def changeset(self):
487 def changeset(self):
500 return self._changeset
488 return self._changeset
501 def manifestnode(self):
489 def manifestnode(self):
502 return self._changeset[0]
490 return self._changeset[0]
503
491
504 def user(self):
492 def user(self):
505 return self._changeset[1]
493 return self._changeset[1]
506 def date(self):
494 def date(self):
507 return self._changeset[2]
495 return self._changeset[2]
508 def files(self):
496 def files(self):
509 return self._changeset[3]
497 return self._changeset[3]
510 def description(self):
498 def description(self):
511 return self._changeset[4]
499 return self._changeset[4]
512 def branch(self):
500 def branch(self):
513 return encoding.tolocal(self._changeset[5].get("branch"))
501 return encoding.tolocal(self._changeset[5].get("branch"))
514 def closesbranch(self):
502 def closesbranch(self):
515 return 'close' in self._changeset[5]
503 return 'close' in self._changeset[5]
516 def extra(self):
504 def extra(self):
517 return self._changeset[5]
505 return self._changeset[5]
518 def tags(self):
506 def tags(self):
519 return self._repo.nodetags(self._node)
507 return self._repo.nodetags(self._node)
520 def bookmarks(self):
508 def bookmarks(self):
521 return self._repo.nodebookmarks(self._node)
509 return self._repo.nodebookmarks(self._node)
522 def phase(self):
510 def phase(self):
523 return self._repo._phasecache.phase(self._repo, self._rev)
511 return self._repo._phasecache.phase(self._repo, self._rev)
524 def hidden(self):
512 def hidden(self):
525 return self._rev in repoview.filterrevs(self._repo, 'visible')
513 return self._rev in repoview.filterrevs(self._repo, 'visible')
526
514
527 def children(self):
515 def children(self):
528 """return contexts for each child changeset"""
516 """return contexts for each child changeset"""
529 c = self._repo.changelog.children(self._node)
517 c = self._repo.changelog.children(self._node)
530 return [changectx(self._repo, x) for x in c]
518 return [changectx(self._repo, x) for x in c]
531
519
532 def ancestors(self):
520 def ancestors(self):
533 for a in self._repo.changelog.ancestors([self._rev]):
521 for a in self._repo.changelog.ancestors([self._rev]):
534 yield changectx(self._repo, a)
522 yield changectx(self._repo, a)
535
523
536 def descendants(self):
524 def descendants(self):
537 for d in self._repo.changelog.descendants([self._rev]):
525 for d in self._repo.changelog.descendants([self._rev]):
538 yield changectx(self._repo, d)
526 yield changectx(self._repo, d)
539
527
540 def filectx(self, path, fileid=None, filelog=None):
528 def filectx(self, path, fileid=None, filelog=None):
541 """get a file context from this changeset"""
529 """get a file context from this changeset"""
542 if fileid is None:
530 if fileid is None:
543 fileid = self.filenode(path)
531 fileid = self.filenode(path)
544 return filectx(self._repo, path, fileid=fileid,
532 return filectx(self._repo, path, fileid=fileid,
545 changectx=self, filelog=filelog)
533 changectx=self, filelog=filelog)
546
534
547 def ancestor(self, c2, warn=False):
535 def ancestor(self, c2, warn=False):
548 """return the "best" ancestor context of self and c2
536 """return the "best" ancestor context of self and c2
549
537
550 If there are multiple candidates, it will show a message and check
538 If there are multiple candidates, it will show a message and check
551 merge.preferancestor configuration before falling back to the
539 merge.preferancestor configuration before falling back to the
552 revlog ancestor."""
540 revlog ancestor."""
553 # deal with workingctxs
541 # deal with workingctxs
554 n2 = c2._node
542 n2 = c2._node
555 if n2 is None:
543 if n2 is None:
556 n2 = c2._parents[0]._node
544 n2 = c2._parents[0]._node
557 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
545 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
558 if not cahs:
546 if not cahs:
559 anc = nullid
547 anc = nullid
560 elif len(cahs) == 1:
548 elif len(cahs) == 1:
561 anc = cahs[0]
549 anc = cahs[0]
562 else:
550 else:
563 for r in self._repo.ui.configlist('merge', 'preferancestor'):
551 for r in self._repo.ui.configlist('merge', 'preferancestor'):
564 try:
552 try:
565 ctx = changectx(self._repo, r)
553 ctx = changectx(self._repo, r)
566 except error.RepoLookupError:
554 except error.RepoLookupError:
567 continue
555 continue
568 anc = ctx.node()
556 anc = ctx.node()
569 if anc in cahs:
557 if anc in cahs:
570 break
558 break
571 else:
559 else:
572 anc = self._repo.changelog.ancestor(self._node, n2)
560 anc = self._repo.changelog.ancestor(self._node, n2)
573 if warn:
561 if warn:
574 self._repo.ui.status(
562 self._repo.ui.status(
575 (_("note: using %s as ancestor of %s and %s\n") %
563 (_("note: using %s as ancestor of %s and %s\n") %
576 (short(anc), short(self._node), short(n2))) +
564 (short(anc), short(self._node), short(n2))) +
577 ''.join(_(" alternatively, use --config "
565 ''.join(_(" alternatively, use --config "
578 "merge.preferancestor=%s\n") %
566 "merge.preferancestor=%s\n") %
579 short(n) for n in sorted(cahs) if n != anc))
567 short(n) for n in sorted(cahs) if n != anc))
580 return changectx(self._repo, anc)
568 return changectx(self._repo, anc)
581
569
582 def descendant(self, other):
570 def descendant(self, other):
583 """True if other is descendant of this changeset"""
571 """True if other is descendant of this changeset"""
584 return self._repo.changelog.descendant(self._rev, other._rev)
572 return self._repo.changelog.descendant(self._rev, other._rev)
585
573
586 def walk(self, match):
574 def walk(self, match):
587 fset = set(match.files())
575 fset = set(match.files())
588 # for dirstate.walk, files=['.'] means "walk the whole tree".
576 # for dirstate.walk, files=['.'] means "walk the whole tree".
589 # follow that here, too
577 # follow that here, too
590 fset.discard('.')
578 fset.discard('.')
591
579
592 # avoid the entire walk if we're only looking for specific files
580 # avoid the entire walk if we're only looking for specific files
593 if fset and not match.anypats():
581 if fset and not match.anypats():
594 if util.all([fn in self for fn in fset]):
582 if util.all([fn in self for fn in fset]):
595 for fn in sorted(fset):
583 for fn in sorted(fset):
596 if match(fn):
584 if match(fn):
597 yield fn
585 yield fn
598 raise StopIteration
586 raise StopIteration
599
587
600 for fn in self:
588 for fn in self:
601 if fn in fset:
589 if fn in fset:
602 # specified pattern is the exact name
590 # specified pattern is the exact name
603 fset.remove(fn)
591 fset.remove(fn)
604 if match(fn):
592 if match(fn):
605 yield fn
593 yield fn
606 for fn in sorted(fset):
594 for fn in sorted(fset):
607 if fn in self._dirs:
595 if fn in self._dirs:
608 # specified pattern is a directory
596 # specified pattern is a directory
609 continue
597 continue
610 match.bad(fn, _('no such file in rev %s') % self)
598 match.bad(fn, _('no such file in rev %s') % self)
611
599
612 def matches(self, match):
600 def matches(self, match):
613 return self.walk(match)
601 return self.walk(match)
614
602
615 class basefilectx(object):
603 class basefilectx(object):
616 """A filecontext object represents the common logic for its children:
604 """A filecontext object represents the common logic for its children:
617 filectx: read-only access to a filerevision that is already present
605 filectx: read-only access to a filerevision that is already present
618 in the repo,
606 in the repo,
619 workingfilectx: a filecontext that represents files from the working
607 workingfilectx: a filecontext that represents files from the working
620 directory,
608 directory,
621 memfilectx: a filecontext that represents files in-memory."""
609 memfilectx: a filecontext that represents files in-memory."""
622 def __new__(cls, repo, path, *args, **kwargs):
610 def __new__(cls, repo, path, *args, **kwargs):
623 return super(basefilectx, cls).__new__(cls)
611 return super(basefilectx, cls).__new__(cls)
624
612
625 @propertycache
613 @propertycache
626 def _filelog(self):
614 def _filelog(self):
627 return self._repo.file(self._path)
615 return self._repo.file(self._path)
628
616
629 @propertycache
617 @propertycache
630 def _changeid(self):
618 def _changeid(self):
631 if '_changeid' in self.__dict__:
619 if '_changeid' in self.__dict__:
632 return self._changeid
620 return self._changeid
633 elif '_changectx' in self.__dict__:
621 elif '_changectx' in self.__dict__:
634 return self._changectx.rev()
622 return self._changectx.rev()
635 else:
623 else:
636 return self._filelog.linkrev(self._filerev)
624 return self._filelog.linkrev(self._filerev)
637
625
638 @propertycache
626 @propertycache
639 def _filenode(self):
627 def _filenode(self):
640 if '_fileid' in self.__dict__:
628 if '_fileid' in self.__dict__:
641 return self._filelog.lookup(self._fileid)
629 return self._filelog.lookup(self._fileid)
642 else:
630 else:
643 return self._changectx.filenode(self._path)
631 return self._changectx.filenode(self._path)
644
632
645 @propertycache
633 @propertycache
646 def _filerev(self):
634 def _filerev(self):
647 return self._filelog.rev(self._filenode)
635 return self._filelog.rev(self._filenode)
648
636
649 @propertycache
637 @propertycache
650 def _repopath(self):
638 def _repopath(self):
651 return self._path
639 return self._path
652
640
653 def __nonzero__(self):
641 def __nonzero__(self):
654 try:
642 try:
655 self._filenode
643 self._filenode
656 return True
644 return True
657 except error.LookupError:
645 except error.LookupError:
658 # file is missing
646 # file is missing
659 return False
647 return False
660
648
661 def __str__(self):
649 def __str__(self):
662 return "%s@%s" % (self.path(), self._changectx)
650 return "%s@%s" % (self.path(), self._changectx)
663
651
664 def __repr__(self):
652 def __repr__(self):
665 return "<%s %s>" % (type(self).__name__, str(self))
653 return "<%s %s>" % (type(self).__name__, str(self))
666
654
667 def __hash__(self):
655 def __hash__(self):
668 try:
656 try:
669 return hash((self._path, self._filenode))
657 return hash((self._path, self._filenode))
670 except AttributeError:
658 except AttributeError:
671 return id(self)
659 return id(self)
672
660
673 def __eq__(self, other):
661 def __eq__(self, other):
674 try:
662 try:
675 return (type(self) == type(other) and self._path == other._path
663 return (type(self) == type(other) and self._path == other._path
676 and self._filenode == other._filenode)
664 and self._filenode == other._filenode)
677 except AttributeError:
665 except AttributeError:
678 return False
666 return False
679
667
680 def __ne__(self, other):
668 def __ne__(self, other):
681 return not (self == other)
669 return not (self == other)
682
670
683 def filerev(self):
671 def filerev(self):
684 return self._filerev
672 return self._filerev
685 def filenode(self):
673 def filenode(self):
686 return self._filenode
674 return self._filenode
687 def flags(self):
675 def flags(self):
688 return self._changectx.flags(self._path)
676 return self._changectx.flags(self._path)
689 def filelog(self):
677 def filelog(self):
690 return self._filelog
678 return self._filelog
691 def rev(self):
679 def rev(self):
692 return self._changeid
680 return self._changeid
693 def linkrev(self):
681 def linkrev(self):
694 return self._filelog.linkrev(self._filerev)
682 return self._filelog.linkrev(self._filerev)
695 def node(self):
683 def node(self):
696 return self._changectx.node()
684 return self._changectx.node()
697 def hex(self):
685 def hex(self):
698 return self._changectx.hex()
686 return self._changectx.hex()
699 def user(self):
687 def user(self):
700 return self._changectx.user()
688 return self._changectx.user()
701 def date(self):
689 def date(self):
702 return self._changectx.date()
690 return self._changectx.date()
703 def files(self):
691 def files(self):
704 return self._changectx.files()
692 return self._changectx.files()
705 def description(self):
693 def description(self):
706 return self._changectx.description()
694 return self._changectx.description()
707 def branch(self):
695 def branch(self):
708 return self._changectx.branch()
696 return self._changectx.branch()
709 def extra(self):
697 def extra(self):
710 return self._changectx.extra()
698 return self._changectx.extra()
711 def phase(self):
699 def phase(self):
712 return self._changectx.phase()
700 return self._changectx.phase()
713 def phasestr(self):
701 def phasestr(self):
714 return self._changectx.phasestr()
702 return self._changectx.phasestr()
715 def manifest(self):
703 def manifest(self):
716 return self._changectx.manifest()
704 return self._changectx.manifest()
717 def changectx(self):
705 def changectx(self):
718 return self._changectx
706 return self._changectx
719
707
720 def path(self):
708 def path(self):
721 return self._path
709 return self._path
722
710
723 def isbinary(self):
711 def isbinary(self):
724 try:
712 try:
725 return util.binary(self.data())
713 return util.binary(self.data())
726 except IOError:
714 except IOError:
727 return False
715 return False
728 def isexec(self):
716 def isexec(self):
729 return 'x' in self.flags()
717 return 'x' in self.flags()
730 def islink(self):
718 def islink(self):
731 return 'l' in self.flags()
719 return 'l' in self.flags()
732
720
733 def cmp(self, fctx):
721 def cmp(self, fctx):
734 """compare with other file context
722 """compare with other file context
735
723
736 returns True if different than fctx.
724 returns True if different than fctx.
737 """
725 """
738 if (fctx._filerev is None
726 if (fctx._filerev is None
739 and (self._repo._encodefilterpats
727 and (self._repo._encodefilterpats
740 # if file data starts with '\1\n', empty metadata block is
728 # if file data starts with '\1\n', empty metadata block is
741 # prepended, which adds 4 bytes to filelog.size().
729 # prepended, which adds 4 bytes to filelog.size().
742 or self.size() - 4 == fctx.size())
730 or self.size() - 4 == fctx.size())
743 or self.size() == fctx.size()):
731 or self.size() == fctx.size()):
744 return self._filelog.cmp(self._filenode, fctx.data())
732 return self._filelog.cmp(self._filenode, fctx.data())
745
733
746 return True
734 return True
747
735
748 def parents(self):
736 def parents(self):
749 _path = self._path
737 _path = self._path
750 fl = self._filelog
738 fl = self._filelog
751 pl = [(_path, n, fl) for n in self._filelog.parents(self._filenode)]
739 pl = [(_path, n, fl) for n in self._filelog.parents(self._filenode)]
752
740
753 r = self._filelog.renamed(self._filenode)
741 r = self._filelog.renamed(self._filenode)
754 if r:
742 if r:
755 pl[0] = (r[0], r[1], None)
743 pl[0] = (r[0], r[1], None)
756
744
757 return [filectx(self._repo, p, fileid=n, filelog=l)
745 return [filectx(self._repo, p, fileid=n, filelog=l)
758 for p, n, l in pl if n != nullid]
746 for p, n, l in pl if n != nullid]
759
747
760 def p1(self):
748 def p1(self):
761 return self.parents()[0]
749 return self.parents()[0]
762
750
763 def p2(self):
751 def p2(self):
764 p = self.parents()
752 p = self.parents()
765 if len(p) == 2:
753 if len(p) == 2:
766 return p[1]
754 return p[1]
767 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
755 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
768
756
769 def annotate(self, follow=False, linenumber=None, diffopts=None):
757 def annotate(self, follow=False, linenumber=None, diffopts=None):
770 '''returns a list of tuples of (ctx, line) for each line
758 '''returns a list of tuples of (ctx, line) for each line
771 in the file, where ctx is the filectx of the node where
759 in the file, where ctx is the filectx of the node where
772 that line was last changed.
760 that line was last changed.
773 This returns tuples of ((ctx, linenumber), line) for each line,
761 This returns tuples of ((ctx, linenumber), line) for each line,
774 if "linenumber" parameter is NOT "None".
762 if "linenumber" parameter is NOT "None".
775 In such tuples, linenumber means one at the first appearance
763 In such tuples, linenumber means one at the first appearance
776 in the managed file.
764 in the managed file.
777 To reduce annotation cost,
765 To reduce annotation cost,
778 this returns fixed value(False is used) as linenumber,
766 this returns fixed value(False is used) as linenumber,
779 if "linenumber" parameter is "False".'''
767 if "linenumber" parameter is "False".'''
780
768
781 if linenumber is None:
769 if linenumber is None:
782 def decorate(text, rev):
770 def decorate(text, rev):
783 return ([rev] * len(text.splitlines()), text)
771 return ([rev] * len(text.splitlines()), text)
784 elif linenumber:
772 elif linenumber:
785 def decorate(text, rev):
773 def decorate(text, rev):
786 size = len(text.splitlines())
774 size = len(text.splitlines())
787 return ([(rev, i) for i in xrange(1, size + 1)], text)
775 return ([(rev, i) for i in xrange(1, size + 1)], text)
788 else:
776 else:
789 def decorate(text, rev):
777 def decorate(text, rev):
790 return ([(rev, False)] * len(text.splitlines()), text)
778 return ([(rev, False)] * len(text.splitlines()), text)
791
779
792 def pair(parent, child):
780 def pair(parent, child):
793 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts,
781 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts,
794 refine=True)
782 refine=True)
795 for (a1, a2, b1, b2), t in blocks:
783 for (a1, a2, b1, b2), t in blocks:
796 # Changed blocks ('!') or blocks made only of blank lines ('~')
784 # Changed blocks ('!') or blocks made only of blank lines ('~')
797 # belong to the child.
785 # belong to the child.
798 if t == '=':
786 if t == '=':
799 child[0][b1:b2] = parent[0][a1:a2]
787 child[0][b1:b2] = parent[0][a1:a2]
800 return child
788 return child
801
789
802 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
790 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
803
791
804 def parents(f):
792 def parents(f):
805 pl = f.parents()
793 pl = f.parents()
806
794
807 # Don't return renamed parents if we aren't following.
795 # Don't return renamed parents if we aren't following.
808 if not follow:
796 if not follow:
809 pl = [p for p in pl if p.path() == f.path()]
797 pl = [p for p in pl if p.path() == f.path()]
810
798
811 # renamed filectx won't have a filelog yet, so set it
799 # renamed filectx won't have a filelog yet, so set it
812 # from the cache to save time
800 # from the cache to save time
813 for p in pl:
801 for p in pl:
814 if not '_filelog' in p.__dict__:
802 if not '_filelog' in p.__dict__:
815 p._filelog = getlog(p.path())
803 p._filelog = getlog(p.path())
816
804
817 return pl
805 return pl
818
806
819 # use linkrev to find the first changeset where self appeared
807 # use linkrev to find the first changeset where self appeared
820 if self.rev() != self.linkrev():
808 if self.rev() != self.linkrev():
821 base = self.filectx(self.filenode())
809 base = self.filectx(self.filenode())
822 else:
810 else:
823 base = self
811 base = self
824
812
825 # This algorithm would prefer to be recursive, but Python is a
813 # This algorithm would prefer to be recursive, but Python is a
826 # bit recursion-hostile. Instead we do an iterative
814 # bit recursion-hostile. Instead we do an iterative
827 # depth-first search.
815 # depth-first search.
828
816
829 visit = [base]
817 visit = [base]
830 hist = {}
818 hist = {}
831 pcache = {}
819 pcache = {}
832 needed = {base: 1}
820 needed = {base: 1}
833 while visit:
821 while visit:
834 f = visit[-1]
822 f = visit[-1]
835 pcached = f in pcache
823 pcached = f in pcache
836 if not pcached:
824 if not pcached:
837 pcache[f] = parents(f)
825 pcache[f] = parents(f)
838
826
839 ready = True
827 ready = True
840 pl = pcache[f]
828 pl = pcache[f]
841 for p in pl:
829 for p in pl:
842 if p not in hist:
830 if p not in hist:
843 ready = False
831 ready = False
844 visit.append(p)
832 visit.append(p)
845 if not pcached:
833 if not pcached:
846 needed[p] = needed.get(p, 0) + 1
834 needed[p] = needed.get(p, 0) + 1
847 if ready:
835 if ready:
848 visit.pop()
836 visit.pop()
849 reusable = f in hist
837 reusable = f in hist
850 if reusable:
838 if reusable:
851 curr = hist[f]
839 curr = hist[f]
852 else:
840 else:
853 curr = decorate(f.data(), f)
841 curr = decorate(f.data(), f)
854 for p in pl:
842 for p in pl:
855 if not reusable:
843 if not reusable:
856 curr = pair(hist[p], curr)
844 curr = pair(hist[p], curr)
857 if needed[p] == 1:
845 if needed[p] == 1:
858 del hist[p]
846 del hist[p]
859 del needed[p]
847 del needed[p]
860 else:
848 else:
861 needed[p] -= 1
849 needed[p] -= 1
862
850
863 hist[f] = curr
851 hist[f] = curr
864 pcache[f] = []
852 pcache[f] = []
865
853
866 return zip(hist[base][0], hist[base][1].splitlines(True))
854 return zip(hist[base][0], hist[base][1].splitlines(True))
867
855
868 def ancestors(self, followfirst=False):
856 def ancestors(self, followfirst=False):
869 visit = {}
857 visit = {}
870 c = self
858 c = self
871 cut = followfirst and 1 or None
859 cut = followfirst and 1 or None
872 while True:
860 while True:
873 for parent in c.parents()[:cut]:
861 for parent in c.parents()[:cut]:
874 visit[(parent.rev(), parent.node())] = parent
862 visit[(parent.rev(), parent.node())] = parent
875 if not visit:
863 if not visit:
876 break
864 break
877 c = visit.pop(max(visit))
865 c = visit.pop(max(visit))
878 yield c
866 yield c
879
867
880 class filectx(basefilectx):
868 class filectx(basefilectx):
881 """A filecontext object makes access to data related to a particular
869 """A filecontext object makes access to data related to a particular
882 filerevision convenient."""
870 filerevision convenient."""
883 def __init__(self, repo, path, changeid=None, fileid=None,
871 def __init__(self, repo, path, changeid=None, fileid=None,
884 filelog=None, changectx=None):
872 filelog=None, changectx=None):
885 """changeid can be a changeset revision, node, or tag.
873 """changeid can be a changeset revision, node, or tag.
886 fileid can be a file revision or node."""
874 fileid can be a file revision or node."""
887 self._repo = repo
875 self._repo = repo
888 self._path = path
876 self._path = path
889
877
890 assert (changeid is not None
878 assert (changeid is not None
891 or fileid is not None
879 or fileid is not None
892 or changectx is not None), \
880 or changectx is not None), \
893 ("bad args: changeid=%r, fileid=%r, changectx=%r"
881 ("bad args: changeid=%r, fileid=%r, changectx=%r"
894 % (changeid, fileid, changectx))
882 % (changeid, fileid, changectx))
895
883
896 if filelog is not None:
884 if filelog is not None:
897 self._filelog = filelog
885 self._filelog = filelog
898
886
899 if changeid is not None:
887 if changeid is not None:
900 self._changeid = changeid
888 self._changeid = changeid
901 if changectx is not None:
889 if changectx is not None:
902 self._changectx = changectx
890 self._changectx = changectx
903 if fileid is not None:
891 if fileid is not None:
904 self._fileid = fileid
892 self._fileid = fileid
905
893
906 @propertycache
894 @propertycache
907 def _changectx(self):
895 def _changectx(self):
908 try:
896 try:
909 return changectx(self._repo, self._changeid)
897 return changectx(self._repo, self._changeid)
910 except error.RepoLookupError:
898 except error.RepoLookupError:
911 # Linkrev may point to any revision in the repository. When the
899 # Linkrev may point to any revision in the repository. When the
912 # repository is filtered this may lead to `filectx` trying to build
900 # repository is filtered this may lead to `filectx` trying to build
913 # `changectx` for filtered revision. In such case we fallback to
901 # `changectx` for filtered revision. In such case we fallback to
914 # creating `changectx` on the unfiltered version of the reposition.
902 # creating `changectx` on the unfiltered version of the reposition.
915 # This fallback should not be an issue because `changectx` from
903 # This fallback should not be an issue because `changectx` from
916 # `filectx` are not used in complex operations that care about
904 # `filectx` are not used in complex operations that care about
917 # filtering.
905 # filtering.
918 #
906 #
919 # This fallback is a cheap and dirty fix that prevent several
907 # This fallback is a cheap and dirty fix that prevent several
920 # crashes. It does not ensure the behavior is correct. However the
908 # crashes. It does not ensure the behavior is correct. However the
921 # behavior was not correct before filtering either and "incorrect
909 # behavior was not correct before filtering either and "incorrect
922 # behavior" is seen as better as "crash"
910 # behavior" is seen as better as "crash"
923 #
911 #
924 # Linkrevs have several serious troubles with filtering that are
912 # Linkrevs have several serious troubles with filtering that are
925 # complicated to solve. Proper handling of the issue here should be
913 # complicated to solve. Proper handling of the issue here should be
926 # considered when solving linkrev issue are on the table.
914 # considered when solving linkrev issue are on the table.
927 return changectx(self._repo.unfiltered(), self._changeid)
915 return changectx(self._repo.unfiltered(), self._changeid)
928
916
929 def filectx(self, fileid):
917 def filectx(self, fileid):
930 '''opens an arbitrary revision of the file without
918 '''opens an arbitrary revision of the file without
931 opening a new filelog'''
919 opening a new filelog'''
932 return filectx(self._repo, self._path, fileid=fileid,
920 return filectx(self._repo, self._path, fileid=fileid,
933 filelog=self._filelog)
921 filelog=self._filelog)
934
922
935 def data(self):
923 def data(self):
936 try:
924 try:
937 return self._filelog.read(self._filenode)
925 return self._filelog.read(self._filenode)
938 except error.CensoredNodeError:
926 except error.CensoredNodeError:
939 if self._repo.ui.config("censor", "policy", "abort") == "ignore":
927 if self._repo.ui.config("censor", "policy", "abort") == "ignore":
940 return ""
928 return ""
941 raise util.Abort(_("censored node: %s") % short(self._filenode),
929 raise util.Abort(_("censored node: %s") % short(self._filenode),
942 hint=_("set censor.policy to ignore errors"))
930 hint=_("set censor.policy to ignore errors"))
943
931
944 def size(self):
932 def size(self):
945 return self._filelog.size(self._filerev)
933 return self._filelog.size(self._filerev)
946
934
947 def renamed(self):
935 def renamed(self):
948 """check if file was actually renamed in this changeset revision
936 """check if file was actually renamed in this changeset revision
949
937
950 If rename logged in file revision, we report copy for changeset only
938 If rename logged in file revision, we report copy for changeset only
951 if file revisions linkrev points back to the changeset in question
939 if file revisions linkrev points back to the changeset in question
952 or both changeset parents contain different file revisions.
940 or both changeset parents contain different file revisions.
953 """
941 """
954
942
955 renamed = self._filelog.renamed(self._filenode)
943 renamed = self._filelog.renamed(self._filenode)
956 if not renamed:
944 if not renamed:
957 return renamed
945 return renamed
958
946
959 if self.rev() == self.linkrev():
947 if self.rev() == self.linkrev():
960 return renamed
948 return renamed
961
949
962 name = self.path()
950 name = self.path()
963 fnode = self._filenode
951 fnode = self._filenode
964 for p in self._changectx.parents():
952 for p in self._changectx.parents():
965 try:
953 try:
966 if fnode == p.filenode(name):
954 if fnode == p.filenode(name):
967 return None
955 return None
968 except error.LookupError:
956 except error.LookupError:
969 pass
957 pass
970 return renamed
958 return renamed
971
959
972 def children(self):
960 def children(self):
973 # hard for renames
961 # hard for renames
974 c = self._filelog.children(self._filenode)
962 c = self._filelog.children(self._filenode)
975 return [filectx(self._repo, self._path, fileid=x,
963 return [filectx(self._repo, self._path, fileid=x,
976 filelog=self._filelog) for x in c]
964 filelog=self._filelog) for x in c]
977
965
978 class committablectx(basectx):
966 class committablectx(basectx):
979 """A committablectx object provides common functionality for a context that
967 """A committablectx object provides common functionality for a context that
980 wants the ability to commit, e.g. workingctx or memctx."""
968 wants the ability to commit, e.g. workingctx or memctx."""
981 def __init__(self, repo, text="", user=None, date=None, extra=None,
969 def __init__(self, repo, text="", user=None, date=None, extra=None,
982 changes=None):
970 changes=None):
983 self._repo = repo
971 self._repo = repo
984 self._rev = None
972 self._rev = None
985 self._node = None
973 self._node = None
986 self._text = text
974 self._text = text
987 if date:
975 if date:
988 self._date = util.parsedate(date)
976 self._date = util.parsedate(date)
989 if user:
977 if user:
990 self._user = user
978 self._user = user
991 if changes:
979 if changes:
992 self._status = changes
980 self._status = changes
993
981
994 self._extra = {}
982 self._extra = {}
995 if extra:
983 if extra:
996 self._extra = extra.copy()
984 self._extra = extra.copy()
997 if 'branch' not in self._extra:
985 if 'branch' not in self._extra:
998 try:
986 try:
999 branch = encoding.fromlocal(self._repo.dirstate.branch())
987 branch = encoding.fromlocal(self._repo.dirstate.branch())
1000 except UnicodeDecodeError:
988 except UnicodeDecodeError:
1001 raise util.Abort(_('branch name not in UTF-8!'))
989 raise util.Abort(_('branch name not in UTF-8!'))
1002 self._extra['branch'] = branch
990 self._extra['branch'] = branch
1003 if self._extra['branch'] == '':
991 if self._extra['branch'] == '':
1004 self._extra['branch'] = 'default'
992 self._extra['branch'] = 'default'
1005
993
1006 def __str__(self):
994 def __str__(self):
1007 return str(self._parents[0]) + "+"
995 return str(self._parents[0]) + "+"
1008
996
1009 def __nonzero__(self):
997 def __nonzero__(self):
1010 return True
998 return True
1011
999
1012 def _buildflagfunc(self):
1000 def _buildflagfunc(self):
1013 # Create a fallback function for getting file flags when the
1001 # Create a fallback function for getting file flags when the
1014 # filesystem doesn't support them
1002 # filesystem doesn't support them
1015
1003
1016 copiesget = self._repo.dirstate.copies().get
1004 copiesget = self._repo.dirstate.copies().get
1017
1005
1018 if len(self._parents) < 2:
1006 if len(self._parents) < 2:
1019 # when we have one parent, it's easy: copy from parent
1007 # when we have one parent, it's easy: copy from parent
1020 man = self._parents[0].manifest()
1008 man = self._parents[0].manifest()
1021 def func(f):
1009 def func(f):
1022 f = copiesget(f, f)
1010 f = copiesget(f, f)
1023 return man.flags(f)
1011 return man.flags(f)
1024 else:
1012 else:
1025 # merges are tricky: we try to reconstruct the unstored
1013 # merges are tricky: we try to reconstruct the unstored
1026 # result from the merge (issue1802)
1014 # result from the merge (issue1802)
1027 p1, p2 = self._parents
1015 p1, p2 = self._parents
1028 pa = p1.ancestor(p2)
1016 pa = p1.ancestor(p2)
1029 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1017 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1030
1018
1031 def func(f):
1019 def func(f):
1032 f = copiesget(f, f) # may be wrong for merges with copies
1020 f = copiesget(f, f) # may be wrong for merges with copies
1033 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1021 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1034 if fl1 == fl2:
1022 if fl1 == fl2:
1035 return fl1
1023 return fl1
1036 if fl1 == fla:
1024 if fl1 == fla:
1037 return fl2
1025 return fl2
1038 if fl2 == fla:
1026 if fl2 == fla:
1039 return fl1
1027 return fl1
1040 return '' # punt for conflicts
1028 return '' # punt for conflicts
1041
1029
1042 return func
1030 return func
1043
1031
1044 @propertycache
1032 @propertycache
1045 def _flagfunc(self):
1033 def _flagfunc(self):
1046 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1034 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1047
1035
1048 @propertycache
1036 @propertycache
1049 def _manifest(self):
1037 def _manifest(self):
1050 """generate a manifest corresponding to the values in self._status"""
1038 """generate a manifest corresponding to the values in self._status"""
1051
1039
1052 man = self._parents[0].manifest().copy()
1040 man = self._parents[0].manifest().copy()
1053 if len(self._parents) > 1:
1041 if len(self._parents) > 1:
1054 man2 = self.p2().manifest()
1042 man2 = self.p2().manifest()
1055 def getman(f):
1043 def getman(f):
1056 if f in man:
1044 if f in man:
1057 return man
1045 return man
1058 return man2
1046 return man2
1059 else:
1047 else:
1060 getman = lambda f: man
1048 getman = lambda f: man
1061
1049
1062 copied = self._repo.dirstate.copies()
1050 copied = self._repo.dirstate.copies()
1063 ff = self._flagfunc
1051 ff = self._flagfunc
1064 for i, l in (("a", self._status.added), ("m", self._status.modified)):
1052 for i, l in (("a", self._status.added), ("m", self._status.modified)):
1065 for f in l:
1053 for f in l:
1066 orig = copied.get(f, f)
1054 orig = copied.get(f, f)
1067 man[f] = getman(orig).get(orig, nullid) + i
1055 man[f] = getman(orig).get(orig, nullid) + i
1068 try:
1056 try:
1069 man.setflag(f, ff(f))
1057 man.setflag(f, ff(f))
1070 except OSError:
1058 except OSError:
1071 pass
1059 pass
1072
1060
1073 for f in self._status.deleted + self._status.removed:
1061 for f in self._status.deleted + self._status.removed:
1074 if f in man:
1062 if f in man:
1075 del man[f]
1063 del man[f]
1076
1064
1077 return man
1065 return man
1078
1066
1079 @propertycache
1067 @propertycache
1080 def _status(self):
1068 def _status(self):
1081 return self._repo.status()
1069 return self._repo.status()
1082
1070
1083 @propertycache
1071 @propertycache
1084 def _user(self):
1072 def _user(self):
1085 return self._repo.ui.username()
1073 return self._repo.ui.username()
1086
1074
1087 @propertycache
1075 @propertycache
1088 def _date(self):
1076 def _date(self):
1089 return util.makedate()
1077 return util.makedate()
1090
1078
1091 def subrev(self, subpath):
1079 def subrev(self, subpath):
1092 return None
1080 return None
1093
1081
1094 def user(self):
1082 def user(self):
1095 return self._user or self._repo.ui.username()
1083 return self._user or self._repo.ui.username()
1096 def date(self):
1084 def date(self):
1097 return self._date
1085 return self._date
1098 def description(self):
1086 def description(self):
1099 return self._text
1087 return self._text
1100 def files(self):
1088 def files(self):
1101 return sorted(self._status.modified + self._status.added +
1089 return sorted(self._status.modified + self._status.added +
1102 self._status.removed)
1090 self._status.removed)
1103
1091
1104 def modified(self):
1092 def modified(self):
1105 return self._status.modified
1093 return self._status.modified
1106 def added(self):
1094 def added(self):
1107 return self._status.added
1095 return self._status.added
1108 def removed(self):
1096 def removed(self):
1109 return self._status.removed
1097 return self._status.removed
1110 def deleted(self):
1098 def deleted(self):
1111 return self._status.deleted
1099 return self._status.deleted
1112 def unknown(self):
1100 def unknown(self):
1113 return self._status.unknown
1101 return self._status.unknown
1114 def ignored(self):
1102 def ignored(self):
1115 return self._status.ignored
1103 return self._status.ignored
1116 def clean(self):
1104 def clean(self):
1117 return self._status.clean
1105 return self._status.clean
1118 def branch(self):
1106 def branch(self):
1119 return encoding.tolocal(self._extra['branch'])
1107 return encoding.tolocal(self._extra['branch'])
1120 def closesbranch(self):
1108 def closesbranch(self):
1121 return 'close' in self._extra
1109 return 'close' in self._extra
1122 def extra(self):
1110 def extra(self):
1123 return self._extra
1111 return self._extra
1124
1112
1125 def tags(self):
1113 def tags(self):
1126 t = []
1114 t = []
1127 for p in self.parents():
1115 for p in self.parents():
1128 t.extend(p.tags())
1116 t.extend(p.tags())
1129 return t
1117 return t
1130
1118
1131 def bookmarks(self):
1119 def bookmarks(self):
1132 b = []
1120 b = []
1133 for p in self.parents():
1121 for p in self.parents():
1134 b.extend(p.bookmarks())
1122 b.extend(p.bookmarks())
1135 return b
1123 return b
1136
1124
1137 def phase(self):
1125 def phase(self):
1138 phase = phases.draft # default phase to draft
1126 phase = phases.draft # default phase to draft
1139 for p in self.parents():
1127 for p in self.parents():
1140 phase = max(phase, p.phase())
1128 phase = max(phase, p.phase())
1141 return phase
1129 return phase
1142
1130
1143 def hidden(self):
1131 def hidden(self):
1144 return False
1132 return False
1145
1133
1146 def children(self):
1134 def children(self):
1147 return []
1135 return []
1148
1136
1149 def flags(self, path):
1137 def flags(self, path):
1150 if '_manifest' in self.__dict__:
1138 if '_manifest' in self.__dict__:
1151 try:
1139 try:
1152 return self._manifest.flags(path)
1140 return self._manifest.flags(path)
1153 except KeyError:
1141 except KeyError:
1154 return ''
1142 return ''
1155
1143
1156 try:
1144 try:
1157 return self._flagfunc(path)
1145 return self._flagfunc(path)
1158 except OSError:
1146 except OSError:
1159 return ''
1147 return ''
1160
1148
1161 def ancestor(self, c2):
1149 def ancestor(self, c2):
1162 """return the "best" ancestor context of self and c2"""
1150 """return the "best" ancestor context of self and c2"""
1163 return self._parents[0].ancestor(c2) # punt on two parents for now
1151 return self._parents[0].ancestor(c2) # punt on two parents for now
1164
1152
1165 def walk(self, match):
1153 def walk(self, match):
1166 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1154 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1167 True, False))
1155 True, False))
1168
1156
1169 def matches(self, match):
1157 def matches(self, match):
1170 return sorted(self._repo.dirstate.matches(match))
1158 return sorted(self._repo.dirstate.matches(match))
1171
1159
1172 def ancestors(self):
1160 def ancestors(self):
1173 for a in self._repo.changelog.ancestors(
1161 for a in self._repo.changelog.ancestors(
1174 [p.rev() for p in self._parents]):
1162 [p.rev() for p in self._parents]):
1175 yield changectx(self._repo, a)
1163 yield changectx(self._repo, a)
1176
1164
1177 def markcommitted(self, node):
1165 def markcommitted(self, node):
1178 """Perform post-commit cleanup necessary after committing this ctx
1166 """Perform post-commit cleanup necessary after committing this ctx
1179
1167
1180 Specifically, this updates backing stores this working context
1168 Specifically, this updates backing stores this working context
1181 wraps to reflect the fact that the changes reflected by this
1169 wraps to reflect the fact that the changes reflected by this
1182 workingctx have been committed. For example, it marks
1170 workingctx have been committed. For example, it marks
1183 modified and added files as normal in the dirstate.
1171 modified and added files as normal in the dirstate.
1184
1172
1185 """
1173 """
1186
1174
1187 self._repo.dirstate.beginparentchange()
1175 self._repo.dirstate.beginparentchange()
1188 for f in self.modified() + self.added():
1176 for f in self.modified() + self.added():
1189 self._repo.dirstate.normal(f)
1177 self._repo.dirstate.normal(f)
1190 for f in self.removed():
1178 for f in self.removed():
1191 self._repo.dirstate.drop(f)
1179 self._repo.dirstate.drop(f)
1192 self._repo.dirstate.setparents(node)
1180 self._repo.dirstate.setparents(node)
1193 self._repo.dirstate.endparentchange()
1181 self._repo.dirstate.endparentchange()
1194
1182
1195 def dirs(self):
1183 def dirs(self):
1196 return self._repo.dirstate.dirs()
1184 return self._repo.dirstate.dirs()
1197
1185
1198 class workingctx(committablectx):
1186 class workingctx(committablectx):
1199 """A workingctx object makes access to data related to
1187 """A workingctx object makes access to data related to
1200 the current working directory convenient.
1188 the current working directory convenient.
1201 date - any valid date string or (unixtime, offset), or None.
1189 date - any valid date string or (unixtime, offset), or None.
1202 user - username string, or None.
1190 user - username string, or None.
1203 extra - a dictionary of extra values, or None.
1191 extra - a dictionary of extra values, or None.
1204 changes - a list of file lists as returned by localrepo.status()
1192 changes - a list of file lists as returned by localrepo.status()
1205 or None to use the repository status.
1193 or None to use the repository status.
1206 """
1194 """
1207 def __init__(self, repo, text="", user=None, date=None, extra=None,
1195 def __init__(self, repo, text="", user=None, date=None, extra=None,
1208 changes=None):
1196 changes=None):
1209 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1197 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1210
1198
1211 def __iter__(self):
1199 def __iter__(self):
1212 d = self._repo.dirstate
1200 d = self._repo.dirstate
1213 for f in d:
1201 for f in d:
1214 if d[f] != 'r':
1202 if d[f] != 'r':
1215 yield f
1203 yield f
1216
1204
1217 def __contains__(self, key):
1205 def __contains__(self, key):
1218 return self._repo.dirstate[key] not in "?r"
1206 return self._repo.dirstate[key] not in "?r"
1219
1207
1220 @propertycache
1208 @propertycache
1221 def _parents(self):
1209 def _parents(self):
1222 p = self._repo.dirstate.parents()
1210 p = self._repo.dirstate.parents()
1223 if p[1] == nullid:
1211 if p[1] == nullid:
1224 p = p[:-1]
1212 p = p[:-1]
1225 return [changectx(self._repo, x) for x in p]
1213 return [changectx(self._repo, x) for x in p]
1226
1214
1227 def filectx(self, path, filelog=None):
1215 def filectx(self, path, filelog=None):
1228 """get a file context from the working directory"""
1216 """get a file context from the working directory"""
1229 return workingfilectx(self._repo, path, workingctx=self,
1217 return workingfilectx(self._repo, path, workingctx=self,
1230 filelog=filelog)
1218 filelog=filelog)
1231
1219
1232 def dirty(self, missing=False, merge=True, branch=True):
1220 def dirty(self, missing=False, merge=True, branch=True):
1233 "check whether a working directory is modified"
1221 "check whether a working directory is modified"
1234 # check subrepos first
1222 # check subrepos first
1235 for s in sorted(self.substate):
1223 for s in sorted(self.substate):
1236 if self.sub(s).dirty():
1224 if self.sub(s).dirty():
1237 return True
1225 return True
1238 # check current working dir
1226 # check current working dir
1239 return ((merge and self.p2()) or
1227 return ((merge and self.p2()) or
1240 (branch and self.branch() != self.p1().branch()) or
1228 (branch and self.branch() != self.p1().branch()) or
1241 self.modified() or self.added() or self.removed() or
1229 self.modified() or self.added() or self.removed() or
1242 (missing and self.deleted()))
1230 (missing and self.deleted()))
1243
1231
1244 def add(self, list, prefix=""):
1232 def add(self, list, prefix=""):
1245 join = lambda f: os.path.join(prefix, f)
1233 join = lambda f: os.path.join(prefix, f)
1246 wlock = self._repo.wlock()
1234 wlock = self._repo.wlock()
1247 ui, ds = self._repo.ui, self._repo.dirstate
1235 ui, ds = self._repo.ui, self._repo.dirstate
1248 try:
1236 try:
1249 rejected = []
1237 rejected = []
1250 lstat = self._repo.wvfs.lstat
1238 lstat = self._repo.wvfs.lstat
1251 for f in list:
1239 for f in list:
1252 scmutil.checkportable(ui, join(f))
1240 scmutil.checkportable(ui, join(f))
1253 try:
1241 try:
1254 st = lstat(f)
1242 st = lstat(f)
1255 except OSError:
1243 except OSError:
1256 ui.warn(_("%s does not exist!\n") % join(f))
1244 ui.warn(_("%s does not exist!\n") % join(f))
1257 rejected.append(f)
1245 rejected.append(f)
1258 continue
1246 continue
1259 if st.st_size > 10000000:
1247 if st.st_size > 10000000:
1260 ui.warn(_("%s: up to %d MB of RAM may be required "
1248 ui.warn(_("%s: up to %d MB of RAM may be required "
1261 "to manage this file\n"
1249 "to manage this file\n"
1262 "(use 'hg revert %s' to cancel the "
1250 "(use 'hg revert %s' to cancel the "
1263 "pending addition)\n")
1251 "pending addition)\n")
1264 % (f, 3 * st.st_size // 1000000, join(f)))
1252 % (f, 3 * st.st_size // 1000000, join(f)))
1265 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1253 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1266 ui.warn(_("%s not added: only files and symlinks "
1254 ui.warn(_("%s not added: only files and symlinks "
1267 "supported currently\n") % join(f))
1255 "supported currently\n") % join(f))
1268 rejected.append(f)
1256 rejected.append(f)
1269 elif ds[f] in 'amn':
1257 elif ds[f] in 'amn':
1270 ui.warn(_("%s already tracked!\n") % join(f))
1258 ui.warn(_("%s already tracked!\n") % join(f))
1271 elif ds[f] == 'r':
1259 elif ds[f] == 'r':
1272 ds.normallookup(f)
1260 ds.normallookup(f)
1273 else:
1261 else:
1274 ds.add(f)
1262 ds.add(f)
1275 return rejected
1263 return rejected
1276 finally:
1264 finally:
1277 wlock.release()
1265 wlock.release()
1278
1266
1279 def forget(self, files, prefix=""):
1267 def forget(self, files, prefix=""):
1280 join = lambda f: os.path.join(prefix, f)
1268 join = lambda f: os.path.join(prefix, f)
1281 wlock = self._repo.wlock()
1269 wlock = self._repo.wlock()
1282 try:
1270 try:
1283 rejected = []
1271 rejected = []
1284 for f in files:
1272 for f in files:
1285 if f not in self._repo.dirstate:
1273 if f not in self._repo.dirstate:
1286 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1274 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1287 rejected.append(f)
1275 rejected.append(f)
1288 elif self._repo.dirstate[f] != 'a':
1276 elif self._repo.dirstate[f] != 'a':
1289 self._repo.dirstate.remove(f)
1277 self._repo.dirstate.remove(f)
1290 else:
1278 else:
1291 self._repo.dirstate.drop(f)
1279 self._repo.dirstate.drop(f)
1292 return rejected
1280 return rejected
1293 finally:
1281 finally:
1294 wlock.release()
1282 wlock.release()
1295
1283
1296 def undelete(self, list):
1284 def undelete(self, list):
1297 pctxs = self.parents()
1285 pctxs = self.parents()
1298 wlock = self._repo.wlock()
1286 wlock = self._repo.wlock()
1299 try:
1287 try:
1300 for f in list:
1288 for f in list:
1301 if self._repo.dirstate[f] != 'r':
1289 if self._repo.dirstate[f] != 'r':
1302 self._repo.ui.warn(_("%s not removed!\n") % f)
1290 self._repo.ui.warn(_("%s not removed!\n") % f)
1303 else:
1291 else:
1304 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1292 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1305 t = fctx.data()
1293 t = fctx.data()
1306 self._repo.wwrite(f, t, fctx.flags())
1294 self._repo.wwrite(f, t, fctx.flags())
1307 self._repo.dirstate.normal(f)
1295 self._repo.dirstate.normal(f)
1308 finally:
1296 finally:
1309 wlock.release()
1297 wlock.release()
1310
1298
1311 def copy(self, source, dest):
1299 def copy(self, source, dest):
1312 try:
1300 try:
1313 st = self._repo.wvfs.lstat(dest)
1301 st = self._repo.wvfs.lstat(dest)
1314 except OSError, err:
1302 except OSError, err:
1315 if err.errno != errno.ENOENT:
1303 if err.errno != errno.ENOENT:
1316 raise
1304 raise
1317 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1305 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1318 return
1306 return
1319 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1307 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1320 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1308 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1321 "symbolic link\n") % dest)
1309 "symbolic link\n") % dest)
1322 else:
1310 else:
1323 wlock = self._repo.wlock()
1311 wlock = self._repo.wlock()
1324 try:
1312 try:
1325 if self._repo.dirstate[dest] in '?r':
1313 if self._repo.dirstate[dest] in '?r':
1326 self._repo.dirstate.add(dest)
1314 self._repo.dirstate.add(dest)
1327 self._repo.dirstate.copy(source, dest)
1315 self._repo.dirstate.copy(source, dest)
1328 finally:
1316 finally:
1329 wlock.release()
1317 wlock.release()
1330
1318
1331 def _filtersuspectsymlink(self, files):
1319 def _filtersuspectsymlink(self, files):
1332 if not files or self._repo.dirstate._checklink:
1320 if not files or self._repo.dirstate._checklink:
1333 return files
1321 return files
1334
1322
1335 # Symlink placeholders may get non-symlink-like contents
1323 # Symlink placeholders may get non-symlink-like contents
1336 # via user error or dereferencing by NFS or Samba servers,
1324 # via user error or dereferencing by NFS or Samba servers,
1337 # so we filter out any placeholders that don't look like a
1325 # so we filter out any placeholders that don't look like a
1338 # symlink
1326 # symlink
1339 sane = []
1327 sane = []
1340 for f in files:
1328 for f in files:
1341 if self.flags(f) == 'l':
1329 if self.flags(f) == 'l':
1342 d = self[f].data()
1330 d = self[f].data()
1343 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1331 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1344 self._repo.ui.debug('ignoring suspect symlink placeholder'
1332 self._repo.ui.debug('ignoring suspect symlink placeholder'
1345 ' "%s"\n' % f)
1333 ' "%s"\n' % f)
1346 continue
1334 continue
1347 sane.append(f)
1335 sane.append(f)
1348 return sane
1336 return sane
1349
1337
1350 def _checklookup(self, files):
1338 def _checklookup(self, files):
1351 # check for any possibly clean files
1339 # check for any possibly clean files
1352 if not files:
1340 if not files:
1353 return [], []
1341 return [], []
1354
1342
1355 modified = []
1343 modified = []
1356 fixup = []
1344 fixup = []
1357 pctx = self._parents[0]
1345 pctx = self._parents[0]
1358 # do a full compare of any files that might have changed
1346 # do a full compare of any files that might have changed
1359 for f in sorted(files):
1347 for f in sorted(files):
1360 if (f not in pctx or self.flags(f) != pctx.flags(f)
1348 if (f not in pctx or self.flags(f) != pctx.flags(f)
1361 or pctx[f].cmp(self[f])):
1349 or pctx[f].cmp(self[f])):
1362 modified.append(f)
1350 modified.append(f)
1363 else:
1351 else:
1364 fixup.append(f)
1352 fixup.append(f)
1365
1353
1366 # update dirstate for files that are actually clean
1354 # update dirstate for files that are actually clean
1367 if fixup:
1355 if fixup:
1368 try:
1356 try:
1369 # updating the dirstate is optional
1357 # updating the dirstate is optional
1370 # so we don't wait on the lock
1358 # so we don't wait on the lock
1371 # wlock can invalidate the dirstate, so cache normal _after_
1359 # wlock can invalidate the dirstate, so cache normal _after_
1372 # taking the lock
1360 # taking the lock
1373 wlock = self._repo.wlock(False)
1361 wlock = self._repo.wlock(False)
1374 normal = self._repo.dirstate.normal
1362 normal = self._repo.dirstate.normal
1375 try:
1363 try:
1376 for f in fixup:
1364 for f in fixup:
1377 normal(f)
1365 normal(f)
1378 finally:
1366 finally:
1379 wlock.release()
1367 wlock.release()
1380 except error.LockError:
1368 except error.LockError:
1381 pass
1369 pass
1382 return modified, fixup
1370 return modified, fixup
1383
1371
1384 def _manifestmatches(self, match, s):
1372 def _manifestmatches(self, match, s):
1385 """Slow path for workingctx
1373 """Slow path for workingctx
1386
1374
1387 The fast path is when we compare the working directory to its parent
1375 The fast path is when we compare the working directory to its parent
1388 which means this function is comparing with a non-parent; therefore we
1376 which means this function is comparing with a non-parent; therefore we
1389 need to build a manifest and return what matches.
1377 need to build a manifest and return what matches.
1390 """
1378 """
1391 mf = self._repo['.']._manifestmatches(match, s)
1379 mf = self._repo['.']._manifestmatches(match, s)
1392 for f in s.modified + s.added:
1380 for f in s.modified + s.added:
1393 mf[f] = None
1381 mf[f] = None
1394 mf.setflag(f, self.flags(f))
1382 mf.setflag(f, self.flags(f))
1395 for f in s.removed:
1383 for f in s.removed:
1396 if f in mf:
1384 if f in mf:
1397 del mf[f]
1385 del mf[f]
1398 return mf
1386 return mf
1399
1387
1400 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1388 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1401 unknown=False):
1389 unknown=False):
1402 '''Gets the status from the dirstate -- internal use only.'''
1390 '''Gets the status from the dirstate -- internal use only.'''
1403 listignored, listclean, listunknown = ignored, clean, unknown
1391 listignored, listclean, listunknown = ignored, clean, unknown
1404 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1392 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1405 subrepos = []
1393 subrepos = []
1406 if '.hgsub' in self:
1394 if '.hgsub' in self:
1407 subrepos = sorted(self.substate)
1395 subrepos = sorted(self.substate)
1408 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1396 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1409 listclean, listunknown)
1397 listclean, listunknown)
1410
1398
1411 # check for any possibly clean files
1399 # check for any possibly clean files
1412 if cmp:
1400 if cmp:
1413 modified2, fixup = self._checklookup(cmp)
1401 modified2, fixup = self._checklookup(cmp)
1414 s.modified.extend(modified2)
1402 s.modified.extend(modified2)
1415
1403
1416 # update dirstate for files that are actually clean
1404 # update dirstate for files that are actually clean
1417 if fixup and listclean:
1405 if fixup and listclean:
1418 s.clean.extend(fixup)
1406 s.clean.extend(fixup)
1419
1407
1420 return s
1408 return s
1421
1409
1422 def _buildstatus(self, other, s, match, listignored, listclean,
1410 def _buildstatus(self, other, s, match, listignored, listclean,
1423 listunknown):
1411 listunknown):
1424 """build a status with respect to another context
1412 """build a status with respect to another context
1425
1413
1426 This includes logic for maintaining the fast path of status when
1414 This includes logic for maintaining the fast path of status when
1427 comparing the working directory against its parent, which is to skip
1415 comparing the working directory against its parent, which is to skip
1428 building a new manifest if self (working directory) is not comparing
1416 building a new manifest if self (working directory) is not comparing
1429 against its parent (repo['.']).
1417 against its parent (repo['.']).
1430 """
1418 """
1431 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1419 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1432 # Filter out symlinks that, in the case of FAT32 and NTFS filesytems,
1420 # Filter out symlinks that, in the case of FAT32 and NTFS filesytems,
1433 # might have accidentally ended up with the entire contents of the file
1421 # might have accidentally ended up with the entire contents of the file
1434 # they are susposed to be linking to.
1422 # they are susposed to be linking to.
1435 s.modified[:] = self._filtersuspectsymlink(s.modified)
1423 s.modified[:] = self._filtersuspectsymlink(s.modified)
1436 if other != self._repo['.']:
1424 if other != self._repo['.']:
1437 s = super(workingctx, self)._buildstatus(other, s, match,
1425 s = super(workingctx, self)._buildstatus(other, s, match,
1438 listignored, listclean,
1426 listignored, listclean,
1439 listunknown)
1427 listunknown)
1440 self._status = s
1428 self._status = s
1441 return s
1429 return s
1442
1430
1443 def _matchstatus(self, other, match):
1431 def _matchstatus(self, other, match):
1444 """override the match method with a filter for directory patterns
1432 """override the match method with a filter for directory patterns
1445
1433
1446 We use inheritance to customize the match.bad method only in cases of
1434 We use inheritance to customize the match.bad method only in cases of
1447 workingctx since it belongs only to the working directory when
1435 workingctx since it belongs only to the working directory when
1448 comparing against the parent changeset.
1436 comparing against the parent changeset.
1449
1437
1450 If we aren't comparing against the working directory's parent, then we
1438 If we aren't comparing against the working directory's parent, then we
1451 just use the default match object sent to us.
1439 just use the default match object sent to us.
1452 """
1440 """
1453 superself = super(workingctx, self)
1441 superself = super(workingctx, self)
1454 match = superself._matchstatus(other, match)
1442 match = superself._matchstatus(other, match)
1455 if other != self._repo['.']:
1443 if other != self._repo['.']:
1456 def bad(f, msg):
1444 def bad(f, msg):
1457 # 'f' may be a directory pattern from 'match.files()',
1445 # 'f' may be a directory pattern from 'match.files()',
1458 # so 'f not in ctx1' is not enough
1446 # so 'f not in ctx1' is not enough
1459 if f not in other and f not in other.dirs():
1447 if f not in other and f not in other.dirs():
1460 self._repo.ui.warn('%s: %s\n' %
1448 self._repo.ui.warn('%s: %s\n' %
1461 (self._repo.dirstate.pathto(f), msg))
1449 (self._repo.dirstate.pathto(f), msg))
1462 match.bad = bad
1450 match.bad = bad
1463 return match
1451 return match
1464
1452
1465 class committablefilectx(basefilectx):
1453 class committablefilectx(basefilectx):
1466 """A committablefilectx provides common functionality for a file context
1454 """A committablefilectx provides common functionality for a file context
1467 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1455 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1468 def __init__(self, repo, path, filelog=None, ctx=None):
1456 def __init__(self, repo, path, filelog=None, ctx=None):
1469 self._repo = repo
1457 self._repo = repo
1470 self._path = path
1458 self._path = path
1471 self._changeid = None
1459 self._changeid = None
1472 self._filerev = self._filenode = None
1460 self._filerev = self._filenode = None
1473
1461
1474 if filelog is not None:
1462 if filelog is not None:
1475 self._filelog = filelog
1463 self._filelog = filelog
1476 if ctx:
1464 if ctx:
1477 self._changectx = ctx
1465 self._changectx = ctx
1478
1466
1479 def __nonzero__(self):
1467 def __nonzero__(self):
1480 return True
1468 return True
1481
1469
1482 def parents(self):
1470 def parents(self):
1483 '''return parent filectxs, following copies if necessary'''
1471 '''return parent filectxs, following copies if necessary'''
1484 def filenode(ctx, path):
1472 def filenode(ctx, path):
1485 return ctx._manifest.get(path, nullid)
1473 return ctx._manifest.get(path, nullid)
1486
1474
1487 path = self._path
1475 path = self._path
1488 fl = self._filelog
1476 fl = self._filelog
1489 pcl = self._changectx._parents
1477 pcl = self._changectx._parents
1490 renamed = self.renamed()
1478 renamed = self.renamed()
1491
1479
1492 if renamed:
1480 if renamed:
1493 pl = [renamed + (None,)]
1481 pl = [renamed + (None,)]
1494 else:
1482 else:
1495 pl = [(path, filenode(pcl[0], path), fl)]
1483 pl = [(path, filenode(pcl[0], path), fl)]
1496
1484
1497 for pc in pcl[1:]:
1485 for pc in pcl[1:]:
1498 pl.append((path, filenode(pc, path), fl))
1486 pl.append((path, filenode(pc, path), fl))
1499
1487
1500 return [filectx(self._repo, p, fileid=n, filelog=l)
1488 return [filectx(self._repo, p, fileid=n, filelog=l)
1501 for p, n, l in pl if n != nullid]
1489 for p, n, l in pl if n != nullid]
1502
1490
1503 def children(self):
1491 def children(self):
1504 return []
1492 return []
1505
1493
1506 class workingfilectx(committablefilectx):
1494 class workingfilectx(committablefilectx):
1507 """A workingfilectx object makes access to data related to a particular
1495 """A workingfilectx object makes access to data related to a particular
1508 file in the working directory convenient."""
1496 file in the working directory convenient."""
1509 def __init__(self, repo, path, filelog=None, workingctx=None):
1497 def __init__(self, repo, path, filelog=None, workingctx=None):
1510 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1498 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1511
1499
1512 @propertycache
1500 @propertycache
1513 def _changectx(self):
1501 def _changectx(self):
1514 return workingctx(self._repo)
1502 return workingctx(self._repo)
1515
1503
1516 def data(self):
1504 def data(self):
1517 return self._repo.wread(self._path)
1505 return self._repo.wread(self._path)
1518 def renamed(self):
1506 def renamed(self):
1519 rp = self._repo.dirstate.copied(self._path)
1507 rp = self._repo.dirstate.copied(self._path)
1520 if not rp:
1508 if not rp:
1521 return None
1509 return None
1522 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1510 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1523
1511
1524 def size(self):
1512 def size(self):
1525 return self._repo.wvfs.lstat(self._path).st_size
1513 return self._repo.wvfs.lstat(self._path).st_size
1526 def date(self):
1514 def date(self):
1527 t, tz = self._changectx.date()
1515 t, tz = self._changectx.date()
1528 try:
1516 try:
1529 return (int(self._repo.wvfs.lstat(self._path).st_mtime), tz)
1517 return (int(self._repo.wvfs.lstat(self._path).st_mtime), tz)
1530 except OSError, err:
1518 except OSError, err:
1531 if err.errno != errno.ENOENT:
1519 if err.errno != errno.ENOENT:
1532 raise
1520 raise
1533 return (t, tz)
1521 return (t, tz)
1534
1522
1535 def cmp(self, fctx):
1523 def cmp(self, fctx):
1536 """compare with other file context
1524 """compare with other file context
1537
1525
1538 returns True if different than fctx.
1526 returns True if different than fctx.
1539 """
1527 """
1540 # fctx should be a filectx (not a workingfilectx)
1528 # fctx should be a filectx (not a workingfilectx)
1541 # invert comparison to reuse the same code path
1529 # invert comparison to reuse the same code path
1542 return fctx.cmp(self)
1530 return fctx.cmp(self)
1543
1531
1544 def remove(self, ignoremissing=False):
1532 def remove(self, ignoremissing=False):
1545 """wraps unlink for a repo's working directory"""
1533 """wraps unlink for a repo's working directory"""
1546 util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)
1534 util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)
1547
1535
1548 def write(self, data, flags):
1536 def write(self, data, flags):
1549 """wraps repo.wwrite"""
1537 """wraps repo.wwrite"""
1550 self._repo.wwrite(self._path, data, flags)
1538 self._repo.wwrite(self._path, data, flags)
1551
1539
1552 class memctx(committablectx):
1540 class memctx(committablectx):
1553 """Use memctx to perform in-memory commits via localrepo.commitctx().
1541 """Use memctx to perform in-memory commits via localrepo.commitctx().
1554
1542
1555 Revision information is supplied at initialization time while
1543 Revision information is supplied at initialization time while
1556 related files data and is made available through a callback
1544 related files data and is made available through a callback
1557 mechanism. 'repo' is the current localrepo, 'parents' is a
1545 mechanism. 'repo' is the current localrepo, 'parents' is a
1558 sequence of two parent revisions identifiers (pass None for every
1546 sequence of two parent revisions identifiers (pass None for every
1559 missing parent), 'text' is the commit message and 'files' lists
1547 missing parent), 'text' is the commit message and 'files' lists
1560 names of files touched by the revision (normalized and relative to
1548 names of files touched by the revision (normalized and relative to
1561 repository root).
1549 repository root).
1562
1550
1563 filectxfn(repo, memctx, path) is a callable receiving the
1551 filectxfn(repo, memctx, path) is a callable receiving the
1564 repository, the current memctx object and the normalized path of
1552 repository, the current memctx object and the normalized path of
1565 requested file, relative to repository root. It is fired by the
1553 requested file, relative to repository root. It is fired by the
1566 commit function for every file in 'files', but calls order is
1554 commit function for every file in 'files', but calls order is
1567 undefined. If the file is available in the revision being
1555 undefined. If the file is available in the revision being
1568 committed (updated or added), filectxfn returns a memfilectx
1556 committed (updated or added), filectxfn returns a memfilectx
1569 object. If the file was removed, filectxfn raises an
1557 object. If the file was removed, filectxfn raises an
1570 IOError. Moved files are represented by marking the source file
1558 IOError. Moved files are represented by marking the source file
1571 removed and the new file added with copy information (see
1559 removed and the new file added with copy information (see
1572 memfilectx).
1560 memfilectx).
1573
1561
1574 user receives the committer name and defaults to current
1562 user receives the committer name and defaults to current
1575 repository username, date is the commit date in any format
1563 repository username, date is the commit date in any format
1576 supported by util.parsedate() and defaults to current date, extra
1564 supported by util.parsedate() and defaults to current date, extra
1577 is a dictionary of metadata or is left empty.
1565 is a dictionary of metadata or is left empty.
1578 """
1566 """
1579
1567
1580 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
1568 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
1581 # Extensions that need to retain compatibility across Mercurial 3.1 can use
1569 # Extensions that need to retain compatibility across Mercurial 3.1 can use
1582 # this field to determine what to do in filectxfn.
1570 # this field to determine what to do in filectxfn.
1583 _returnnoneformissingfiles = True
1571 _returnnoneformissingfiles = True
1584
1572
1585 def __init__(self, repo, parents, text, files, filectxfn, user=None,
1573 def __init__(self, repo, parents, text, files, filectxfn, user=None,
1586 date=None, extra=None, editor=False):
1574 date=None, extra=None, editor=False):
1587 super(memctx, self).__init__(repo, text, user, date, extra)
1575 super(memctx, self).__init__(repo, text, user, date, extra)
1588 self._rev = None
1576 self._rev = None
1589 self._node = None
1577 self._node = None
1590 parents = [(p or nullid) for p in parents]
1578 parents = [(p or nullid) for p in parents]
1591 p1, p2 = parents
1579 p1, p2 = parents
1592 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
1580 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
1593 files = sorted(set(files))
1581 files = sorted(set(files))
1594 self._status = scmutil.status(files, [], [], [], [], [], [])
1582 self._status = scmutil.status(files, [], [], [], [], [], [])
1595 self._filectxfn = filectxfn
1583 self._filectxfn = filectxfn
1596 self.substate = {}
1584 self.substate = {}
1597
1585
1598 # if store is not callable, wrap it in a function
1586 # if store is not callable, wrap it in a function
1599 if not callable(filectxfn):
1587 if not callable(filectxfn):
1600 def getfilectx(repo, memctx, path):
1588 def getfilectx(repo, memctx, path):
1601 fctx = filectxfn[path]
1589 fctx = filectxfn[path]
1602 # this is weird but apparently we only keep track of one parent
1590 # this is weird but apparently we only keep track of one parent
1603 # (why not only store that instead of a tuple?)
1591 # (why not only store that instead of a tuple?)
1604 copied = fctx.renamed()
1592 copied = fctx.renamed()
1605 if copied:
1593 if copied:
1606 copied = copied[0]
1594 copied = copied[0]
1607 return memfilectx(repo, path, fctx.data(),
1595 return memfilectx(repo, path, fctx.data(),
1608 islink=fctx.islink(), isexec=fctx.isexec(),
1596 islink=fctx.islink(), isexec=fctx.isexec(),
1609 copied=copied, memctx=memctx)
1597 copied=copied, memctx=memctx)
1610 self._filectxfn = getfilectx
1598 self._filectxfn = getfilectx
1611
1599
1612 self._extra = extra and extra.copy() or {}
1600 self._extra = extra and extra.copy() or {}
1613 if self._extra.get('branch', '') == '':
1601 if self._extra.get('branch', '') == '':
1614 self._extra['branch'] = 'default'
1602 self._extra['branch'] = 'default'
1615
1603
1616 if editor:
1604 if editor:
1617 self._text = editor(self._repo, self, [])
1605 self._text = editor(self._repo, self, [])
1618 self._repo.savecommitmessage(self._text)
1606 self._repo.savecommitmessage(self._text)
1619
1607
1620 def filectx(self, path, filelog=None):
1608 def filectx(self, path, filelog=None):
1621 """get a file context from the working directory
1609 """get a file context from the working directory
1622
1610
1623 Returns None if file doesn't exist and should be removed."""
1611 Returns None if file doesn't exist and should be removed."""
1624 return self._filectxfn(self._repo, self, path)
1612 return self._filectxfn(self._repo, self, path)
1625
1613
1626 def commit(self):
1614 def commit(self):
1627 """commit context to the repo"""
1615 """commit context to the repo"""
1628 return self._repo.commitctx(self)
1616 return self._repo.commitctx(self)
1629
1617
1630 @propertycache
1618 @propertycache
1631 def _manifest(self):
1619 def _manifest(self):
1632 """generate a manifest based on the return values of filectxfn"""
1620 """generate a manifest based on the return values of filectxfn"""
1633
1621
1634 # keep this simple for now; just worry about p1
1622 # keep this simple for now; just worry about p1
1635 pctx = self._parents[0]
1623 pctx = self._parents[0]
1636 man = pctx.manifest().copy()
1624 man = pctx.manifest().copy()
1637
1625
1638 for f, fnode in man.iteritems():
1626 for f, fnode in man.iteritems():
1639 p1node = nullid
1627 p1node = nullid
1640 p2node = nullid
1628 p2node = nullid
1641 p = pctx[f].parents() # if file isn't in pctx, check p2?
1629 p = pctx[f].parents() # if file isn't in pctx, check p2?
1642 if len(p) > 0:
1630 if len(p) > 0:
1643 p1node = p[0].node()
1631 p1node = p[0].node()
1644 if len(p) > 1:
1632 if len(p) > 1:
1645 p2node = p[1].node()
1633 p2node = p[1].node()
1646 man[f] = revlog.hash(self[f].data(), p1node, p2node)
1634 man[f] = revlog.hash(self[f].data(), p1node, p2node)
1647
1635
1648 return man
1636 return man
1649
1637
1650
1638
1651 class memfilectx(committablefilectx):
1639 class memfilectx(committablefilectx):
1652 """memfilectx represents an in-memory file to commit.
1640 """memfilectx represents an in-memory file to commit.
1653
1641
1654 See memctx and committablefilectx for more details.
1642 See memctx and committablefilectx for more details.
1655 """
1643 """
1656 def __init__(self, repo, path, data, islink=False,
1644 def __init__(self, repo, path, data, islink=False,
1657 isexec=False, copied=None, memctx=None):
1645 isexec=False, copied=None, memctx=None):
1658 """
1646 """
1659 path is the normalized file path relative to repository root.
1647 path is the normalized file path relative to repository root.
1660 data is the file content as a string.
1648 data is the file content as a string.
1661 islink is True if the file is a symbolic link.
1649 islink is True if the file is a symbolic link.
1662 isexec is True if the file is executable.
1650 isexec is True if the file is executable.
1663 copied is the source file path if current file was copied in the
1651 copied is the source file path if current file was copied in the
1664 revision being committed, or None."""
1652 revision being committed, or None."""
1665 super(memfilectx, self).__init__(repo, path, None, memctx)
1653 super(memfilectx, self).__init__(repo, path, None, memctx)
1666 self._data = data
1654 self._data = data
1667 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
1655 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
1668 self._copied = None
1656 self._copied = None
1669 if copied:
1657 if copied:
1670 self._copied = (copied, nullid)
1658 self._copied = (copied, nullid)
1671
1659
1672 def data(self):
1660 def data(self):
1673 return self._data
1661 return self._data
1674 def size(self):
1662 def size(self):
1675 return len(self.data())
1663 return len(self.data())
1676 def flags(self):
1664 def flags(self):
1677 return self._flags
1665 return self._flags
1678 def renamed(self):
1666 def renamed(self):
1679 return self._copied
1667 return self._copied
1680
1668
1681 def remove(self, ignoremissing=False):
1669 def remove(self, ignoremissing=False):
1682 """wraps unlink for a repo's working directory"""
1670 """wraps unlink for a repo's working directory"""
1683 # need to figure out what to do here
1671 # need to figure out what to do here
1684 del self._changectx[self._path]
1672 del self._changectx[self._path]
1685
1673
1686 def write(self, data, flags):
1674 def write(self, data, flags):
1687 """wraps repo.wwrite"""
1675 """wraps repo.wwrite"""
1688 self._data = data
1676 self._data = data
@@ -1,268 +1,284 b''
1 # manifest.py - manifest revision class for mercurial
1 # manifest.py - manifest revision class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from i18n import _
8 from i18n import _
9 import mdiff, parsers, error, revlog, util
9 import mdiff, parsers, error, revlog, util
10 import array, struct
10 import array, struct
11
11
12 class manifestdict(dict):
12 class manifestdict(dict):
13 def __init__(self, mapping=None, flags=None):
13 def __init__(self, mapping=None, flags=None):
14 if mapping is None:
14 if mapping is None:
15 mapping = {}
15 mapping = {}
16 if flags is None:
16 if flags is None:
17 flags = {}
17 flags = {}
18 dict.__init__(self, mapping)
18 dict.__init__(self, mapping)
19 self._flags = flags
19 self._flags = flags
20 def flags(self, f):
20 def flags(self, f):
21 return self._flags.get(f, "")
21 return self._flags.get(f, "")
22 def withflags(self):
22 def withflags(self):
23 return set(self._flags.keys())
23 return set(self._flags.keys())
24 def setflag(self, f, flags):
24 def setflag(self, f, flags):
25 """Set the flags (symlink, executable) for path f."""
25 """Set the flags (symlink, executable) for path f."""
26 self._flags[f] = flags
26 self._flags[f] = flags
27 def copy(self):
27 def copy(self):
28 return manifestdict(self, dict.copy(self._flags))
28 return manifestdict(self, dict.copy(self._flags))
29 def intersectfiles(self, files):
29 def intersectfiles(self, files):
30 '''make a new manifestdict with the intersection of self with files
30 '''make a new manifestdict with the intersection of self with files
31
31
32 The algorithm assumes that files is much smaller than self.'''
32 The algorithm assumes that files is much smaller than self.'''
33 ret = manifestdict()
33 ret = manifestdict()
34 for fn in files:
34 for fn in files:
35 if fn in self:
35 if fn in self:
36 ret[fn] = self[fn]
36 ret[fn] = self[fn]
37 flags = self._flags.get(fn, None)
37 flags = self._flags.get(fn, None)
38 if flags:
38 if flags:
39 ret._flags[fn] = flags
39 ret._flags[fn] = flags
40 return ret
40 return ret
41
41
42 def matches(self, match):
43 '''generate a new manifest filtered by the match argument'''
44 if match.always():
45 return self.copy()
46
47 files = match.files()
48 if (match.matchfn == match.exact or
49 (not match.anypats() and util.all(fn in self for fn in files))):
50 return self.intersectfiles(files)
51
52 mf = self.copy()
53 for fn in mf.keys():
54 if not match(fn):
55 del mf[fn]
56 return mf
57
42 def diff(self, m2):
58 def diff(self, m2):
43 '''Finds changes between the current manifest and m2. The result is
59 '''Finds changes between the current manifest and m2. The result is
44 returned as a dict with filename as key and values of the form
60 returned as a dict with filename as key and values of the form
45 ((n1,fl1),(n2,fl2)), where n1/n2 is the nodeid in the current/other
61 ((n1,fl1),(n2,fl2)), where n1/n2 is the nodeid in the current/other
46 manifest and fl1/fl2 is the flag in the current/other manifest. Where
62 manifest and fl1/fl2 is the flag in the current/other manifest. Where
47 the file does not exist, the nodeid will be None and the flags will be
63 the file does not exist, the nodeid will be None and the flags will be
48 the empty string.'''
64 the empty string.'''
49 diff = {}
65 diff = {}
50
66
51 for fn, n1 in self.iteritems():
67 for fn, n1 in self.iteritems():
52 fl1 = self._flags.get(fn, '')
68 fl1 = self._flags.get(fn, '')
53 n2 = m2.get(fn, None)
69 n2 = m2.get(fn, None)
54 fl2 = m2._flags.get(fn, '')
70 fl2 = m2._flags.get(fn, '')
55 if n2 is None:
71 if n2 is None:
56 fl2 = ''
72 fl2 = ''
57 if n1 != n2 or fl1 != fl2:
73 if n1 != n2 or fl1 != fl2:
58 diff[fn] = ((n1, fl1), (n2, fl2))
74 diff[fn] = ((n1, fl1), (n2, fl2))
59
75
60 for fn, n2 in m2.iteritems():
76 for fn, n2 in m2.iteritems():
61 if fn not in self:
77 if fn not in self:
62 fl2 = m2._flags.get(fn, '')
78 fl2 = m2._flags.get(fn, '')
63 diff[fn] = ((None, ''), (n2, fl2))
79 diff[fn] = ((None, ''), (n2, fl2))
64
80
65 return diff
81 return diff
66
82
67 def text(self):
83 def text(self):
68 """Get the full data of this manifest as a bytestring."""
84 """Get the full data of this manifest as a bytestring."""
69 fl = sorted(self)
85 fl = sorted(self)
70 _checkforbidden(fl)
86 _checkforbidden(fl)
71
87
72 hex, flags = revlog.hex, self.flags
88 hex, flags = revlog.hex, self.flags
73 # if this is changed to support newlines in filenames,
89 # if this is changed to support newlines in filenames,
74 # be sure to check the templates/ dir again (especially *-raw.tmpl)
90 # be sure to check the templates/ dir again (especially *-raw.tmpl)
75 return ''.join("%s\0%s%s\n" % (f, hex(self[f]), flags(f)) for f in fl)
91 return ''.join("%s\0%s%s\n" % (f, hex(self[f]), flags(f)) for f in fl)
76
92
77 def fastdelta(self, base, changes):
93 def fastdelta(self, base, changes):
78 """Given a base manifest text as an array.array and a list of changes
94 """Given a base manifest text as an array.array and a list of changes
79 relative to that text, compute a delta that can be used by revlog.
95 relative to that text, compute a delta that can be used by revlog.
80 """
96 """
81 delta = []
97 delta = []
82 dstart = None
98 dstart = None
83 dend = None
99 dend = None
84 dline = [""]
100 dline = [""]
85 start = 0
101 start = 0
86 # zero copy representation of base as a buffer
102 # zero copy representation of base as a buffer
87 addbuf = util.buffer(base)
103 addbuf = util.buffer(base)
88
104
89 # start with a readonly loop that finds the offset of
105 # start with a readonly loop that finds the offset of
90 # each line and creates the deltas
106 # each line and creates the deltas
91 for f, todelete in changes:
107 for f, todelete in changes:
92 # bs will either be the index of the item or the insert point
108 # bs will either be the index of the item or the insert point
93 start, end = _msearch(addbuf, f, start)
109 start, end = _msearch(addbuf, f, start)
94 if not todelete:
110 if not todelete:
95 l = "%s\0%s%s\n" % (f, revlog.hex(self[f]), self.flags(f))
111 l = "%s\0%s%s\n" % (f, revlog.hex(self[f]), self.flags(f))
96 else:
112 else:
97 if start == end:
113 if start == end:
98 # item we want to delete was not found, error out
114 # item we want to delete was not found, error out
99 raise AssertionError(
115 raise AssertionError(
100 _("failed to remove %s from manifest") % f)
116 _("failed to remove %s from manifest") % f)
101 l = ""
117 l = ""
102 if dstart is not None and dstart <= start and dend >= start:
118 if dstart is not None and dstart <= start and dend >= start:
103 if dend < end:
119 if dend < end:
104 dend = end
120 dend = end
105 if l:
121 if l:
106 dline.append(l)
122 dline.append(l)
107 else:
123 else:
108 if dstart is not None:
124 if dstart is not None:
109 delta.append([dstart, dend, "".join(dline)])
125 delta.append([dstart, dend, "".join(dline)])
110 dstart = start
126 dstart = start
111 dend = end
127 dend = end
112 dline = [l]
128 dline = [l]
113
129
114 if dstart is not None:
130 if dstart is not None:
115 delta.append([dstart, dend, "".join(dline)])
131 delta.append([dstart, dend, "".join(dline)])
116 # apply the delta to the base, and get a delta for addrevision
132 # apply the delta to the base, and get a delta for addrevision
117 deltatext, arraytext = _addlistdelta(base, delta)
133 deltatext, arraytext = _addlistdelta(base, delta)
118 return arraytext, deltatext
134 return arraytext, deltatext
119
135
120 def _msearch(m, s, lo=0, hi=None):
136 def _msearch(m, s, lo=0, hi=None):
121 '''return a tuple (start, end) that says where to find s within m.
137 '''return a tuple (start, end) that says where to find s within m.
122
138
123 If the string is found m[start:end] are the line containing
139 If the string is found m[start:end] are the line containing
124 that string. If start == end the string was not found and
140 that string. If start == end the string was not found and
125 they indicate the proper sorted insertion point.
141 they indicate the proper sorted insertion point.
126
142
127 m should be a buffer or a string
143 m should be a buffer or a string
128 s is a string'''
144 s is a string'''
129 def advance(i, c):
145 def advance(i, c):
130 while i < lenm and m[i] != c:
146 while i < lenm and m[i] != c:
131 i += 1
147 i += 1
132 return i
148 return i
133 if not s:
149 if not s:
134 return (lo, lo)
150 return (lo, lo)
135 lenm = len(m)
151 lenm = len(m)
136 if not hi:
152 if not hi:
137 hi = lenm
153 hi = lenm
138 while lo < hi:
154 while lo < hi:
139 mid = (lo + hi) // 2
155 mid = (lo + hi) // 2
140 start = mid
156 start = mid
141 while start > 0 and m[start - 1] != '\n':
157 while start > 0 and m[start - 1] != '\n':
142 start -= 1
158 start -= 1
143 end = advance(start, '\0')
159 end = advance(start, '\0')
144 if m[start:end] < s:
160 if m[start:end] < s:
145 # we know that after the null there are 40 bytes of sha1
161 # we know that after the null there are 40 bytes of sha1
146 # this translates to the bisect lo = mid + 1
162 # this translates to the bisect lo = mid + 1
147 lo = advance(end + 40, '\n') + 1
163 lo = advance(end + 40, '\n') + 1
148 else:
164 else:
149 # this translates to the bisect hi = mid
165 # this translates to the bisect hi = mid
150 hi = start
166 hi = start
151 end = advance(lo, '\0')
167 end = advance(lo, '\0')
152 found = m[lo:end]
168 found = m[lo:end]
153 if s == found:
169 if s == found:
154 # we know that after the null there are 40 bytes of sha1
170 # we know that after the null there are 40 bytes of sha1
155 end = advance(end + 40, '\n')
171 end = advance(end + 40, '\n')
156 return (lo, end + 1)
172 return (lo, end + 1)
157 else:
173 else:
158 return (lo, lo)
174 return (lo, lo)
159
175
160 def _checkforbidden(l):
176 def _checkforbidden(l):
161 """Check filenames for illegal characters."""
177 """Check filenames for illegal characters."""
162 for f in l:
178 for f in l:
163 if '\n' in f or '\r' in f:
179 if '\n' in f or '\r' in f:
164 raise error.RevlogError(
180 raise error.RevlogError(
165 _("'\\n' and '\\r' disallowed in filenames: %r") % f)
181 _("'\\n' and '\\r' disallowed in filenames: %r") % f)
166
182
167
183
168 # apply the changes collected during the bisect loop to our addlist
184 # apply the changes collected during the bisect loop to our addlist
169 # return a delta suitable for addrevision
185 # return a delta suitable for addrevision
170 def _addlistdelta(addlist, x):
186 def _addlistdelta(addlist, x):
171 # for large addlist arrays, building a new array is cheaper
187 # for large addlist arrays, building a new array is cheaper
172 # than repeatedly modifying the existing one
188 # than repeatedly modifying the existing one
173 currentposition = 0
189 currentposition = 0
174 newaddlist = array.array('c')
190 newaddlist = array.array('c')
175
191
176 for start, end, content in x:
192 for start, end, content in x:
177 newaddlist += addlist[currentposition:start]
193 newaddlist += addlist[currentposition:start]
178 if content:
194 if content:
179 newaddlist += array.array('c', content)
195 newaddlist += array.array('c', content)
180
196
181 currentposition = end
197 currentposition = end
182
198
183 newaddlist += addlist[currentposition:]
199 newaddlist += addlist[currentposition:]
184
200
185 deltatext = "".join(struct.pack(">lll", start, end, len(content))
201 deltatext = "".join(struct.pack(">lll", start, end, len(content))
186 + content for start, end, content in x)
202 + content for start, end, content in x)
187 return deltatext, newaddlist
203 return deltatext, newaddlist
188
204
189 def _parse(lines):
205 def _parse(lines):
190 mfdict = manifestdict()
206 mfdict = manifestdict()
191 parsers.parse_manifest(mfdict, mfdict._flags, lines)
207 parsers.parse_manifest(mfdict, mfdict._flags, lines)
192 return mfdict
208 return mfdict
193
209
194 class manifest(revlog.revlog):
210 class manifest(revlog.revlog):
195 def __init__(self, opener):
211 def __init__(self, opener):
196 # we expect to deal with not more than four revs at a time,
212 # we expect to deal with not more than four revs at a time,
197 # during a commit --amend
213 # during a commit --amend
198 self._mancache = util.lrucachedict(4)
214 self._mancache = util.lrucachedict(4)
199 revlog.revlog.__init__(self, opener, "00manifest.i")
215 revlog.revlog.__init__(self, opener, "00manifest.i")
200
216
201 def readdelta(self, node):
217 def readdelta(self, node):
202 r = self.rev(node)
218 r = self.rev(node)
203 return _parse(mdiff.patchtext(self.revdiff(self.deltaparent(r), r)))
219 return _parse(mdiff.patchtext(self.revdiff(self.deltaparent(r), r)))
204
220
205 def readfast(self, node):
221 def readfast(self, node):
206 '''use the faster of readdelta or read'''
222 '''use the faster of readdelta or read'''
207 r = self.rev(node)
223 r = self.rev(node)
208 deltaparent = self.deltaparent(r)
224 deltaparent = self.deltaparent(r)
209 if deltaparent != revlog.nullrev and deltaparent in self.parentrevs(r):
225 if deltaparent != revlog.nullrev and deltaparent in self.parentrevs(r):
210 return self.readdelta(node)
226 return self.readdelta(node)
211 return self.read(node)
227 return self.read(node)
212
228
213 def read(self, node):
229 def read(self, node):
214 if node == revlog.nullid:
230 if node == revlog.nullid:
215 return manifestdict() # don't upset local cache
231 return manifestdict() # don't upset local cache
216 if node in self._mancache:
232 if node in self._mancache:
217 return self._mancache[node][0]
233 return self._mancache[node][0]
218 text = self.revision(node)
234 text = self.revision(node)
219 arraytext = array.array('c', text)
235 arraytext = array.array('c', text)
220 mapping = _parse(text)
236 mapping = _parse(text)
221 self._mancache[node] = (mapping, arraytext)
237 self._mancache[node] = (mapping, arraytext)
222 return mapping
238 return mapping
223
239
224 def find(self, node, f):
240 def find(self, node, f):
225 '''look up entry for a single file efficiently.
241 '''look up entry for a single file efficiently.
226 return (node, flags) pair if found, (None, None) if not.'''
242 return (node, flags) pair if found, (None, None) if not.'''
227 if node in self._mancache:
243 if node in self._mancache:
228 mapping = self._mancache[node][0]
244 mapping = self._mancache[node][0]
229 return mapping.get(f), mapping.flags(f)
245 return mapping.get(f), mapping.flags(f)
230 text = self.revision(node)
246 text = self.revision(node)
231 start, end = _msearch(text, f)
247 start, end = _msearch(text, f)
232 if start == end:
248 if start == end:
233 return None, None
249 return None, None
234 l = text[start:end]
250 l = text[start:end]
235 f, n = l.split('\0')
251 f, n = l.split('\0')
236 return revlog.bin(n[:40]), n[40:-1]
252 return revlog.bin(n[:40]), n[40:-1]
237
253
238 def add(self, map, transaction, link, p1, p2, added, removed):
254 def add(self, map, transaction, link, p1, p2, added, removed):
239 if p1 in self._mancache:
255 if p1 in self._mancache:
240 # If our first parent is in the manifest cache, we can
256 # If our first parent is in the manifest cache, we can
241 # compute a delta here using properties we know about the
257 # compute a delta here using properties we know about the
242 # manifest up-front, which may save time later for the
258 # manifest up-front, which may save time later for the
243 # revlog layer.
259 # revlog layer.
244
260
245 _checkforbidden(added)
261 _checkforbidden(added)
246 # combine the changed lists into one list for sorting
262 # combine the changed lists into one list for sorting
247 work = [(x, False) for x in added]
263 work = [(x, False) for x in added]
248 work.extend((x, True) for x in removed)
264 work.extend((x, True) for x in removed)
249 # this could use heapq.merge() (from Python 2.6+) or equivalent
265 # this could use heapq.merge() (from Python 2.6+) or equivalent
250 # since the lists are already sorted
266 # since the lists are already sorted
251 work.sort()
267 work.sort()
252
268
253 arraytext, deltatext = map.fastdelta(self._mancache[p1][1], work)
269 arraytext, deltatext = map.fastdelta(self._mancache[p1][1], work)
254 cachedelta = self.rev(p1), deltatext
270 cachedelta = self.rev(p1), deltatext
255 text = util.buffer(arraytext)
271 text = util.buffer(arraytext)
256 else:
272 else:
257 # The first parent manifest isn't already loaded, so we'll
273 # The first parent manifest isn't already loaded, so we'll
258 # just encode a fulltext of the manifest and pass that
274 # just encode a fulltext of the manifest and pass that
259 # through to the revlog layer, and let it handle the delta
275 # through to the revlog layer, and let it handle the delta
260 # process.
276 # process.
261 text = map.text()
277 text = map.text()
262 arraytext = array.array('c', text)
278 arraytext = array.array('c', text)
263 cachedelta = None
279 cachedelta = None
264
280
265 n = self.addrevision(text, transaction, link, p1, p2, cachedelta)
281 n = self.addrevision(text, transaction, link, p1, p2, cachedelta)
266 self._mancache[n] = (map, arraytext)
282 self._mancache[n] = (map, arraytext)
267
283
268 return n
284 return n
General Comments 0
You need to be logged in to leave comments. Login now