##// END OF EJS Templates
manifest: adds manifestctx.readfast...
Durham Goode -
r29939:80be4436 default
parent child Browse files
Show More
@@ -1,1985 +1,1985 b''
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import os
11 import os
12 import re
12 import re
13 import stat
13 import stat
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 bin,
17 bin,
18 hex,
18 hex,
19 nullid,
19 nullid,
20 nullrev,
20 nullrev,
21 short,
21 short,
22 wdirid,
22 wdirid,
23 )
23 )
24 from . import (
24 from . import (
25 encoding,
25 encoding,
26 error,
26 error,
27 fileset,
27 fileset,
28 match as matchmod,
28 match as matchmod,
29 mdiff,
29 mdiff,
30 obsolete as obsmod,
30 obsolete as obsmod,
31 patch,
31 patch,
32 phases,
32 phases,
33 repoview,
33 repoview,
34 revlog,
34 revlog,
35 scmutil,
35 scmutil,
36 subrepo,
36 subrepo,
37 util,
37 util,
38 )
38 )
39
39
40 propertycache = util.propertycache
40 propertycache = util.propertycache
41
41
42 # Phony node value to stand-in for new files in some uses of
42 # Phony node value to stand-in for new files in some uses of
43 # manifests. Manifests support 21-byte hashes for nodes which are
43 # manifests. Manifests support 21-byte hashes for nodes which are
44 # dirty in the working copy.
44 # dirty in the working copy.
45 _newnode = '!' * 21
45 _newnode = '!' * 21
46
46
47 nonascii = re.compile(r'[^\x21-\x7f]').search
47 nonascii = re.compile(r'[^\x21-\x7f]').search
48
48
49 class basectx(object):
49 class basectx(object):
50 """A basectx object represents the common logic for its children:
50 """A basectx object represents the common logic for its children:
51 changectx: read-only context that is already present in the repo,
51 changectx: read-only context that is already present in the repo,
52 workingctx: a context that represents the working directory and can
52 workingctx: a context that represents the working directory and can
53 be committed,
53 be committed,
54 memctx: a context that represents changes in-memory and can also
54 memctx: a context that represents changes in-memory and can also
55 be committed."""
55 be committed."""
56 def __new__(cls, repo, changeid='', *args, **kwargs):
56 def __new__(cls, repo, changeid='', *args, **kwargs):
57 if isinstance(changeid, basectx):
57 if isinstance(changeid, basectx):
58 return changeid
58 return changeid
59
59
60 o = super(basectx, cls).__new__(cls)
60 o = super(basectx, cls).__new__(cls)
61
61
62 o._repo = repo
62 o._repo = repo
63 o._rev = nullrev
63 o._rev = nullrev
64 o._node = nullid
64 o._node = nullid
65
65
66 return o
66 return o
67
67
68 def __str__(self):
68 def __str__(self):
69 return short(self.node())
69 return short(self.node())
70
70
71 def __int__(self):
71 def __int__(self):
72 return self.rev()
72 return self.rev()
73
73
74 def __repr__(self):
74 def __repr__(self):
75 return "<%s %s>" % (type(self).__name__, str(self))
75 return "<%s %s>" % (type(self).__name__, str(self))
76
76
77 def __eq__(self, other):
77 def __eq__(self, other):
78 try:
78 try:
79 return type(self) == type(other) and self._rev == other._rev
79 return type(self) == type(other) and self._rev == other._rev
80 except AttributeError:
80 except AttributeError:
81 return False
81 return False
82
82
83 def __ne__(self, other):
83 def __ne__(self, other):
84 return not (self == other)
84 return not (self == other)
85
85
86 def __contains__(self, key):
86 def __contains__(self, key):
87 return key in self._manifest
87 return key in self._manifest
88
88
89 def __getitem__(self, key):
89 def __getitem__(self, key):
90 return self.filectx(key)
90 return self.filectx(key)
91
91
92 def __iter__(self):
92 def __iter__(self):
93 return iter(self._manifest)
93 return iter(self._manifest)
94
94
95 def _manifestmatches(self, match, s):
95 def _manifestmatches(self, match, s):
96 """generate a new manifest filtered by the match argument
96 """generate a new manifest filtered by the match argument
97
97
98 This method is for internal use only and mainly exists to provide an
98 This method is for internal use only and mainly exists to provide an
99 object oriented way for other contexts to customize the manifest
99 object oriented way for other contexts to customize the manifest
100 generation.
100 generation.
101 """
101 """
102 return self.manifest().matches(match)
102 return self.manifest().matches(match)
103
103
104 def _matchstatus(self, other, match):
104 def _matchstatus(self, other, match):
105 """return match.always if match is none
105 """return match.always if match is none
106
106
107 This internal method provides a way for child objects to override the
107 This internal method provides a way for child objects to override the
108 match operator.
108 match operator.
109 """
109 """
110 return match or matchmod.always(self._repo.root, self._repo.getcwd())
110 return match or matchmod.always(self._repo.root, self._repo.getcwd())
111
111
112 def _buildstatus(self, other, s, match, listignored, listclean,
112 def _buildstatus(self, other, s, match, listignored, listclean,
113 listunknown):
113 listunknown):
114 """build a status with respect to another context"""
114 """build a status with respect to another context"""
115 # Load earliest manifest first for caching reasons. More specifically,
115 # Load earliest manifest first for caching reasons. More specifically,
116 # if you have revisions 1000 and 1001, 1001 is probably stored as a
116 # if you have revisions 1000 and 1001, 1001 is probably stored as a
117 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
117 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
118 # 1000 and cache it so that when you read 1001, we just need to apply a
118 # 1000 and cache it so that when you read 1001, we just need to apply a
119 # delta to what's in the cache. So that's one full reconstruction + one
119 # delta to what's in the cache. So that's one full reconstruction + one
120 # delta application.
120 # delta application.
121 if self.rev() is not None and self.rev() < other.rev():
121 if self.rev() is not None and self.rev() < other.rev():
122 self.manifest()
122 self.manifest()
123 mf1 = other._manifestmatches(match, s)
123 mf1 = other._manifestmatches(match, s)
124 mf2 = self._manifestmatches(match, s)
124 mf2 = self._manifestmatches(match, s)
125
125
126 modified, added = [], []
126 modified, added = [], []
127 removed = []
127 removed = []
128 clean = []
128 clean = []
129 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
129 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
130 deletedset = set(deleted)
130 deletedset = set(deleted)
131 d = mf1.diff(mf2, clean=listclean)
131 d = mf1.diff(mf2, clean=listclean)
132 for fn, value in d.iteritems():
132 for fn, value in d.iteritems():
133 if fn in deletedset:
133 if fn in deletedset:
134 continue
134 continue
135 if value is None:
135 if value is None:
136 clean.append(fn)
136 clean.append(fn)
137 continue
137 continue
138 (node1, flag1), (node2, flag2) = value
138 (node1, flag1), (node2, flag2) = value
139 if node1 is None:
139 if node1 is None:
140 added.append(fn)
140 added.append(fn)
141 elif node2 is None:
141 elif node2 is None:
142 removed.append(fn)
142 removed.append(fn)
143 elif flag1 != flag2:
143 elif flag1 != flag2:
144 modified.append(fn)
144 modified.append(fn)
145 elif node2 != _newnode:
145 elif node2 != _newnode:
146 # When comparing files between two commits, we save time by
146 # When comparing files between two commits, we save time by
147 # not comparing the file contents when the nodeids differ.
147 # not comparing the file contents when the nodeids differ.
148 # Note that this means we incorrectly report a reverted change
148 # Note that this means we incorrectly report a reverted change
149 # to a file as a modification.
149 # to a file as a modification.
150 modified.append(fn)
150 modified.append(fn)
151 elif self[fn].cmp(other[fn]):
151 elif self[fn].cmp(other[fn]):
152 modified.append(fn)
152 modified.append(fn)
153 else:
153 else:
154 clean.append(fn)
154 clean.append(fn)
155
155
156 if removed:
156 if removed:
157 # need to filter files if they are already reported as removed
157 # need to filter files if they are already reported as removed
158 unknown = [fn for fn in unknown if fn not in mf1]
158 unknown = [fn for fn in unknown if fn not in mf1]
159 ignored = [fn for fn in ignored if fn not in mf1]
159 ignored = [fn for fn in ignored if fn not in mf1]
160 # if they're deleted, don't report them as removed
160 # if they're deleted, don't report them as removed
161 removed = [fn for fn in removed if fn not in deletedset]
161 removed = [fn for fn in removed if fn not in deletedset]
162
162
163 return scmutil.status(modified, added, removed, deleted, unknown,
163 return scmutil.status(modified, added, removed, deleted, unknown,
164 ignored, clean)
164 ignored, clean)
165
165
166 @propertycache
166 @propertycache
167 def substate(self):
167 def substate(self):
168 return subrepo.state(self, self._repo.ui)
168 return subrepo.state(self, self._repo.ui)
169
169
170 def subrev(self, subpath):
170 def subrev(self, subpath):
171 return self.substate[subpath][1]
171 return self.substate[subpath][1]
172
172
173 def rev(self):
173 def rev(self):
174 return self._rev
174 return self._rev
175 def node(self):
175 def node(self):
176 return self._node
176 return self._node
177 def hex(self):
177 def hex(self):
178 return hex(self.node())
178 return hex(self.node())
179 def manifest(self):
179 def manifest(self):
180 return self._manifest
180 return self._manifest
181 def repo(self):
181 def repo(self):
182 return self._repo
182 return self._repo
183 def phasestr(self):
183 def phasestr(self):
184 return phases.phasenames[self.phase()]
184 return phases.phasenames[self.phase()]
185 def mutable(self):
185 def mutable(self):
186 return self.phase() > phases.public
186 return self.phase() > phases.public
187
187
188 def getfileset(self, expr):
188 def getfileset(self, expr):
189 return fileset.getfileset(self, expr)
189 return fileset.getfileset(self, expr)
190
190
191 def obsolete(self):
191 def obsolete(self):
192 """True if the changeset is obsolete"""
192 """True if the changeset is obsolete"""
193 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
193 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
194
194
195 def extinct(self):
195 def extinct(self):
196 """True if the changeset is extinct"""
196 """True if the changeset is extinct"""
197 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
197 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
198
198
199 def unstable(self):
199 def unstable(self):
200 """True if the changeset is not obsolete but it's ancestor are"""
200 """True if the changeset is not obsolete but it's ancestor are"""
201 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
201 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
202
202
203 def bumped(self):
203 def bumped(self):
204 """True if the changeset try to be a successor of a public changeset
204 """True if the changeset try to be a successor of a public changeset
205
205
206 Only non-public and non-obsolete changesets may be bumped.
206 Only non-public and non-obsolete changesets may be bumped.
207 """
207 """
208 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
208 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
209
209
210 def divergent(self):
210 def divergent(self):
211 """Is a successors of a changeset with multiple possible successors set
211 """Is a successors of a changeset with multiple possible successors set
212
212
213 Only non-public and non-obsolete changesets may be divergent.
213 Only non-public and non-obsolete changesets may be divergent.
214 """
214 """
215 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
215 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
216
216
217 def troubled(self):
217 def troubled(self):
218 """True if the changeset is either unstable, bumped or divergent"""
218 """True if the changeset is either unstable, bumped or divergent"""
219 return self.unstable() or self.bumped() or self.divergent()
219 return self.unstable() or self.bumped() or self.divergent()
220
220
221 def troubles(self):
221 def troubles(self):
222 """return the list of troubles affecting this changesets.
222 """return the list of troubles affecting this changesets.
223
223
224 Troubles are returned as strings. possible values are:
224 Troubles are returned as strings. possible values are:
225 - unstable,
225 - unstable,
226 - bumped,
226 - bumped,
227 - divergent.
227 - divergent.
228 """
228 """
229 troubles = []
229 troubles = []
230 if self.unstable():
230 if self.unstable():
231 troubles.append('unstable')
231 troubles.append('unstable')
232 if self.bumped():
232 if self.bumped():
233 troubles.append('bumped')
233 troubles.append('bumped')
234 if self.divergent():
234 if self.divergent():
235 troubles.append('divergent')
235 troubles.append('divergent')
236 return troubles
236 return troubles
237
237
238 def parents(self):
238 def parents(self):
239 """return contexts for each parent changeset"""
239 """return contexts for each parent changeset"""
240 return self._parents
240 return self._parents
241
241
242 def p1(self):
242 def p1(self):
243 return self._parents[0]
243 return self._parents[0]
244
244
245 def p2(self):
245 def p2(self):
246 parents = self._parents
246 parents = self._parents
247 if len(parents) == 2:
247 if len(parents) == 2:
248 return parents[1]
248 return parents[1]
249 return changectx(self._repo, nullrev)
249 return changectx(self._repo, nullrev)
250
250
251 def _fileinfo(self, path):
251 def _fileinfo(self, path):
252 if '_manifest' in self.__dict__:
252 if '_manifest' in self.__dict__:
253 try:
253 try:
254 return self._manifest[path], self._manifest.flags(path)
254 return self._manifest[path], self._manifest.flags(path)
255 except KeyError:
255 except KeyError:
256 raise error.ManifestLookupError(self._node, path,
256 raise error.ManifestLookupError(self._node, path,
257 _('not found in manifest'))
257 _('not found in manifest'))
258 if '_manifestdelta' in self.__dict__ or path in self.files():
258 if '_manifestdelta' in self.__dict__ or path in self.files():
259 if path in self._manifestdelta:
259 if path in self._manifestdelta:
260 return (self._manifestdelta[path],
260 return (self._manifestdelta[path],
261 self._manifestdelta.flags(path))
261 self._manifestdelta.flags(path))
262 node, flag = self._repo.manifest.find(self._changeset.manifest, path)
262 node, flag = self._repo.manifest.find(self._changeset.manifest, path)
263 if not node:
263 if not node:
264 raise error.ManifestLookupError(self._node, path,
264 raise error.ManifestLookupError(self._node, path,
265 _('not found in manifest'))
265 _('not found in manifest'))
266
266
267 return node, flag
267 return node, flag
268
268
269 def filenode(self, path):
269 def filenode(self, path):
270 return self._fileinfo(path)[0]
270 return self._fileinfo(path)[0]
271
271
272 def flags(self, path):
272 def flags(self, path):
273 try:
273 try:
274 return self._fileinfo(path)[1]
274 return self._fileinfo(path)[1]
275 except error.LookupError:
275 except error.LookupError:
276 return ''
276 return ''
277
277
278 def sub(self, path, allowcreate=True):
278 def sub(self, path, allowcreate=True):
279 '''return a subrepo for the stored revision of path, never wdir()'''
279 '''return a subrepo for the stored revision of path, never wdir()'''
280 return subrepo.subrepo(self, path, allowcreate=allowcreate)
280 return subrepo.subrepo(self, path, allowcreate=allowcreate)
281
281
282 def nullsub(self, path, pctx):
282 def nullsub(self, path, pctx):
283 return subrepo.nullsubrepo(self, path, pctx)
283 return subrepo.nullsubrepo(self, path, pctx)
284
284
285 def workingsub(self, path):
285 def workingsub(self, path):
286 '''return a subrepo for the stored revision, or wdir if this is a wdir
286 '''return a subrepo for the stored revision, or wdir if this is a wdir
287 context.
287 context.
288 '''
288 '''
289 return subrepo.subrepo(self, path, allowwdir=True)
289 return subrepo.subrepo(self, path, allowwdir=True)
290
290
291 def match(self, pats=[], include=None, exclude=None, default='glob',
291 def match(self, pats=[], include=None, exclude=None, default='glob',
292 listsubrepos=False, badfn=None):
292 listsubrepos=False, badfn=None):
293 r = self._repo
293 r = self._repo
294 return matchmod.match(r.root, r.getcwd(), pats,
294 return matchmod.match(r.root, r.getcwd(), pats,
295 include, exclude, default,
295 include, exclude, default,
296 auditor=r.nofsauditor, ctx=self,
296 auditor=r.nofsauditor, ctx=self,
297 listsubrepos=listsubrepos, badfn=badfn)
297 listsubrepos=listsubrepos, badfn=badfn)
298
298
299 def diff(self, ctx2=None, match=None, **opts):
299 def diff(self, ctx2=None, match=None, **opts):
300 """Returns a diff generator for the given contexts and matcher"""
300 """Returns a diff generator for the given contexts and matcher"""
301 if ctx2 is None:
301 if ctx2 is None:
302 ctx2 = self.p1()
302 ctx2 = self.p1()
303 if ctx2 is not None:
303 if ctx2 is not None:
304 ctx2 = self._repo[ctx2]
304 ctx2 = self._repo[ctx2]
305 diffopts = patch.diffopts(self._repo.ui, opts)
305 diffopts = patch.diffopts(self._repo.ui, opts)
306 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
306 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
307
307
308 def dirs(self):
308 def dirs(self):
309 return self._manifest.dirs()
309 return self._manifest.dirs()
310
310
311 def hasdir(self, dir):
311 def hasdir(self, dir):
312 return self._manifest.hasdir(dir)
312 return self._manifest.hasdir(dir)
313
313
314 def dirty(self, missing=False, merge=True, branch=True):
314 def dirty(self, missing=False, merge=True, branch=True):
315 return False
315 return False
316
316
317 def status(self, other=None, match=None, listignored=False,
317 def status(self, other=None, match=None, listignored=False,
318 listclean=False, listunknown=False, listsubrepos=False):
318 listclean=False, listunknown=False, listsubrepos=False):
319 """return status of files between two nodes or node and working
319 """return status of files between two nodes or node and working
320 directory.
320 directory.
321
321
322 If other is None, compare this node with working directory.
322 If other is None, compare this node with working directory.
323
323
324 returns (modified, added, removed, deleted, unknown, ignored, clean)
324 returns (modified, added, removed, deleted, unknown, ignored, clean)
325 """
325 """
326
326
327 ctx1 = self
327 ctx1 = self
328 ctx2 = self._repo[other]
328 ctx2 = self._repo[other]
329
329
330 # This next code block is, admittedly, fragile logic that tests for
330 # This next code block is, admittedly, fragile logic that tests for
331 # reversing the contexts and wouldn't need to exist if it weren't for
331 # reversing the contexts and wouldn't need to exist if it weren't for
332 # the fast (and common) code path of comparing the working directory
332 # the fast (and common) code path of comparing the working directory
333 # with its first parent.
333 # with its first parent.
334 #
334 #
335 # What we're aiming for here is the ability to call:
335 # What we're aiming for here is the ability to call:
336 #
336 #
337 # workingctx.status(parentctx)
337 # workingctx.status(parentctx)
338 #
338 #
339 # If we always built the manifest for each context and compared those,
339 # If we always built the manifest for each context and compared those,
340 # then we'd be done. But the special case of the above call means we
340 # then we'd be done. But the special case of the above call means we
341 # just copy the manifest of the parent.
341 # just copy the manifest of the parent.
342 reversed = False
342 reversed = False
343 if (not isinstance(ctx1, changectx)
343 if (not isinstance(ctx1, changectx)
344 and isinstance(ctx2, changectx)):
344 and isinstance(ctx2, changectx)):
345 reversed = True
345 reversed = True
346 ctx1, ctx2 = ctx2, ctx1
346 ctx1, ctx2 = ctx2, ctx1
347
347
348 match = ctx2._matchstatus(ctx1, match)
348 match = ctx2._matchstatus(ctx1, match)
349 r = scmutil.status([], [], [], [], [], [], [])
349 r = scmutil.status([], [], [], [], [], [], [])
350 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
350 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
351 listunknown)
351 listunknown)
352
352
353 if reversed:
353 if reversed:
354 # Reverse added and removed. Clear deleted, unknown and ignored as
354 # Reverse added and removed. Clear deleted, unknown and ignored as
355 # these make no sense to reverse.
355 # these make no sense to reverse.
356 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
356 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
357 r.clean)
357 r.clean)
358
358
359 if listsubrepos:
359 if listsubrepos:
360 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
360 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
361 try:
361 try:
362 rev2 = ctx2.subrev(subpath)
362 rev2 = ctx2.subrev(subpath)
363 except KeyError:
363 except KeyError:
364 # A subrepo that existed in node1 was deleted between
364 # A subrepo that existed in node1 was deleted between
365 # node1 and node2 (inclusive). Thus, ctx2's substate
365 # node1 and node2 (inclusive). Thus, ctx2's substate
366 # won't contain that subpath. The best we can do ignore it.
366 # won't contain that subpath. The best we can do ignore it.
367 rev2 = None
367 rev2 = None
368 submatch = matchmod.subdirmatcher(subpath, match)
368 submatch = matchmod.subdirmatcher(subpath, match)
369 s = sub.status(rev2, match=submatch, ignored=listignored,
369 s = sub.status(rev2, match=submatch, ignored=listignored,
370 clean=listclean, unknown=listunknown,
370 clean=listclean, unknown=listunknown,
371 listsubrepos=True)
371 listsubrepos=True)
372 for rfiles, sfiles in zip(r, s):
372 for rfiles, sfiles in zip(r, s):
373 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
373 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
374
374
375 for l in r:
375 for l in r:
376 l.sort()
376 l.sort()
377
377
378 return r
378 return r
379
379
380
380
381 def makememctx(repo, parents, text, user, date, branch, files, store,
381 def makememctx(repo, parents, text, user, date, branch, files, store,
382 editor=None, extra=None):
382 editor=None, extra=None):
383 def getfilectx(repo, memctx, path):
383 def getfilectx(repo, memctx, path):
384 data, mode, copied = store.getfile(path)
384 data, mode, copied = store.getfile(path)
385 if data is None:
385 if data is None:
386 return None
386 return None
387 islink, isexec = mode
387 islink, isexec = mode
388 return memfilectx(repo, path, data, islink=islink, isexec=isexec,
388 return memfilectx(repo, path, data, islink=islink, isexec=isexec,
389 copied=copied, memctx=memctx)
389 copied=copied, memctx=memctx)
390 if extra is None:
390 if extra is None:
391 extra = {}
391 extra = {}
392 if branch:
392 if branch:
393 extra['branch'] = encoding.fromlocal(branch)
393 extra['branch'] = encoding.fromlocal(branch)
394 ctx = memctx(repo, parents, text, files, getfilectx, user,
394 ctx = memctx(repo, parents, text, files, getfilectx, user,
395 date, extra, editor)
395 date, extra, editor)
396 return ctx
396 return ctx
397
397
398 class changectx(basectx):
398 class changectx(basectx):
399 """A changecontext object makes access to data related to a particular
399 """A changecontext object makes access to data related to a particular
400 changeset convenient. It represents a read-only context already present in
400 changeset convenient. It represents a read-only context already present in
401 the repo."""
401 the repo."""
402 def __init__(self, repo, changeid=''):
402 def __init__(self, repo, changeid=''):
403 """changeid is a revision number, node, or tag"""
403 """changeid is a revision number, node, or tag"""
404
404
405 # since basectx.__new__ already took care of copying the object, we
405 # since basectx.__new__ already took care of copying the object, we
406 # don't need to do anything in __init__, so we just exit here
406 # don't need to do anything in __init__, so we just exit here
407 if isinstance(changeid, basectx):
407 if isinstance(changeid, basectx):
408 return
408 return
409
409
410 if changeid == '':
410 if changeid == '':
411 changeid = '.'
411 changeid = '.'
412 self._repo = repo
412 self._repo = repo
413
413
414 try:
414 try:
415 if isinstance(changeid, int):
415 if isinstance(changeid, int):
416 self._node = repo.changelog.node(changeid)
416 self._node = repo.changelog.node(changeid)
417 self._rev = changeid
417 self._rev = changeid
418 return
418 return
419 if isinstance(changeid, long):
419 if isinstance(changeid, long):
420 changeid = str(changeid)
420 changeid = str(changeid)
421 if changeid == 'null':
421 if changeid == 'null':
422 self._node = nullid
422 self._node = nullid
423 self._rev = nullrev
423 self._rev = nullrev
424 return
424 return
425 if changeid == 'tip':
425 if changeid == 'tip':
426 self._node = repo.changelog.tip()
426 self._node = repo.changelog.tip()
427 self._rev = repo.changelog.rev(self._node)
427 self._rev = repo.changelog.rev(self._node)
428 return
428 return
429 if changeid == '.' or changeid == repo.dirstate.p1():
429 if changeid == '.' or changeid == repo.dirstate.p1():
430 # this is a hack to delay/avoid loading obsmarkers
430 # this is a hack to delay/avoid loading obsmarkers
431 # when we know that '.' won't be hidden
431 # when we know that '.' won't be hidden
432 self._node = repo.dirstate.p1()
432 self._node = repo.dirstate.p1()
433 self._rev = repo.unfiltered().changelog.rev(self._node)
433 self._rev = repo.unfiltered().changelog.rev(self._node)
434 return
434 return
435 if len(changeid) == 20:
435 if len(changeid) == 20:
436 try:
436 try:
437 self._node = changeid
437 self._node = changeid
438 self._rev = repo.changelog.rev(changeid)
438 self._rev = repo.changelog.rev(changeid)
439 return
439 return
440 except error.FilteredRepoLookupError:
440 except error.FilteredRepoLookupError:
441 raise
441 raise
442 except LookupError:
442 except LookupError:
443 pass
443 pass
444
444
445 try:
445 try:
446 r = int(changeid)
446 r = int(changeid)
447 if str(r) != changeid:
447 if str(r) != changeid:
448 raise ValueError
448 raise ValueError
449 l = len(repo.changelog)
449 l = len(repo.changelog)
450 if r < 0:
450 if r < 0:
451 r += l
451 r += l
452 if r < 0 or r >= l:
452 if r < 0 or r >= l:
453 raise ValueError
453 raise ValueError
454 self._rev = r
454 self._rev = r
455 self._node = repo.changelog.node(r)
455 self._node = repo.changelog.node(r)
456 return
456 return
457 except error.FilteredIndexError:
457 except error.FilteredIndexError:
458 raise
458 raise
459 except (ValueError, OverflowError, IndexError):
459 except (ValueError, OverflowError, IndexError):
460 pass
460 pass
461
461
462 if len(changeid) == 40:
462 if len(changeid) == 40:
463 try:
463 try:
464 self._node = bin(changeid)
464 self._node = bin(changeid)
465 self._rev = repo.changelog.rev(self._node)
465 self._rev = repo.changelog.rev(self._node)
466 return
466 return
467 except error.FilteredLookupError:
467 except error.FilteredLookupError:
468 raise
468 raise
469 except (TypeError, LookupError):
469 except (TypeError, LookupError):
470 pass
470 pass
471
471
472 # lookup bookmarks through the name interface
472 # lookup bookmarks through the name interface
473 try:
473 try:
474 self._node = repo.names.singlenode(repo, changeid)
474 self._node = repo.names.singlenode(repo, changeid)
475 self._rev = repo.changelog.rev(self._node)
475 self._rev = repo.changelog.rev(self._node)
476 return
476 return
477 except KeyError:
477 except KeyError:
478 pass
478 pass
479 except error.FilteredRepoLookupError:
479 except error.FilteredRepoLookupError:
480 raise
480 raise
481 except error.RepoLookupError:
481 except error.RepoLookupError:
482 pass
482 pass
483
483
484 self._node = repo.unfiltered().changelog._partialmatch(changeid)
484 self._node = repo.unfiltered().changelog._partialmatch(changeid)
485 if self._node is not None:
485 if self._node is not None:
486 self._rev = repo.changelog.rev(self._node)
486 self._rev = repo.changelog.rev(self._node)
487 return
487 return
488
488
489 # lookup failed
489 # lookup failed
490 # check if it might have come from damaged dirstate
490 # check if it might have come from damaged dirstate
491 #
491 #
492 # XXX we could avoid the unfiltered if we had a recognizable
492 # XXX we could avoid the unfiltered if we had a recognizable
493 # exception for filtered changeset access
493 # exception for filtered changeset access
494 if changeid in repo.unfiltered().dirstate.parents():
494 if changeid in repo.unfiltered().dirstate.parents():
495 msg = _("working directory has unknown parent '%s'!")
495 msg = _("working directory has unknown parent '%s'!")
496 raise error.Abort(msg % short(changeid))
496 raise error.Abort(msg % short(changeid))
497 try:
497 try:
498 if len(changeid) == 20 and nonascii(changeid):
498 if len(changeid) == 20 and nonascii(changeid):
499 changeid = hex(changeid)
499 changeid = hex(changeid)
500 except TypeError:
500 except TypeError:
501 pass
501 pass
502 except (error.FilteredIndexError, error.FilteredLookupError,
502 except (error.FilteredIndexError, error.FilteredLookupError,
503 error.FilteredRepoLookupError):
503 error.FilteredRepoLookupError):
504 if repo.filtername.startswith('visible'):
504 if repo.filtername.startswith('visible'):
505 msg = _("hidden revision '%s'") % changeid
505 msg = _("hidden revision '%s'") % changeid
506 hint = _('use --hidden to access hidden revisions')
506 hint = _('use --hidden to access hidden revisions')
507 raise error.FilteredRepoLookupError(msg, hint=hint)
507 raise error.FilteredRepoLookupError(msg, hint=hint)
508 msg = _("filtered revision '%s' (not in '%s' subset)")
508 msg = _("filtered revision '%s' (not in '%s' subset)")
509 msg %= (changeid, repo.filtername)
509 msg %= (changeid, repo.filtername)
510 raise error.FilteredRepoLookupError(msg)
510 raise error.FilteredRepoLookupError(msg)
511 except IndexError:
511 except IndexError:
512 pass
512 pass
513 raise error.RepoLookupError(
513 raise error.RepoLookupError(
514 _("unknown revision '%s'") % changeid)
514 _("unknown revision '%s'") % changeid)
515
515
516 def __hash__(self):
516 def __hash__(self):
517 try:
517 try:
518 return hash(self._rev)
518 return hash(self._rev)
519 except AttributeError:
519 except AttributeError:
520 return id(self)
520 return id(self)
521
521
522 def __nonzero__(self):
522 def __nonzero__(self):
523 return self._rev != nullrev
523 return self._rev != nullrev
524
524
525 @propertycache
525 @propertycache
526 def _changeset(self):
526 def _changeset(self):
527 return self._repo.changelog.changelogrevision(self.rev())
527 return self._repo.changelog.changelogrevision(self.rev())
528
528
529 @propertycache
529 @propertycache
530 def _manifest(self):
530 def _manifest(self):
531 return self._repo.manifestlog[self._changeset.manifest].read()
531 return self._repo.manifestlog[self._changeset.manifest].read()
532
532
533 @propertycache
533 @propertycache
534 def _manifestdelta(self):
534 def _manifestdelta(self):
535 mfnode = self._changeset.manifest
535 mfnode = self._changeset.manifest
536 return self._repo.manifestlog[mfnode].readdelta()
536 return self._repo.manifestlog[mfnode].readdelta()
537
537
538 @propertycache
538 @propertycache
539 def _parents(self):
539 def _parents(self):
540 repo = self._repo
540 repo = self._repo
541 p1, p2 = repo.changelog.parentrevs(self._rev)
541 p1, p2 = repo.changelog.parentrevs(self._rev)
542 if p2 == nullrev:
542 if p2 == nullrev:
543 return [changectx(repo, p1)]
543 return [changectx(repo, p1)]
544 return [changectx(repo, p1), changectx(repo, p2)]
544 return [changectx(repo, p1), changectx(repo, p2)]
545
545
546 def changeset(self):
546 def changeset(self):
547 c = self._changeset
547 c = self._changeset
548 return (
548 return (
549 c.manifest,
549 c.manifest,
550 c.user,
550 c.user,
551 c.date,
551 c.date,
552 c.files,
552 c.files,
553 c.description,
553 c.description,
554 c.extra,
554 c.extra,
555 )
555 )
556 def manifestnode(self):
556 def manifestnode(self):
557 return self._changeset.manifest
557 return self._changeset.manifest
558
558
559 def user(self):
559 def user(self):
560 return self._changeset.user
560 return self._changeset.user
561 def date(self):
561 def date(self):
562 return self._changeset.date
562 return self._changeset.date
563 def files(self):
563 def files(self):
564 return self._changeset.files
564 return self._changeset.files
565 def description(self):
565 def description(self):
566 return self._changeset.description
566 return self._changeset.description
567 def branch(self):
567 def branch(self):
568 return encoding.tolocal(self._changeset.extra.get("branch"))
568 return encoding.tolocal(self._changeset.extra.get("branch"))
569 def closesbranch(self):
569 def closesbranch(self):
570 return 'close' in self._changeset.extra
570 return 'close' in self._changeset.extra
571 def extra(self):
571 def extra(self):
572 return self._changeset.extra
572 return self._changeset.extra
573 def tags(self):
573 def tags(self):
574 return self._repo.nodetags(self._node)
574 return self._repo.nodetags(self._node)
575 def bookmarks(self):
575 def bookmarks(self):
576 return self._repo.nodebookmarks(self._node)
576 return self._repo.nodebookmarks(self._node)
577 def phase(self):
577 def phase(self):
578 return self._repo._phasecache.phase(self._repo, self._rev)
578 return self._repo._phasecache.phase(self._repo, self._rev)
579 def hidden(self):
579 def hidden(self):
580 return self._rev in repoview.filterrevs(self._repo, 'visible')
580 return self._rev in repoview.filterrevs(self._repo, 'visible')
581
581
582 def children(self):
582 def children(self):
583 """return contexts for each child changeset"""
583 """return contexts for each child changeset"""
584 c = self._repo.changelog.children(self._node)
584 c = self._repo.changelog.children(self._node)
585 return [changectx(self._repo, x) for x in c]
585 return [changectx(self._repo, x) for x in c]
586
586
587 def ancestors(self):
587 def ancestors(self):
588 for a in self._repo.changelog.ancestors([self._rev]):
588 for a in self._repo.changelog.ancestors([self._rev]):
589 yield changectx(self._repo, a)
589 yield changectx(self._repo, a)
590
590
591 def descendants(self):
591 def descendants(self):
592 for d in self._repo.changelog.descendants([self._rev]):
592 for d in self._repo.changelog.descendants([self._rev]):
593 yield changectx(self._repo, d)
593 yield changectx(self._repo, d)
594
594
595 def filectx(self, path, fileid=None, filelog=None):
595 def filectx(self, path, fileid=None, filelog=None):
596 """get a file context from this changeset"""
596 """get a file context from this changeset"""
597 if fileid is None:
597 if fileid is None:
598 fileid = self.filenode(path)
598 fileid = self.filenode(path)
599 return filectx(self._repo, path, fileid=fileid,
599 return filectx(self._repo, path, fileid=fileid,
600 changectx=self, filelog=filelog)
600 changectx=self, filelog=filelog)
601
601
602 def ancestor(self, c2, warn=False):
602 def ancestor(self, c2, warn=False):
603 """return the "best" ancestor context of self and c2
603 """return the "best" ancestor context of self and c2
604
604
605 If there are multiple candidates, it will show a message and check
605 If there are multiple candidates, it will show a message and check
606 merge.preferancestor configuration before falling back to the
606 merge.preferancestor configuration before falling back to the
607 revlog ancestor."""
607 revlog ancestor."""
608 # deal with workingctxs
608 # deal with workingctxs
609 n2 = c2._node
609 n2 = c2._node
610 if n2 is None:
610 if n2 is None:
611 n2 = c2._parents[0]._node
611 n2 = c2._parents[0]._node
612 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
612 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
613 if not cahs:
613 if not cahs:
614 anc = nullid
614 anc = nullid
615 elif len(cahs) == 1:
615 elif len(cahs) == 1:
616 anc = cahs[0]
616 anc = cahs[0]
617 else:
617 else:
618 # experimental config: merge.preferancestor
618 # experimental config: merge.preferancestor
619 for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
619 for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
620 try:
620 try:
621 ctx = changectx(self._repo, r)
621 ctx = changectx(self._repo, r)
622 except error.RepoLookupError:
622 except error.RepoLookupError:
623 continue
623 continue
624 anc = ctx.node()
624 anc = ctx.node()
625 if anc in cahs:
625 if anc in cahs:
626 break
626 break
627 else:
627 else:
628 anc = self._repo.changelog.ancestor(self._node, n2)
628 anc = self._repo.changelog.ancestor(self._node, n2)
629 if warn:
629 if warn:
630 self._repo.ui.status(
630 self._repo.ui.status(
631 (_("note: using %s as ancestor of %s and %s\n") %
631 (_("note: using %s as ancestor of %s and %s\n") %
632 (short(anc), short(self._node), short(n2))) +
632 (short(anc), short(self._node), short(n2))) +
633 ''.join(_(" alternatively, use --config "
633 ''.join(_(" alternatively, use --config "
634 "merge.preferancestor=%s\n") %
634 "merge.preferancestor=%s\n") %
635 short(n) for n in sorted(cahs) if n != anc))
635 short(n) for n in sorted(cahs) if n != anc))
636 return changectx(self._repo, anc)
636 return changectx(self._repo, anc)
637
637
638 def descendant(self, other):
638 def descendant(self, other):
639 """True if other is descendant of this changeset"""
639 """True if other is descendant of this changeset"""
640 return self._repo.changelog.descendant(self._rev, other._rev)
640 return self._repo.changelog.descendant(self._rev, other._rev)
641
641
642 def walk(self, match):
642 def walk(self, match):
643 '''Generates matching file names.'''
643 '''Generates matching file names.'''
644
644
645 # Wrap match.bad method to have message with nodeid
645 # Wrap match.bad method to have message with nodeid
646 def bad(fn, msg):
646 def bad(fn, msg):
647 # The manifest doesn't know about subrepos, so don't complain about
647 # The manifest doesn't know about subrepos, so don't complain about
648 # paths into valid subrepos.
648 # paths into valid subrepos.
649 if any(fn == s or fn.startswith(s + '/')
649 if any(fn == s or fn.startswith(s + '/')
650 for s in self.substate):
650 for s in self.substate):
651 return
651 return
652 match.bad(fn, _('no such file in rev %s') % self)
652 match.bad(fn, _('no such file in rev %s') % self)
653
653
654 m = matchmod.badmatch(match, bad)
654 m = matchmod.badmatch(match, bad)
655 return self._manifest.walk(m)
655 return self._manifest.walk(m)
656
656
657 def matches(self, match):
657 def matches(self, match):
658 return self.walk(match)
658 return self.walk(match)
659
659
660 class basefilectx(object):
660 class basefilectx(object):
661 """A filecontext object represents the common logic for its children:
661 """A filecontext object represents the common logic for its children:
662 filectx: read-only access to a filerevision that is already present
662 filectx: read-only access to a filerevision that is already present
663 in the repo,
663 in the repo,
664 workingfilectx: a filecontext that represents files from the working
664 workingfilectx: a filecontext that represents files from the working
665 directory,
665 directory,
666 memfilectx: a filecontext that represents files in-memory."""
666 memfilectx: a filecontext that represents files in-memory."""
667 def __new__(cls, repo, path, *args, **kwargs):
667 def __new__(cls, repo, path, *args, **kwargs):
668 return super(basefilectx, cls).__new__(cls)
668 return super(basefilectx, cls).__new__(cls)
669
669
670 @propertycache
670 @propertycache
671 def _filelog(self):
671 def _filelog(self):
672 return self._repo.file(self._path)
672 return self._repo.file(self._path)
673
673
674 @propertycache
674 @propertycache
675 def _changeid(self):
675 def _changeid(self):
676 if '_changeid' in self.__dict__:
676 if '_changeid' in self.__dict__:
677 return self._changeid
677 return self._changeid
678 elif '_changectx' in self.__dict__:
678 elif '_changectx' in self.__dict__:
679 return self._changectx.rev()
679 return self._changectx.rev()
680 elif '_descendantrev' in self.__dict__:
680 elif '_descendantrev' in self.__dict__:
681 # this file context was created from a revision with a known
681 # this file context was created from a revision with a known
682 # descendant, we can (lazily) correct for linkrev aliases
682 # descendant, we can (lazily) correct for linkrev aliases
683 return self._adjustlinkrev(self._path, self._filelog,
683 return self._adjustlinkrev(self._path, self._filelog,
684 self._filenode, self._descendantrev)
684 self._filenode, self._descendantrev)
685 else:
685 else:
686 return self._filelog.linkrev(self._filerev)
686 return self._filelog.linkrev(self._filerev)
687
687
688 @propertycache
688 @propertycache
689 def _filenode(self):
689 def _filenode(self):
690 if '_fileid' in self.__dict__:
690 if '_fileid' in self.__dict__:
691 return self._filelog.lookup(self._fileid)
691 return self._filelog.lookup(self._fileid)
692 else:
692 else:
693 return self._changectx.filenode(self._path)
693 return self._changectx.filenode(self._path)
694
694
695 @propertycache
695 @propertycache
696 def _filerev(self):
696 def _filerev(self):
697 return self._filelog.rev(self._filenode)
697 return self._filelog.rev(self._filenode)
698
698
699 @propertycache
699 @propertycache
700 def _repopath(self):
700 def _repopath(self):
701 return self._path
701 return self._path
702
702
703 def __nonzero__(self):
703 def __nonzero__(self):
704 try:
704 try:
705 self._filenode
705 self._filenode
706 return True
706 return True
707 except error.LookupError:
707 except error.LookupError:
708 # file is missing
708 # file is missing
709 return False
709 return False
710
710
711 def __str__(self):
711 def __str__(self):
712 return "%s@%s" % (self.path(), self._changectx)
712 return "%s@%s" % (self.path(), self._changectx)
713
713
714 def __repr__(self):
714 def __repr__(self):
715 return "<%s %s>" % (type(self).__name__, str(self))
715 return "<%s %s>" % (type(self).__name__, str(self))
716
716
717 def __hash__(self):
717 def __hash__(self):
718 try:
718 try:
719 return hash((self._path, self._filenode))
719 return hash((self._path, self._filenode))
720 except AttributeError:
720 except AttributeError:
721 return id(self)
721 return id(self)
722
722
723 def __eq__(self, other):
723 def __eq__(self, other):
724 try:
724 try:
725 return (type(self) == type(other) and self._path == other._path
725 return (type(self) == type(other) and self._path == other._path
726 and self._filenode == other._filenode)
726 and self._filenode == other._filenode)
727 except AttributeError:
727 except AttributeError:
728 return False
728 return False
729
729
730 def __ne__(self, other):
730 def __ne__(self, other):
731 return not (self == other)
731 return not (self == other)
732
732
733 def filerev(self):
733 def filerev(self):
734 return self._filerev
734 return self._filerev
735 def filenode(self):
735 def filenode(self):
736 return self._filenode
736 return self._filenode
737 def flags(self):
737 def flags(self):
738 return self._changectx.flags(self._path)
738 return self._changectx.flags(self._path)
739 def filelog(self):
739 def filelog(self):
740 return self._filelog
740 return self._filelog
741 def rev(self):
741 def rev(self):
742 return self._changeid
742 return self._changeid
743 def linkrev(self):
743 def linkrev(self):
744 return self._filelog.linkrev(self._filerev)
744 return self._filelog.linkrev(self._filerev)
745 def node(self):
745 def node(self):
746 return self._changectx.node()
746 return self._changectx.node()
747 def hex(self):
747 def hex(self):
748 return self._changectx.hex()
748 return self._changectx.hex()
749 def user(self):
749 def user(self):
750 return self._changectx.user()
750 return self._changectx.user()
751 def date(self):
751 def date(self):
752 return self._changectx.date()
752 return self._changectx.date()
753 def files(self):
753 def files(self):
754 return self._changectx.files()
754 return self._changectx.files()
755 def description(self):
755 def description(self):
756 return self._changectx.description()
756 return self._changectx.description()
757 def branch(self):
757 def branch(self):
758 return self._changectx.branch()
758 return self._changectx.branch()
759 def extra(self):
759 def extra(self):
760 return self._changectx.extra()
760 return self._changectx.extra()
761 def phase(self):
761 def phase(self):
762 return self._changectx.phase()
762 return self._changectx.phase()
763 def phasestr(self):
763 def phasestr(self):
764 return self._changectx.phasestr()
764 return self._changectx.phasestr()
765 def manifest(self):
765 def manifest(self):
766 return self._changectx.manifest()
766 return self._changectx.manifest()
767 def changectx(self):
767 def changectx(self):
768 return self._changectx
768 return self._changectx
769 def repo(self):
769 def repo(self):
770 return self._repo
770 return self._repo
771
771
772 def path(self):
772 def path(self):
773 return self._path
773 return self._path
774
774
775 def isbinary(self):
775 def isbinary(self):
776 try:
776 try:
777 return util.binary(self.data())
777 return util.binary(self.data())
778 except IOError:
778 except IOError:
779 return False
779 return False
780 def isexec(self):
780 def isexec(self):
781 return 'x' in self.flags()
781 return 'x' in self.flags()
782 def islink(self):
782 def islink(self):
783 return 'l' in self.flags()
783 return 'l' in self.flags()
784
784
785 def isabsent(self):
785 def isabsent(self):
786 """whether this filectx represents a file not in self._changectx
786 """whether this filectx represents a file not in self._changectx
787
787
788 This is mainly for merge code to detect change/delete conflicts. This is
788 This is mainly for merge code to detect change/delete conflicts. This is
789 expected to be True for all subclasses of basectx."""
789 expected to be True for all subclasses of basectx."""
790 return False
790 return False
791
791
792 _customcmp = False
792 _customcmp = False
793 def cmp(self, fctx):
793 def cmp(self, fctx):
794 """compare with other file context
794 """compare with other file context
795
795
796 returns True if different than fctx.
796 returns True if different than fctx.
797 """
797 """
798 if fctx._customcmp:
798 if fctx._customcmp:
799 return fctx.cmp(self)
799 return fctx.cmp(self)
800
800
801 if (fctx._filenode is None
801 if (fctx._filenode is None
802 and (self._repo._encodefilterpats
802 and (self._repo._encodefilterpats
803 # if file data starts with '\1\n', empty metadata block is
803 # if file data starts with '\1\n', empty metadata block is
804 # prepended, which adds 4 bytes to filelog.size().
804 # prepended, which adds 4 bytes to filelog.size().
805 or self.size() - 4 == fctx.size())
805 or self.size() - 4 == fctx.size())
806 or self.size() == fctx.size()):
806 or self.size() == fctx.size()):
807 return self._filelog.cmp(self._filenode, fctx.data())
807 return self._filelog.cmp(self._filenode, fctx.data())
808
808
809 return True
809 return True
810
810
811 def _adjustlinkrev(self, path, filelog, fnode, srcrev, inclusive=False):
811 def _adjustlinkrev(self, path, filelog, fnode, srcrev, inclusive=False):
812 """return the first ancestor of <srcrev> introducing <fnode>
812 """return the first ancestor of <srcrev> introducing <fnode>
813
813
814 If the linkrev of the file revision does not point to an ancestor of
814 If the linkrev of the file revision does not point to an ancestor of
815 srcrev, we'll walk down the ancestors until we find one introducing
815 srcrev, we'll walk down the ancestors until we find one introducing
816 this file revision.
816 this file revision.
817
817
818 :repo: a localrepository object (used to access changelog and manifest)
818 :repo: a localrepository object (used to access changelog and manifest)
819 :path: the file path
819 :path: the file path
820 :fnode: the nodeid of the file revision
820 :fnode: the nodeid of the file revision
821 :filelog: the filelog of this path
821 :filelog: the filelog of this path
822 :srcrev: the changeset revision we search ancestors from
822 :srcrev: the changeset revision we search ancestors from
823 :inclusive: if true, the src revision will also be checked
823 :inclusive: if true, the src revision will also be checked
824 """
824 """
825 repo = self._repo
825 repo = self._repo
826 cl = repo.unfiltered().changelog
826 cl = repo.unfiltered().changelog
827 ma = repo.manifest
827 mfl = repo.manifestlog
828 # fetch the linkrev
828 # fetch the linkrev
829 fr = filelog.rev(fnode)
829 fr = filelog.rev(fnode)
830 lkr = filelog.linkrev(fr)
830 lkr = filelog.linkrev(fr)
831 # hack to reuse ancestor computation when searching for renames
831 # hack to reuse ancestor computation when searching for renames
832 memberanc = getattr(self, '_ancestrycontext', None)
832 memberanc = getattr(self, '_ancestrycontext', None)
833 iteranc = None
833 iteranc = None
834 if srcrev is None:
834 if srcrev is None:
835 # wctx case, used by workingfilectx during mergecopy
835 # wctx case, used by workingfilectx during mergecopy
836 revs = [p.rev() for p in self._repo[None].parents()]
836 revs = [p.rev() for p in self._repo[None].parents()]
837 inclusive = True # we skipped the real (revless) source
837 inclusive = True # we skipped the real (revless) source
838 else:
838 else:
839 revs = [srcrev]
839 revs = [srcrev]
840 if memberanc is None:
840 if memberanc is None:
841 memberanc = iteranc = cl.ancestors(revs, lkr,
841 memberanc = iteranc = cl.ancestors(revs, lkr,
842 inclusive=inclusive)
842 inclusive=inclusive)
843 # check if this linkrev is an ancestor of srcrev
843 # check if this linkrev is an ancestor of srcrev
844 if lkr not in memberanc:
844 if lkr not in memberanc:
845 if iteranc is None:
845 if iteranc is None:
846 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
846 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
847 for a in iteranc:
847 for a in iteranc:
848 ac = cl.read(a) # get changeset data (we avoid object creation)
848 ac = cl.read(a) # get changeset data (we avoid object creation)
849 if path in ac[3]: # checking the 'files' field.
849 if path in ac[3]: # checking the 'files' field.
850 # The file has been touched, check if the content is
850 # The file has been touched, check if the content is
851 # similar to the one we search for.
851 # similar to the one we search for.
852 if fnode == ma.readfast(ac[0]).get(path):
852 if fnode == mfl[ac[0]].readfast().get(path):
853 return a
853 return a
854 # In theory, we should never get out of that loop without a result.
854 # In theory, we should never get out of that loop without a result.
855 # But if manifest uses a buggy file revision (not children of the
855 # But if manifest uses a buggy file revision (not children of the
856 # one it replaces) we could. Such a buggy situation will likely
856 # one it replaces) we could. Such a buggy situation will likely
857 # result is crash somewhere else at to some point.
857 # result is crash somewhere else at to some point.
858 return lkr
858 return lkr
859
859
860 def introrev(self):
860 def introrev(self):
861 """return the rev of the changeset which introduced this file revision
861 """return the rev of the changeset which introduced this file revision
862
862
863 This method is different from linkrev because it take into account the
863 This method is different from linkrev because it take into account the
864 changeset the filectx was created from. It ensures the returned
864 changeset the filectx was created from. It ensures the returned
865 revision is one of its ancestors. This prevents bugs from
865 revision is one of its ancestors. This prevents bugs from
866 'linkrev-shadowing' when a file revision is used by multiple
866 'linkrev-shadowing' when a file revision is used by multiple
867 changesets.
867 changesets.
868 """
868 """
869 lkr = self.linkrev()
869 lkr = self.linkrev()
870 attrs = vars(self)
870 attrs = vars(self)
871 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
871 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
872 if noctx or self.rev() == lkr:
872 if noctx or self.rev() == lkr:
873 return self.linkrev()
873 return self.linkrev()
874 return self._adjustlinkrev(self._path, self._filelog, self._filenode,
874 return self._adjustlinkrev(self._path, self._filelog, self._filenode,
875 self.rev(), inclusive=True)
875 self.rev(), inclusive=True)
876
876
877 def _parentfilectx(self, path, fileid, filelog):
877 def _parentfilectx(self, path, fileid, filelog):
878 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
878 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
879 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
879 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
880 if '_changeid' in vars(self) or '_changectx' in vars(self):
880 if '_changeid' in vars(self) or '_changectx' in vars(self):
881 # If self is associated with a changeset (probably explicitly
881 # If self is associated with a changeset (probably explicitly
882 # fed), ensure the created filectx is associated with a
882 # fed), ensure the created filectx is associated with a
883 # changeset that is an ancestor of self.changectx.
883 # changeset that is an ancestor of self.changectx.
884 # This lets us later use _adjustlinkrev to get a correct link.
884 # This lets us later use _adjustlinkrev to get a correct link.
885 fctx._descendantrev = self.rev()
885 fctx._descendantrev = self.rev()
886 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
886 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
887 elif '_descendantrev' in vars(self):
887 elif '_descendantrev' in vars(self):
888 # Otherwise propagate _descendantrev if we have one associated.
888 # Otherwise propagate _descendantrev if we have one associated.
889 fctx._descendantrev = self._descendantrev
889 fctx._descendantrev = self._descendantrev
890 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
890 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
891 return fctx
891 return fctx
892
892
893 def parents(self):
893 def parents(self):
894 _path = self._path
894 _path = self._path
895 fl = self._filelog
895 fl = self._filelog
896 parents = self._filelog.parents(self._filenode)
896 parents = self._filelog.parents(self._filenode)
897 pl = [(_path, node, fl) for node in parents if node != nullid]
897 pl = [(_path, node, fl) for node in parents if node != nullid]
898
898
899 r = fl.renamed(self._filenode)
899 r = fl.renamed(self._filenode)
900 if r:
900 if r:
901 # - In the simple rename case, both parent are nullid, pl is empty.
901 # - In the simple rename case, both parent are nullid, pl is empty.
902 # - In case of merge, only one of the parent is null id and should
902 # - In case of merge, only one of the parent is null id and should
903 # be replaced with the rename information. This parent is -always-
903 # be replaced with the rename information. This parent is -always-
904 # the first one.
904 # the first one.
905 #
905 #
906 # As null id have always been filtered out in the previous list
906 # As null id have always been filtered out in the previous list
907 # comprehension, inserting to 0 will always result in "replacing
907 # comprehension, inserting to 0 will always result in "replacing
908 # first nullid parent with rename information.
908 # first nullid parent with rename information.
909 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
909 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
910
910
911 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
911 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
912
912
913 def p1(self):
913 def p1(self):
914 return self.parents()[0]
914 return self.parents()[0]
915
915
916 def p2(self):
916 def p2(self):
917 p = self.parents()
917 p = self.parents()
918 if len(p) == 2:
918 if len(p) == 2:
919 return p[1]
919 return p[1]
920 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
920 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
921
921
922 def annotate(self, follow=False, linenumber=False, diffopts=None):
922 def annotate(self, follow=False, linenumber=False, diffopts=None):
923 '''returns a list of tuples of ((ctx, number), line) for each line
923 '''returns a list of tuples of ((ctx, number), line) for each line
924 in the file, where ctx is the filectx of the node where
924 in the file, where ctx is the filectx of the node where
925 that line was last changed; if linenumber parameter is true, number is
925 that line was last changed; if linenumber parameter is true, number is
926 the line number at the first appearance in the managed file, otherwise,
926 the line number at the first appearance in the managed file, otherwise,
927 number has a fixed value of False.
927 number has a fixed value of False.
928 '''
928 '''
929
929
930 def lines(text):
930 def lines(text):
931 if text.endswith("\n"):
931 if text.endswith("\n"):
932 return text.count("\n")
932 return text.count("\n")
933 return text.count("\n") + 1
933 return text.count("\n") + 1
934
934
935 if linenumber:
935 if linenumber:
936 def decorate(text, rev):
936 def decorate(text, rev):
937 return ([(rev, i) for i in xrange(1, lines(text) + 1)], text)
937 return ([(rev, i) for i in xrange(1, lines(text) + 1)], text)
938 else:
938 else:
939 def decorate(text, rev):
939 def decorate(text, rev):
940 return ([(rev, False)] * lines(text), text)
940 return ([(rev, False)] * lines(text), text)
941
941
942 def pair(parent, child):
942 def pair(parent, child):
943 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts,
943 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts,
944 refine=True)
944 refine=True)
945 for (a1, a2, b1, b2), t in blocks:
945 for (a1, a2, b1, b2), t in blocks:
946 # Changed blocks ('!') or blocks made only of blank lines ('~')
946 # Changed blocks ('!') or blocks made only of blank lines ('~')
947 # belong to the child.
947 # belong to the child.
948 if t == '=':
948 if t == '=':
949 child[0][b1:b2] = parent[0][a1:a2]
949 child[0][b1:b2] = parent[0][a1:a2]
950 return child
950 return child
951
951
952 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
952 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
953
953
954 def parents(f):
954 def parents(f):
955 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
955 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
956 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
956 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
957 # from the topmost introrev (= srcrev) down to p.linkrev() if it
957 # from the topmost introrev (= srcrev) down to p.linkrev() if it
958 # isn't an ancestor of the srcrev.
958 # isn't an ancestor of the srcrev.
959 f._changeid
959 f._changeid
960 pl = f.parents()
960 pl = f.parents()
961
961
962 # Don't return renamed parents if we aren't following.
962 # Don't return renamed parents if we aren't following.
963 if not follow:
963 if not follow:
964 pl = [p for p in pl if p.path() == f.path()]
964 pl = [p for p in pl if p.path() == f.path()]
965
965
966 # renamed filectx won't have a filelog yet, so set it
966 # renamed filectx won't have a filelog yet, so set it
967 # from the cache to save time
967 # from the cache to save time
968 for p in pl:
968 for p in pl:
969 if not '_filelog' in p.__dict__:
969 if not '_filelog' in p.__dict__:
970 p._filelog = getlog(p.path())
970 p._filelog = getlog(p.path())
971
971
972 return pl
972 return pl
973
973
974 # use linkrev to find the first changeset where self appeared
974 # use linkrev to find the first changeset where self appeared
975 base = self
975 base = self
976 introrev = self.introrev()
976 introrev = self.introrev()
977 if self.rev() != introrev:
977 if self.rev() != introrev:
978 base = self.filectx(self.filenode(), changeid=introrev)
978 base = self.filectx(self.filenode(), changeid=introrev)
979 if getattr(base, '_ancestrycontext', None) is None:
979 if getattr(base, '_ancestrycontext', None) is None:
980 cl = self._repo.changelog
980 cl = self._repo.changelog
981 if introrev is None:
981 if introrev is None:
982 # wctx is not inclusive, but works because _ancestrycontext
982 # wctx is not inclusive, but works because _ancestrycontext
983 # is used to test filelog revisions
983 # is used to test filelog revisions
984 ac = cl.ancestors([p.rev() for p in base.parents()],
984 ac = cl.ancestors([p.rev() for p in base.parents()],
985 inclusive=True)
985 inclusive=True)
986 else:
986 else:
987 ac = cl.ancestors([introrev], inclusive=True)
987 ac = cl.ancestors([introrev], inclusive=True)
988 base._ancestrycontext = ac
988 base._ancestrycontext = ac
989
989
990 # This algorithm would prefer to be recursive, but Python is a
990 # This algorithm would prefer to be recursive, but Python is a
991 # bit recursion-hostile. Instead we do an iterative
991 # bit recursion-hostile. Instead we do an iterative
992 # depth-first search.
992 # depth-first search.
993
993
994 # 1st DFS pre-calculates pcache and needed
994 # 1st DFS pre-calculates pcache and needed
995 visit = [base]
995 visit = [base]
996 pcache = {}
996 pcache = {}
997 needed = {base: 1}
997 needed = {base: 1}
998 while visit:
998 while visit:
999 f = visit.pop()
999 f = visit.pop()
1000 if f in pcache:
1000 if f in pcache:
1001 continue
1001 continue
1002 pl = parents(f)
1002 pl = parents(f)
1003 pcache[f] = pl
1003 pcache[f] = pl
1004 for p in pl:
1004 for p in pl:
1005 needed[p] = needed.get(p, 0) + 1
1005 needed[p] = needed.get(p, 0) + 1
1006 if p not in pcache:
1006 if p not in pcache:
1007 visit.append(p)
1007 visit.append(p)
1008
1008
1009 # 2nd DFS does the actual annotate
1009 # 2nd DFS does the actual annotate
1010 visit[:] = [base]
1010 visit[:] = [base]
1011 hist = {}
1011 hist = {}
1012 while visit:
1012 while visit:
1013 f = visit[-1]
1013 f = visit[-1]
1014 if f in hist:
1014 if f in hist:
1015 visit.pop()
1015 visit.pop()
1016 continue
1016 continue
1017
1017
1018 ready = True
1018 ready = True
1019 pl = pcache[f]
1019 pl = pcache[f]
1020 for p in pl:
1020 for p in pl:
1021 if p not in hist:
1021 if p not in hist:
1022 ready = False
1022 ready = False
1023 visit.append(p)
1023 visit.append(p)
1024 if ready:
1024 if ready:
1025 visit.pop()
1025 visit.pop()
1026 curr = decorate(f.data(), f)
1026 curr = decorate(f.data(), f)
1027 for p in pl:
1027 for p in pl:
1028 curr = pair(hist[p], curr)
1028 curr = pair(hist[p], curr)
1029 if needed[p] == 1:
1029 if needed[p] == 1:
1030 del hist[p]
1030 del hist[p]
1031 del needed[p]
1031 del needed[p]
1032 else:
1032 else:
1033 needed[p] -= 1
1033 needed[p] -= 1
1034
1034
1035 hist[f] = curr
1035 hist[f] = curr
1036 del pcache[f]
1036 del pcache[f]
1037
1037
1038 return zip(hist[base][0], hist[base][1].splitlines(True))
1038 return zip(hist[base][0], hist[base][1].splitlines(True))
1039
1039
1040 def ancestors(self, followfirst=False):
1040 def ancestors(self, followfirst=False):
1041 visit = {}
1041 visit = {}
1042 c = self
1042 c = self
1043 if followfirst:
1043 if followfirst:
1044 cut = 1
1044 cut = 1
1045 else:
1045 else:
1046 cut = None
1046 cut = None
1047
1047
1048 while True:
1048 while True:
1049 for parent in c.parents()[:cut]:
1049 for parent in c.parents()[:cut]:
1050 visit[(parent.linkrev(), parent.filenode())] = parent
1050 visit[(parent.linkrev(), parent.filenode())] = parent
1051 if not visit:
1051 if not visit:
1052 break
1052 break
1053 c = visit.pop(max(visit))
1053 c = visit.pop(max(visit))
1054 yield c
1054 yield c
1055
1055
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node.

        At least one of changeid, fileid, or changectx must be supplied;
        whichever attributes are not set eagerly here are computed lazily
        via the propertycache properties on this class and its base.
        """
        self._repo = repo
        self._path = path

        assert (changeid is not None
                or fileid is not None
                or changectx is not None), \
                ("bad args: changeid=%r, fileid=%r, changectx=%r"
                 % (changeid, fileid, changectx))

        if filelog is not None:
            self._filelog = filelog

        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        try:
            return changectx(self._repo, self._changeid)
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to
            # build `changectx` for filtered revision. In such case we
            # fallback to creating `changectx` on the unfiltered version of
            # the repository. This fallback should not be an issue because
            # `changectx` from `filectx` are not used in complex operations
            # that care about filtering.
            #
            # This fallback is a cheap and dirty fix that prevent several
            # crashes. It does not ensure the behavior is correct. However
            # the behavior was not correct before filtering either and
            # "incorrect behavior" is seen as better as "crash"
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should
            # be considered when solving linkrev issue are on the table.
            return changectx(self._repo.unfiltered(), self._changeid)

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog, changeid=changeid)

    def data(self):
        """Return the file revision's contents.

        Censored nodes either return "" (censor.policy=ignore) or abort
        with a hint about the censor.policy setting.
        """
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config("censor", "policy", "abort") == "ignore":
                return ""
            raise error.Abort(_("censored node: %s") % short(self._filenode),
                              hint=_("set censor.policy to ignore errors"))

    def size(self):
        # Size as recorded in the filelog for this file revision.
        return self._filelog.size(self._filerev)

    def renamed(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return renamed

        if self.rev() == self.linkrev():
            return renamed

        # If a parent already carries this exact filenode, the rename was
        # introduced earlier, not by this changeset.
        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                # parent does not have the file at all; keep looking
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]
1153
1153
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        """Initialize an uncommitted context.

        text/user/date become the eventual commit metadata; any of them
        left unset is filled in lazily by the propertycache properties
        below. ``changes`` pre-seeds ``_status`` instead of running a
        full repo.status().
        """
        self._repo = repo
        self._rev = None
        self._node = None
        self._text = text
        if date:
            self._date = util.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise error.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
        if self._extra['branch'] == '':
            self._extra['branch'] = 'default'

    def __str__(self):
        return str(self._parents[0]) + "+"

    def __nonzero__(self):
        return True

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func

    @propertycache
    def _flagfunc(self):
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status

        This reuse the file nodeid from parent, but we append an extra letter
        when modified. Modified files get an extra 'm' while added files get
        an extra 'a'. This is used by manifests merge to see that files
        are different and by update logic to avoid deleting newly added files.
        """
        parents = self.parents()

        man1 = parents[0].manifest()
        man = man1.copy()
        if len(parents) > 1:
            man2 = self.p2().manifest()
            def getman(f):
                if f in man1:
                    return man1
                return man2
        else:
            getman = lambda f: man1

        copied = self._repo.dirstate.copies()
        ff = self._flagfunc
        for i, l in (("a", self._status.added), ("m", self._status.modified)):
            for f in l:
                orig = copied.get(f, f)
                man[f] = getman(orig).get(orig, nullid) + i
                try:
                    man.setflag(f, ff(f))
                except OSError:
                    # flag lookup can fail for files missing on disk
                    pass

        for f in self._status.deleted + self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        return util.makedate()

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        return None
    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)

    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def tags(self):
        return []

    def bookmarks(self):
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        phase = phases.draft # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        # Prefer the already-built manifest if _manifest was computed.
        if '_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def walk(self, match):
        '''Generates matching file names.'''
        return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
                                               True, False))

    def matches(self, match):
        return sorted(self._repo.dirstate.matches(match))

    def ancestors(self):
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]):
            yield changectx(self._repo, a)

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

        self._repo.dirstate.beginparentchange()
        for f in self.modified() + self.added():
            self._repo.dirstate.normal(f)
        for f in self.removed():
            self._repo.dirstate.drop(f)
        self._repo.dirstate.setparents(node)
        self._repo.dirstate.endparentchange()

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())
1379
1379
1380 class workingctx(committablectx):
1380 class workingctx(committablectx):
1381 """A workingctx object makes access to data related to
1381 """A workingctx object makes access to data related to
1382 the current working directory convenient.
1382 the current working directory convenient.
1383 date - any valid date string or (unixtime, offset), or None.
1383 date - any valid date string or (unixtime, offset), or None.
1384 user - username string, or None.
1384 user - username string, or None.
1385 extra - a dictionary of extra values, or None.
1385 extra - a dictionary of extra values, or None.
1386 changes - a list of file lists as returned by localrepo.status()
1386 changes - a list of file lists as returned by localrepo.status()
1387 or None to use the repository status.
1387 or None to use the repository status.
1388 """
1388 """
1389 def __init__(self, repo, text="", user=None, date=None, extra=None,
1389 def __init__(self, repo, text="", user=None, date=None, extra=None,
1390 changes=None):
1390 changes=None):
1391 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1391 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1392
1392
1393 def __iter__(self):
1393 def __iter__(self):
1394 d = self._repo.dirstate
1394 d = self._repo.dirstate
1395 for f in d:
1395 for f in d:
1396 if d[f] != 'r':
1396 if d[f] != 'r':
1397 yield f
1397 yield f
1398
1398
1399 def __contains__(self, key):
1399 def __contains__(self, key):
1400 return self._repo.dirstate[key] not in "?r"
1400 return self._repo.dirstate[key] not in "?r"
1401
1401
1402 def hex(self):
1402 def hex(self):
1403 return hex(wdirid)
1403 return hex(wdirid)
1404
1404
1405 @propertycache
1405 @propertycache
1406 def _parents(self):
1406 def _parents(self):
1407 p = self._repo.dirstate.parents()
1407 p = self._repo.dirstate.parents()
1408 if p[1] == nullid:
1408 if p[1] == nullid:
1409 p = p[:-1]
1409 p = p[:-1]
1410 return [changectx(self._repo, x) for x in p]
1410 return [changectx(self._repo, x) for x in p]
1411
1411
1412 def filectx(self, path, filelog=None):
1412 def filectx(self, path, filelog=None):
1413 """get a file context from the working directory"""
1413 """get a file context from the working directory"""
1414 return workingfilectx(self._repo, path, workingctx=self,
1414 return workingfilectx(self._repo, path, workingctx=self,
1415 filelog=filelog)
1415 filelog=filelog)
1416
1416
1417 def dirty(self, missing=False, merge=True, branch=True):
1417 def dirty(self, missing=False, merge=True, branch=True):
1418 "check whether a working directory is modified"
1418 "check whether a working directory is modified"
1419 # check subrepos first
1419 # check subrepos first
1420 for s in sorted(self.substate):
1420 for s in sorted(self.substate):
1421 if self.sub(s).dirty():
1421 if self.sub(s).dirty():
1422 return True
1422 return True
1423 # check current working dir
1423 # check current working dir
1424 return ((merge and self.p2()) or
1424 return ((merge and self.p2()) or
1425 (branch and self.branch() != self.p1().branch()) or
1425 (branch and self.branch() != self.p1().branch()) or
1426 self.modified() or self.added() or self.removed() or
1426 self.modified() or self.added() or self.removed() or
1427 (missing and self.deleted()))
1427 (missing and self.deleted()))
1428
1428
1429 def add(self, list, prefix=""):
1429 def add(self, list, prefix=""):
1430 join = lambda f: os.path.join(prefix, f)
1430 join = lambda f: os.path.join(prefix, f)
1431 with self._repo.wlock():
1431 with self._repo.wlock():
1432 ui, ds = self._repo.ui, self._repo.dirstate
1432 ui, ds = self._repo.ui, self._repo.dirstate
1433 rejected = []
1433 rejected = []
1434 lstat = self._repo.wvfs.lstat
1434 lstat = self._repo.wvfs.lstat
1435 for f in list:
1435 for f in list:
1436 scmutil.checkportable(ui, join(f))
1436 scmutil.checkportable(ui, join(f))
1437 try:
1437 try:
1438 st = lstat(f)
1438 st = lstat(f)
1439 except OSError:
1439 except OSError:
1440 ui.warn(_("%s does not exist!\n") % join(f))
1440 ui.warn(_("%s does not exist!\n") % join(f))
1441 rejected.append(f)
1441 rejected.append(f)
1442 continue
1442 continue
1443 if st.st_size > 10000000:
1443 if st.st_size > 10000000:
1444 ui.warn(_("%s: up to %d MB of RAM may be required "
1444 ui.warn(_("%s: up to %d MB of RAM may be required "
1445 "to manage this file\n"
1445 "to manage this file\n"
1446 "(use 'hg revert %s' to cancel the "
1446 "(use 'hg revert %s' to cancel the "
1447 "pending addition)\n")
1447 "pending addition)\n")
1448 % (f, 3 * st.st_size // 1000000, join(f)))
1448 % (f, 3 * st.st_size // 1000000, join(f)))
1449 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1449 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1450 ui.warn(_("%s not added: only files and symlinks "
1450 ui.warn(_("%s not added: only files and symlinks "
1451 "supported currently\n") % join(f))
1451 "supported currently\n") % join(f))
1452 rejected.append(f)
1452 rejected.append(f)
1453 elif ds[f] in 'amn':
1453 elif ds[f] in 'amn':
1454 ui.warn(_("%s already tracked!\n") % join(f))
1454 ui.warn(_("%s already tracked!\n") % join(f))
1455 elif ds[f] == 'r':
1455 elif ds[f] == 'r':
1456 ds.normallookup(f)
1456 ds.normallookup(f)
1457 else:
1457 else:
1458 ds.add(f)
1458 ds.add(f)
1459 return rejected
1459 return rejected
1460
1460
1461 def forget(self, files, prefix=""):
1461 def forget(self, files, prefix=""):
1462 join = lambda f: os.path.join(prefix, f)
1462 join = lambda f: os.path.join(prefix, f)
1463 with self._repo.wlock():
1463 with self._repo.wlock():
1464 rejected = []
1464 rejected = []
1465 for f in files:
1465 for f in files:
1466 if f not in self._repo.dirstate:
1466 if f not in self._repo.dirstate:
1467 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1467 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1468 rejected.append(f)
1468 rejected.append(f)
1469 elif self._repo.dirstate[f] != 'a':
1469 elif self._repo.dirstate[f] != 'a':
1470 self._repo.dirstate.remove(f)
1470 self._repo.dirstate.remove(f)
1471 else:
1471 else:
1472 self._repo.dirstate.drop(f)
1472 self._repo.dirstate.drop(f)
1473 return rejected
1473 return rejected
1474
1474
1475 def undelete(self, list):
1475 def undelete(self, list):
1476 pctxs = self.parents()
1476 pctxs = self.parents()
1477 with self._repo.wlock():
1477 with self._repo.wlock():
1478 for f in list:
1478 for f in list:
1479 if self._repo.dirstate[f] != 'r':
1479 if self._repo.dirstate[f] != 'r':
1480 self._repo.ui.warn(_("%s not removed!\n") % f)
1480 self._repo.ui.warn(_("%s not removed!\n") % f)
1481 else:
1481 else:
1482 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1482 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1483 t = fctx.data()
1483 t = fctx.data()
1484 self._repo.wwrite(f, t, fctx.flags())
1484 self._repo.wwrite(f, t, fctx.flags())
1485 self._repo.dirstate.normal(f)
1485 self._repo.dirstate.normal(f)
1486
1486
1487 def copy(self, source, dest):
1487 def copy(self, source, dest):
1488 try:
1488 try:
1489 st = self._repo.wvfs.lstat(dest)
1489 st = self._repo.wvfs.lstat(dest)
1490 except OSError as err:
1490 except OSError as err:
1491 if err.errno != errno.ENOENT:
1491 if err.errno != errno.ENOENT:
1492 raise
1492 raise
1493 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1493 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1494 return
1494 return
1495 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1495 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1496 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1496 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1497 "symbolic link\n") % dest)
1497 "symbolic link\n") % dest)
1498 else:
1498 else:
1499 with self._repo.wlock():
1499 with self._repo.wlock():
1500 if self._repo.dirstate[dest] in '?':
1500 if self._repo.dirstate[dest] in '?':
1501 self._repo.dirstate.add(dest)
1501 self._repo.dirstate.add(dest)
1502 elif self._repo.dirstate[dest] in 'r':
1502 elif self._repo.dirstate[dest] in 'r':
1503 self._repo.dirstate.normallookup(dest)
1503 self._repo.dirstate.normallookup(dest)
1504 self._repo.dirstate.copy(source, dest)
1504 self._repo.dirstate.copy(source, dest)
1505
1505
1506 def match(self, pats=[], include=None, exclude=None, default='glob',
1506 def match(self, pats=[], include=None, exclude=None, default='glob',
1507 listsubrepos=False, badfn=None):
1507 listsubrepos=False, badfn=None):
1508 r = self._repo
1508 r = self._repo
1509
1509
1510 # Only a case insensitive filesystem needs magic to translate user input
1510 # Only a case insensitive filesystem needs magic to translate user input
1511 # to actual case in the filesystem.
1511 # to actual case in the filesystem.
1512 if not util.fscasesensitive(r.root):
1512 if not util.fscasesensitive(r.root):
1513 return matchmod.icasefsmatcher(r.root, r.getcwd(), pats, include,
1513 return matchmod.icasefsmatcher(r.root, r.getcwd(), pats, include,
1514 exclude, default, r.auditor, self,
1514 exclude, default, r.auditor, self,
1515 listsubrepos=listsubrepos,
1515 listsubrepos=listsubrepos,
1516 badfn=badfn)
1516 badfn=badfn)
1517 return matchmod.match(r.root, r.getcwd(), pats,
1517 return matchmod.match(r.root, r.getcwd(), pats,
1518 include, exclude, default,
1518 include, exclude, default,
1519 auditor=r.auditor, ctx=self,
1519 auditor=r.auditor, ctx=self,
1520 listsubrepos=listsubrepos, badfn=badfn)
1520 listsubrepos=listsubrepos, badfn=badfn)
1521
1521
1522 def _filtersuspectsymlink(self, files):
1522 def _filtersuspectsymlink(self, files):
1523 if not files or self._repo.dirstate._checklink:
1523 if not files or self._repo.dirstate._checklink:
1524 return files
1524 return files
1525
1525
1526 # Symlink placeholders may get non-symlink-like contents
1526 # Symlink placeholders may get non-symlink-like contents
1527 # via user error or dereferencing by NFS or Samba servers,
1527 # via user error or dereferencing by NFS or Samba servers,
1528 # so we filter out any placeholders that don't look like a
1528 # so we filter out any placeholders that don't look like a
1529 # symlink
1529 # symlink
1530 sane = []
1530 sane = []
1531 for f in files:
1531 for f in files:
1532 if self.flags(f) == 'l':
1532 if self.flags(f) == 'l':
1533 d = self[f].data()
1533 d = self[f].data()
1534 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1534 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1535 self._repo.ui.debug('ignoring suspect symlink placeholder'
1535 self._repo.ui.debug('ignoring suspect symlink placeholder'
1536 ' "%s"\n' % f)
1536 ' "%s"\n' % f)
1537 continue
1537 continue
1538 sane.append(f)
1538 sane.append(f)
1539 return sane
1539 return sane
1540
1540
    def _checklookup(self, files):
        """Recheck files the dirstate could not classify from stat data alone.

        ``files`` are paths with ambiguous dirstate entries; each is fully
        compared (flags and content) against the first parent. Returns a
        pair of lists ``(modified, fixup)``: ``modified`` holds files that
        really differ, ``fixup`` holds files that turned out clean. As a
        side effect, clean files are marked normal in the dirstate when the
        wlock can be taken without blocking.
        """
        # check for any possibly clean files
        if not files:
            return [], []

        modified = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            if (f not in pctx or self.flags(f) != pctx.flags(f)
                or pctx[f].cmp(self[f])):
                modified.append(f)
            else:
                fixup.append(f)

        # update dirstate for files that are actually clean
        if fixup:
            try:
                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                with self._repo.wlock(False):
                    normal = self._repo.dirstate.normal
                    for f in fixup:
                        normal(f)
                    # write changes out explicitly, because nesting
                    # wlock at runtime may prevent 'wlock.release()'
                    # after this block from doing so for subsequent
                    # changing files
                    self._repo.dirstate.write(self._repo.currenttransaction())
            except error.LockError:
                # best effort only: somebody else holds the lock
                pass
        return modified, fixup
1576
1576
    def _manifestmatches(self, match, s):
        """Slow path for workingctx

        The fast path is when we compare the working directory to its parent
        which means this function is comparing with a non-parent; therefore we
        need to build a manifest and return what matches.
        """
        # start from '.' and overlay the working directory changes in ``s``
        mf = self._repo['.']._manifestmatches(match, s)
        for f in s.modified + s.added:
            # _newnode is a phony hash marking content that only exists in
            # the working copy (no real manifest node yet)
            mf[f] = _newnode
            mf.setflag(f, self.flags(f))
        for f in s.removed:
            if f in mf:
                del mf[f]
        return mf
1592
1592
    def _dirstatestatus(self, match=None, ignored=False, clean=False,
                        unknown=False):
        '''Gets the status from the dirstate -- internal use only.

        Returns a scmutil.status tuple. May also populate ``self._status``
        as a cache when ``match`` matches everything.
        '''
        listignored, listclean, listunknown = ignored, clean, unknown
        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        subrepos = []
        if '.hgsub' in self:
            subrepos = sorted(self.substate)
        # ``cmp`` lists files whose state is ambiguous and needs a full
        # content comparison (see _checklookup)
        cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
                                            listclean, listunknown)

        # check for any possibly clean files
        if cmp:
            modified2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)

            # update dirstate for files that are actually clean
            if fixup and listclean:
                s.clean.extend(fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(s.modified, s.added, s.removed,
                                              s.deleted, [], [], [])
            else:
                self._status = s

        return s
1623
1623
    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo['.']:
            # comparing against a non-parent: fall back to the generic
            # manifest-based comparison in the base class
            s = super(workingctx, self)._buildstatus(other, s, match,
                                                     listignored, listclean,
                                                     listunknown)
        return s
1643
1643
1644 def _matchstatus(self, other, match):
1644 def _matchstatus(self, other, match):
1645 """override the match method with a filter for directory patterns
1645 """override the match method with a filter for directory patterns
1646
1646
1647 We use inheritance to customize the match.bad method only in cases of
1647 We use inheritance to customize the match.bad method only in cases of
1648 workingctx since it belongs only to the working directory when
1648 workingctx since it belongs only to the working directory when
1649 comparing against the parent changeset.
1649 comparing against the parent changeset.
1650
1650
1651 If we aren't comparing against the working directory's parent, then we
1651 If we aren't comparing against the working directory's parent, then we
1652 just use the default match object sent to us.
1652 just use the default match object sent to us.
1653 """
1653 """
1654 superself = super(workingctx, self)
1654 superself = super(workingctx, self)
1655 match = superself._matchstatus(other, match)
1655 match = superself._matchstatus(other, match)
1656 if other != self._repo['.']:
1656 if other != self._repo['.']:
1657 def bad(f, msg):
1657 def bad(f, msg):
1658 # 'f' may be a directory pattern from 'match.files()',
1658 # 'f' may be a directory pattern from 'match.files()',
1659 # so 'f not in ctx1' is not enough
1659 # so 'f not in ctx1' is not enough
1660 if f not in other and not other.hasdir(f):
1660 if f not in other and not other.hasdir(f):
1661 self._repo.ui.warn('%s: %s\n' %
1661 self._repo.ui.warn('%s: %s\n' %
1662 (self._repo.dirstate.pathto(f), msg))
1662 (self._repo.dirstate.pathto(f), msg))
1663 match.bad = bad
1663 match.bad = bad
1664 return match
1664 return match
1665
1665
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""
    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None

        # only set the filelog/changectx attributes when provided;
        # otherwise they stay unset here
        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        # a committable file context always "exists"
        return True

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def filenode(ctx, path):
            # nullid stands in for "not present in this manifest"
            return ctx._manifest.get(path, nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            # renamed() gives (sourcepath, sourcenode); the source's
            # filelog is unknown here, hence None
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        # drop parents in which the file doesn't exist (node == nullid)
        return [self._parentfilectx(p, fileid=n, filelog=l)
                for p, n, l in pl if n != nullid]

    def children(self):
        # an uncommitted file has no descendant file revisions
        return []
1710
1710
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""
    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        # default to a fresh workingctx when none was supplied to __init__
        return workingctx(self._repo)

    def data(self):
        # read the file's content from the working directory
        return self._repo.wread(self._path)
    def renamed(self):
        # returns (sourcepath, sourcenode) if this file was recorded as
        # copied in the dirstate, else None; the node comes from the first
        # parent's manifest (nullid if the source isn't tracked there)
        rp = self._repo.dirstate.copied(self._path)
        if not rp:
            return None
        return rp, self._changectx._parents[0]._manifest.get(rp, nullid)

    def size(self):
        # lstat so a symlink's own size is reported, not its target's
        return self._repo.wvfs.lstat(self._path).st_size
    def date(self):
        # (mtime, tzoffset) of the on-disk file; falls back to the
        # changectx date if the file has been removed meanwhile
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)

    def write(self, data, flags):
        """wraps repo.wwrite"""
        self._repo.wwrite(self._path, data, flags)
1756
1756
class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """
    def __init__(self, repo, changes,
                 text="", user=None, date=None, extra=None):
        # deliberately skip workingctx.__init__ and seed the grandparent
        # class with the precomputed status ('changes') of this commit
        super(workingctx, self).__init__(repo, text, user, date, extra,
                                         changes)

    def _dirstatestatus(self, match=None, ignored=False, clean=False,
                        unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        if clean:
            # every tracked file not part of this commit counts as clean
            clean = [f for f in self._manifest if f not in self._changedset]
        else:
            clean = []
        return scmutil.status([f for f in self._status.modified if match(f)],
                              [f for f in self._status.added if match(f)],
                              [f for f in self._status.removed if match(f)],
                              [], [], [], clean)

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        changed = set(self._status.modified)
        changed.update(self._status.added)
        changed.update(self._status.removed)
        return changed
1794
1794
def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    memo = {}

    def getfilectx(repo, memctx, path):
        # EAFP: hit the memo first, compute and store on a miss
        try:
            return memo[path]
        except KeyError:
            fctx = func(repo, memctx, path)
            memo[path] = fctx
            return fctx

    return getfilectx
1810
1810
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time while
    related files data and is made available through a callback
    mechanism.  'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn raises an
    IOError. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by util.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        # normalize missing parents (None) to nullid
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [changectx(self._repo, p) for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        self.substate = {}

        # if store is not callable, wrap it in a function
        if not callable(filectxfn):
            def getfilectx(repo, memctx, path):
                fctx = filectxfn[path]
                # this is weird but apparently we only keep track of one parent
                # (why not only store that instead of a tuple?)
                copied = fctx.renamed()
                if copied:
                    copied = copied[0]
                return memfilectx(repo, path, fctx.data(),
                                  islink=fctx.islink(), isexec=fctx.isexec(),
                                  copied=copied, memctx=memctx)
            self._filectxfn = getfilectx
        else:
            # memoizing increases performance for e.g. vcs convert scenarios.
            self._filectxfn = makecachingfilectxfn(filectxfn)

        if extra:
            self._extra = extra.copy()
        else:
            self._extra = {}

        if self._extra.get('branch', '') == '':
            self._extra['branch'] = 'default'

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            p1node = nullid
            p2node = nullid
            p = pctx[f].parents() # if file isn't in pctx, check p2?
            if len(p) > 0:
                p1node = p[0].filenode()
                if len(p) > 1:
                    p2node = p[1].filenode()
            # hash the new content against the parent file nodes
            man[f] = revlog.hash(self[f].data(), p1node, p2node)

        for f in self._status.added:
            man[f] = revlog.hash(self[f].data(), nullid, nullid)

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1
        # classify each file: untracked -> added; tracked with a truthy
        # filectx -> modified; tracked with no filectx -> removed
        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
1947
1947
class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, path, data, islink=False,
                 isexec=False, copied=None, memctx=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, memctx)
        self._data = data
        # build the manifest flag string from the boolean arguments
        flags = ''
        if islink:
            flags += 'l'
        if isexec:
            flags += 'x'
        self._flags = flags
        self._copied = (copied, nullid) if copied else None

    def data(self):
        return self._data

    def size(self):
        return len(self.data())

    def flags(self):
        return self._flags

    def renamed(self):
        return self._copied

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags):
        """wraps repo.wwrite"""
        self._data = data
@@ -1,1278 +1,1280 b''
1 # manifest.py - manifest revision class for mercurial
1 # manifest.py - manifest revision class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import array
10 import array
11 import heapq
11 import heapq
12 import os
12 import os
13 import struct
13 import struct
14
14
15 from .i18n import _
15 from .i18n import _
16 from . import (
16 from . import (
17 error,
17 error,
18 mdiff,
18 mdiff,
19 parsers,
19 parsers,
20 revlog,
20 revlog,
21 util,
21 util,
22 )
22 )
23
23
24 propertycache = util.propertycache
24 propertycache = util.propertycache
25
25
26 def _parsev1(data):
26 def _parsev1(data):
27 # This method does a little bit of excessive-looking
27 # This method does a little bit of excessive-looking
28 # precondition checking. This is so that the behavior of this
28 # precondition checking. This is so that the behavior of this
29 # class exactly matches its C counterpart to try and help
29 # class exactly matches its C counterpart to try and help
30 # prevent surprise breakage for anyone that develops against
30 # prevent surprise breakage for anyone that develops against
31 # the pure version.
31 # the pure version.
32 if data and data[-1] != '\n':
32 if data and data[-1] != '\n':
33 raise ValueError('Manifest did not end in a newline.')
33 raise ValueError('Manifest did not end in a newline.')
34 prev = None
34 prev = None
35 for l in data.splitlines():
35 for l in data.splitlines():
36 if prev is not None and prev > l:
36 if prev is not None and prev > l:
37 raise ValueError('Manifest lines not in sorted order.')
37 raise ValueError('Manifest lines not in sorted order.')
38 prev = l
38 prev = l
39 f, n = l.split('\0')
39 f, n = l.split('\0')
40 if len(n) > 40:
40 if len(n) > 40:
41 yield f, revlog.bin(n[:40]), n[40:]
41 yield f, revlog.bin(n[:40]), n[40:]
42 else:
42 else:
43 yield f, revlog.bin(n), ''
43 yield f, revlog.bin(n), ''
44
44
45 def _parsev2(data):
45 def _parsev2(data):
46 metadataend = data.find('\n')
46 metadataend = data.find('\n')
47 # Just ignore metadata for now
47 # Just ignore metadata for now
48 pos = metadataend + 1
48 pos = metadataend + 1
49 prevf = ''
49 prevf = ''
50 while pos < len(data):
50 while pos < len(data):
51 end = data.find('\n', pos + 1) # +1 to skip stem length byte
51 end = data.find('\n', pos + 1) # +1 to skip stem length byte
52 if end == -1:
52 if end == -1:
53 raise ValueError('Manifest ended with incomplete file entry.')
53 raise ValueError('Manifest ended with incomplete file entry.')
54 stemlen = ord(data[pos])
54 stemlen = ord(data[pos])
55 items = data[pos + 1:end].split('\0')
55 items = data[pos + 1:end].split('\0')
56 f = prevf[:stemlen] + items[0]
56 f = prevf[:stemlen] + items[0]
57 if prevf > f:
57 if prevf > f:
58 raise ValueError('Manifest entries not in sorted order.')
58 raise ValueError('Manifest entries not in sorted order.')
59 fl = items[1]
59 fl = items[1]
60 # Just ignore metadata (items[2:] for now)
60 # Just ignore metadata (items[2:] for now)
61 n = data[end + 1:end + 21]
61 n = data[end + 1:end + 21]
62 yield f, n, fl
62 yield f, n, fl
63 pos = end + 22
63 pos = end + 22
64 prevf = f
64 prevf = f
65
65
66 def _parse(data):
66 def _parse(data):
67 """Generates (path, node, flags) tuples from a manifest text"""
67 """Generates (path, node, flags) tuples from a manifest text"""
68 if data.startswith('\0'):
68 if data.startswith('\0'):
69 return iter(_parsev2(data))
69 return iter(_parsev2(data))
70 else:
70 else:
71 return iter(_parsev1(data))
71 return iter(_parsev1(data))
72
72
73 def _text(it, usemanifestv2):
73 def _text(it, usemanifestv2):
74 """Given an iterator over (path, node, flags) tuples, returns a manifest
74 """Given an iterator over (path, node, flags) tuples, returns a manifest
75 text"""
75 text"""
76 if usemanifestv2:
76 if usemanifestv2:
77 return _textv2(it)
77 return _textv2(it)
78 else:
78 else:
79 return _textv1(it)
79 return _textv1(it)
80
80
81 def _textv1(it):
81 def _textv1(it):
82 files = []
82 files = []
83 lines = []
83 lines = []
84 _hex = revlog.hex
84 _hex = revlog.hex
85 for f, n, fl in it:
85 for f, n, fl in it:
86 files.append(f)
86 files.append(f)
87 # if this is changed to support newlines in filenames,
87 # if this is changed to support newlines in filenames,
88 # be sure to check the templates/ dir again (especially *-raw.tmpl)
88 # be sure to check the templates/ dir again (especially *-raw.tmpl)
89 lines.append("%s\0%s%s\n" % (f, _hex(n), fl))
89 lines.append("%s\0%s%s\n" % (f, _hex(n), fl))
90
90
91 _checkforbidden(files)
91 _checkforbidden(files)
92 return ''.join(lines)
92 return ''.join(lines)
93
93
94 def _textv2(it):
94 def _textv2(it):
95 files = []
95 files = []
96 lines = ['\0\n']
96 lines = ['\0\n']
97 prevf = ''
97 prevf = ''
98 for f, n, fl in it:
98 for f, n, fl in it:
99 files.append(f)
99 files.append(f)
100 stem = os.path.commonprefix([prevf, f])
100 stem = os.path.commonprefix([prevf, f])
101 stemlen = min(len(stem), 255)
101 stemlen = min(len(stem), 255)
102 lines.append("%c%s\0%s\n%s\n" % (stemlen, f[stemlen:], fl, n))
102 lines.append("%c%s\0%s\n%s\n" % (stemlen, f[stemlen:], fl, n))
103 prevf = f
103 prevf = f
104 _checkforbidden(files)
104 _checkforbidden(files)
105 return ''.join(lines)
105 return ''.join(lines)
106
106
107 class _lazymanifest(dict):
107 class _lazymanifest(dict):
108 """This is the pure implementation of lazymanifest.
108 """This is the pure implementation of lazymanifest.
109
109
110 It has not been optimized *at all* and is not lazy.
110 It has not been optimized *at all* and is not lazy.
111 """
111 """
112
112
113 def __init__(self, data):
113 def __init__(self, data):
114 dict.__init__(self)
114 dict.__init__(self)
115 for f, n, fl in _parse(data):
115 for f, n, fl in _parse(data):
116 self[f] = n, fl
116 self[f] = n, fl
117
117
118 def __setitem__(self, k, v):
118 def __setitem__(self, k, v):
119 node, flag = v
119 node, flag = v
120 assert node is not None
120 assert node is not None
121 if len(node) > 21:
121 if len(node) > 21:
122 node = node[:21] # match c implementation behavior
122 node = node[:21] # match c implementation behavior
123 dict.__setitem__(self, k, (node, flag))
123 dict.__setitem__(self, k, (node, flag))
124
124
125 def __iter__(self):
125 def __iter__(self):
126 return iter(sorted(dict.keys(self)))
126 return iter(sorted(dict.keys(self)))
127
127
128 def iterkeys(self):
128 def iterkeys(self):
129 return iter(sorted(dict.keys(self)))
129 return iter(sorted(dict.keys(self)))
130
130
131 def iterentries(self):
131 def iterentries(self):
132 return ((f, e[0], e[1]) for f, e in sorted(self.iteritems()))
132 return ((f, e[0], e[1]) for f, e in sorted(self.iteritems()))
133
133
134 def copy(self):
134 def copy(self):
135 c = _lazymanifest('')
135 c = _lazymanifest('')
136 c.update(self)
136 c.update(self)
137 return c
137 return c
138
138
139 def diff(self, m2, clean=False):
139 def diff(self, m2, clean=False):
140 '''Finds changes between the current manifest and m2.'''
140 '''Finds changes between the current manifest and m2.'''
141 diff = {}
141 diff = {}
142
142
143 for fn, e1 in self.iteritems():
143 for fn, e1 in self.iteritems():
144 if fn not in m2:
144 if fn not in m2:
145 diff[fn] = e1, (None, '')
145 diff[fn] = e1, (None, '')
146 else:
146 else:
147 e2 = m2[fn]
147 e2 = m2[fn]
148 if e1 != e2:
148 if e1 != e2:
149 diff[fn] = e1, e2
149 diff[fn] = e1, e2
150 elif clean:
150 elif clean:
151 diff[fn] = None
151 diff[fn] = None
152
152
153 for fn, e2 in m2.iteritems():
153 for fn, e2 in m2.iteritems():
154 if fn not in self:
154 if fn not in self:
155 diff[fn] = (None, ''), e2
155 diff[fn] = (None, ''), e2
156
156
157 return diff
157 return diff
158
158
159 def filtercopy(self, filterfn):
159 def filtercopy(self, filterfn):
160 c = _lazymanifest('')
160 c = _lazymanifest('')
161 for f, n, fl in self.iterentries():
161 for f, n, fl in self.iterentries():
162 if filterfn(f):
162 if filterfn(f):
163 c[f] = n, fl
163 c[f] = n, fl
164 return c
164 return c
165
165
166 def text(self):
166 def text(self):
167 """Get the full data of this manifest as a bytestring."""
167 """Get the full data of this manifest as a bytestring."""
168 return _textv1(self.iterentries())
168 return _textv1(self.iterentries())
169
169
170 try:
170 try:
171 _lazymanifest = parsers.lazymanifest
171 _lazymanifest = parsers.lazymanifest
172 except AttributeError:
172 except AttributeError:
173 pass
173 pass
174
174
175 class manifestdict(object):
175 class manifestdict(object):
176 def __init__(self, data=''):
176 def __init__(self, data=''):
177 if data.startswith('\0'):
177 if data.startswith('\0'):
178 #_lazymanifest can not parse v2
178 #_lazymanifest can not parse v2
179 self._lm = _lazymanifest('')
179 self._lm = _lazymanifest('')
180 for f, n, fl in _parsev2(data):
180 for f, n, fl in _parsev2(data):
181 self._lm[f] = n, fl
181 self._lm[f] = n, fl
182 else:
182 else:
183 self._lm = _lazymanifest(data)
183 self._lm = _lazymanifest(data)
184
184
185 def __getitem__(self, key):
185 def __getitem__(self, key):
186 return self._lm[key][0]
186 return self._lm[key][0]
187
187
188 def find(self, key):
188 def find(self, key):
189 return self._lm[key]
189 return self._lm[key]
190
190
191 def __len__(self):
191 def __len__(self):
192 return len(self._lm)
192 return len(self._lm)
193
193
194 def __setitem__(self, key, node):
194 def __setitem__(self, key, node):
195 self._lm[key] = node, self.flags(key, '')
195 self._lm[key] = node, self.flags(key, '')
196
196
197 def __contains__(self, key):
197 def __contains__(self, key):
198 return key in self._lm
198 return key in self._lm
199
199
200 def __delitem__(self, key):
200 def __delitem__(self, key):
201 del self._lm[key]
201 del self._lm[key]
202
202
203 def __iter__(self):
203 def __iter__(self):
204 return self._lm.__iter__()
204 return self._lm.__iter__()
205
205
206 def iterkeys(self):
206 def iterkeys(self):
207 return self._lm.iterkeys()
207 return self._lm.iterkeys()
208
208
209 def keys(self):
209 def keys(self):
210 return list(self.iterkeys())
210 return list(self.iterkeys())
211
211
212 def filesnotin(self, m2):
212 def filesnotin(self, m2):
213 '''Set of files in this manifest that are not in the other'''
213 '''Set of files in this manifest that are not in the other'''
214 diff = self.diff(m2)
214 diff = self.diff(m2)
215 files = set(filepath
215 files = set(filepath
216 for filepath, hashflags in diff.iteritems()
216 for filepath, hashflags in diff.iteritems()
217 if hashflags[1][0] is None)
217 if hashflags[1][0] is None)
218 return files
218 return files
219
219
220 @propertycache
220 @propertycache
221 def _dirs(self):
221 def _dirs(self):
222 return util.dirs(self)
222 return util.dirs(self)
223
223
224 def dirs(self):
224 def dirs(self):
225 return self._dirs
225 return self._dirs
226
226
227 def hasdir(self, dir):
227 def hasdir(self, dir):
228 return dir in self._dirs
228 return dir in self._dirs
229
229
230 def _filesfastpath(self, match):
230 def _filesfastpath(self, match):
231 '''Checks whether we can correctly and quickly iterate over matcher
231 '''Checks whether we can correctly and quickly iterate over matcher
232 files instead of over manifest files.'''
232 files instead of over manifest files.'''
233 files = match.files()
233 files = match.files()
234 return (len(files) < 100 and (match.isexact() or
234 return (len(files) < 100 and (match.isexact() or
235 (match.prefix() and all(fn in self for fn in files))))
235 (match.prefix() and all(fn in self for fn in files))))
236
236
237 def walk(self, match):
237 def walk(self, match):
238 '''Generates matching file names.
238 '''Generates matching file names.
239
239
240 Equivalent to manifest.matches(match).iterkeys(), but without creating
240 Equivalent to manifest.matches(match).iterkeys(), but without creating
241 an entirely new manifest.
241 an entirely new manifest.
242
242
243 It also reports nonexistent files by marking them bad with match.bad().
243 It also reports nonexistent files by marking them bad with match.bad().
244 '''
244 '''
245 if match.always():
245 if match.always():
246 for f in iter(self):
246 for f in iter(self):
247 yield f
247 yield f
248 return
248 return
249
249
250 fset = set(match.files())
250 fset = set(match.files())
251
251
252 # avoid the entire walk if we're only looking for specific files
252 # avoid the entire walk if we're only looking for specific files
253 if self._filesfastpath(match):
253 if self._filesfastpath(match):
254 for fn in sorted(fset):
254 for fn in sorted(fset):
255 yield fn
255 yield fn
256 return
256 return
257
257
258 for fn in self:
258 for fn in self:
259 if fn in fset:
259 if fn in fset:
260 # specified pattern is the exact name
260 # specified pattern is the exact name
261 fset.remove(fn)
261 fset.remove(fn)
262 if match(fn):
262 if match(fn):
263 yield fn
263 yield fn
264
264
265 # for dirstate.walk, files=['.'] means "walk the whole tree".
265 # for dirstate.walk, files=['.'] means "walk the whole tree".
266 # follow that here, too
266 # follow that here, too
267 fset.discard('.')
267 fset.discard('.')
268
268
269 for fn in sorted(fset):
269 for fn in sorted(fset):
270 if not self.hasdir(fn):
270 if not self.hasdir(fn):
271 match.bad(fn, None)
271 match.bad(fn, None)
272
272
273 def matches(self, match):
273 def matches(self, match):
274 '''generate a new manifest filtered by the match argument'''
274 '''generate a new manifest filtered by the match argument'''
275 if match.always():
275 if match.always():
276 return self.copy()
276 return self.copy()
277
277
278 if self._filesfastpath(match):
278 if self._filesfastpath(match):
279 m = manifestdict()
279 m = manifestdict()
280 lm = self._lm
280 lm = self._lm
281 for fn in match.files():
281 for fn in match.files():
282 if fn in lm:
282 if fn in lm:
283 m._lm[fn] = lm[fn]
283 m._lm[fn] = lm[fn]
284 return m
284 return m
285
285
286 m = manifestdict()
286 m = manifestdict()
287 m._lm = self._lm.filtercopy(match)
287 m._lm = self._lm.filtercopy(match)
288 return m
288 return m
289
289
290 def diff(self, m2, clean=False):
290 def diff(self, m2, clean=False):
291 '''Finds changes between the current manifest and m2.
291 '''Finds changes between the current manifest and m2.
292
292
293 Args:
293 Args:
294 m2: the manifest to which this manifest should be compared.
294 m2: the manifest to which this manifest should be compared.
295 clean: if true, include files unchanged between these manifests
295 clean: if true, include files unchanged between these manifests
296 with a None value in the returned dictionary.
296 with a None value in the returned dictionary.
297
297
298 The result is returned as a dict with filename as key and
298 The result is returned as a dict with filename as key and
299 values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
299 values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
300 nodeid in the current/other manifest and fl1/fl2 is the flag
300 nodeid in the current/other manifest and fl1/fl2 is the flag
301 in the current/other manifest. Where the file does not exist,
301 in the current/other manifest. Where the file does not exist,
302 the nodeid will be None and the flags will be the empty
302 the nodeid will be None and the flags will be the empty
303 string.
303 string.
304 '''
304 '''
305 return self._lm.diff(m2._lm, clean)
305 return self._lm.diff(m2._lm, clean)
306
306
307 def setflag(self, key, flag):
307 def setflag(self, key, flag):
308 self._lm[key] = self[key], flag
308 self._lm[key] = self[key], flag
309
309
310 def get(self, key, default=None):
310 def get(self, key, default=None):
311 try:
311 try:
312 return self._lm[key][0]
312 return self._lm[key][0]
313 except KeyError:
313 except KeyError:
314 return default
314 return default
315
315
316 def flags(self, key, default=''):
316 def flags(self, key, default=''):
317 try:
317 try:
318 return self._lm[key][1]
318 return self._lm[key][1]
319 except KeyError:
319 except KeyError:
320 return default
320 return default
321
321
322 def copy(self):
322 def copy(self):
323 c = manifestdict()
323 c = manifestdict()
324 c._lm = self._lm.copy()
324 c._lm = self._lm.copy()
325 return c
325 return c
326
326
327 def iteritems(self):
327 def iteritems(self):
328 return (x[:2] for x in self._lm.iterentries())
328 return (x[:2] for x in self._lm.iterentries())
329
329
330 def iterentries(self):
330 def iterentries(self):
331 return self._lm.iterentries()
331 return self._lm.iterentries()
332
332
333 def text(self, usemanifestv2=False):
333 def text(self, usemanifestv2=False):
334 if usemanifestv2:
334 if usemanifestv2:
335 return _textv2(self._lm.iterentries())
335 return _textv2(self._lm.iterentries())
336 else:
336 else:
337 # use (probably) native version for v1
337 # use (probably) native version for v1
338 return self._lm.text()
338 return self._lm.text()
339
339
340 def fastdelta(self, base, changes):
340 def fastdelta(self, base, changes):
341 """Given a base manifest text as an array.array and a list of changes
341 """Given a base manifest text as an array.array and a list of changes
342 relative to that text, compute a delta that can be used by revlog.
342 relative to that text, compute a delta that can be used by revlog.
343 """
343 """
344 delta = []
344 delta = []
345 dstart = None
345 dstart = None
346 dend = None
346 dend = None
347 dline = [""]
347 dline = [""]
348 start = 0
348 start = 0
349 # zero copy representation of base as a buffer
349 # zero copy representation of base as a buffer
350 addbuf = util.buffer(base)
350 addbuf = util.buffer(base)
351
351
352 changes = list(changes)
352 changes = list(changes)
353 if len(changes) < 1000:
353 if len(changes) < 1000:
354 # start with a readonly loop that finds the offset of
354 # start with a readonly loop that finds the offset of
355 # each line and creates the deltas
355 # each line and creates the deltas
356 for f, todelete in changes:
356 for f, todelete in changes:
357 # bs will either be the index of the item or the insert point
357 # bs will either be the index of the item or the insert point
358 start, end = _msearch(addbuf, f, start)
358 start, end = _msearch(addbuf, f, start)
359 if not todelete:
359 if not todelete:
360 h, fl = self._lm[f]
360 h, fl = self._lm[f]
361 l = "%s\0%s%s\n" % (f, revlog.hex(h), fl)
361 l = "%s\0%s%s\n" % (f, revlog.hex(h), fl)
362 else:
362 else:
363 if start == end:
363 if start == end:
364 # item we want to delete was not found, error out
364 # item we want to delete was not found, error out
365 raise AssertionError(
365 raise AssertionError(
366 _("failed to remove %s from manifest") % f)
366 _("failed to remove %s from manifest") % f)
367 l = ""
367 l = ""
368 if dstart is not None and dstart <= start and dend >= start:
368 if dstart is not None and dstart <= start and dend >= start:
369 if dend < end:
369 if dend < end:
370 dend = end
370 dend = end
371 if l:
371 if l:
372 dline.append(l)
372 dline.append(l)
373 else:
373 else:
374 if dstart is not None:
374 if dstart is not None:
375 delta.append([dstart, dend, "".join(dline)])
375 delta.append([dstart, dend, "".join(dline)])
376 dstart = start
376 dstart = start
377 dend = end
377 dend = end
378 dline = [l]
378 dline = [l]
379
379
380 if dstart is not None:
380 if dstart is not None:
381 delta.append([dstart, dend, "".join(dline)])
381 delta.append([dstart, dend, "".join(dline)])
382 # apply the delta to the base, and get a delta for addrevision
382 # apply the delta to the base, and get a delta for addrevision
383 deltatext, arraytext = _addlistdelta(base, delta)
383 deltatext, arraytext = _addlistdelta(base, delta)
384 else:
384 else:
385 # For large changes, it's much cheaper to just build the text and
385 # For large changes, it's much cheaper to just build the text and
386 # diff it.
386 # diff it.
387 arraytext = array.array('c', self.text())
387 arraytext = array.array('c', self.text())
388 deltatext = mdiff.textdiff(base, arraytext)
388 deltatext = mdiff.textdiff(base, arraytext)
389
389
390 return arraytext, deltatext
390 return arraytext, deltatext
391
391
392 def _msearch(m, s, lo=0, hi=None):
392 def _msearch(m, s, lo=0, hi=None):
393 '''return a tuple (start, end) that says where to find s within m.
393 '''return a tuple (start, end) that says where to find s within m.
394
394
395 If the string is found m[start:end] are the line containing
395 If the string is found m[start:end] are the line containing
396 that string. If start == end the string was not found and
396 that string. If start == end the string was not found and
397 they indicate the proper sorted insertion point.
397 they indicate the proper sorted insertion point.
398
398
399 m should be a buffer or a string
399 m should be a buffer or a string
400 s is a string'''
400 s is a string'''
401 def advance(i, c):
401 def advance(i, c):
402 while i < lenm and m[i] != c:
402 while i < lenm and m[i] != c:
403 i += 1
403 i += 1
404 return i
404 return i
405 if not s:
405 if not s:
406 return (lo, lo)
406 return (lo, lo)
407 lenm = len(m)
407 lenm = len(m)
408 if not hi:
408 if not hi:
409 hi = lenm
409 hi = lenm
410 while lo < hi:
410 while lo < hi:
411 mid = (lo + hi) // 2
411 mid = (lo + hi) // 2
412 start = mid
412 start = mid
413 while start > 0 and m[start - 1] != '\n':
413 while start > 0 and m[start - 1] != '\n':
414 start -= 1
414 start -= 1
415 end = advance(start, '\0')
415 end = advance(start, '\0')
416 if m[start:end] < s:
416 if m[start:end] < s:
417 # we know that after the null there are 40 bytes of sha1
417 # we know that after the null there are 40 bytes of sha1
418 # this translates to the bisect lo = mid + 1
418 # this translates to the bisect lo = mid + 1
419 lo = advance(end + 40, '\n') + 1
419 lo = advance(end + 40, '\n') + 1
420 else:
420 else:
421 # this translates to the bisect hi = mid
421 # this translates to the bisect hi = mid
422 hi = start
422 hi = start
423 end = advance(lo, '\0')
423 end = advance(lo, '\0')
424 found = m[lo:end]
424 found = m[lo:end]
425 if s == found:
425 if s == found:
426 # we know that after the null there are 40 bytes of sha1
426 # we know that after the null there are 40 bytes of sha1
427 end = advance(end + 40, '\n')
427 end = advance(end + 40, '\n')
428 return (lo, end + 1)
428 return (lo, end + 1)
429 else:
429 else:
430 return (lo, lo)
430 return (lo, lo)
431
431
432 def _checkforbidden(l):
432 def _checkforbidden(l):
433 """Check filenames for illegal characters."""
433 """Check filenames for illegal characters."""
434 for f in l:
434 for f in l:
435 if '\n' in f or '\r' in f:
435 if '\n' in f or '\r' in f:
436 raise error.RevlogError(
436 raise error.RevlogError(
437 _("'\\n' and '\\r' disallowed in filenames: %r") % f)
437 _("'\\n' and '\\r' disallowed in filenames: %r") % f)
438
438
439
439
440 # apply the changes collected during the bisect loop to our addlist
440 # apply the changes collected during the bisect loop to our addlist
441 # return a delta suitable for addrevision
441 # return a delta suitable for addrevision
442 def _addlistdelta(addlist, x):
442 def _addlistdelta(addlist, x):
443 # for large addlist arrays, building a new array is cheaper
443 # for large addlist arrays, building a new array is cheaper
444 # than repeatedly modifying the existing one
444 # than repeatedly modifying the existing one
445 currentposition = 0
445 currentposition = 0
446 newaddlist = array.array('c')
446 newaddlist = array.array('c')
447
447
448 for start, end, content in x:
448 for start, end, content in x:
449 newaddlist += addlist[currentposition:start]
449 newaddlist += addlist[currentposition:start]
450 if content:
450 if content:
451 newaddlist += array.array('c', content)
451 newaddlist += array.array('c', content)
452
452
453 currentposition = end
453 currentposition = end
454
454
455 newaddlist += addlist[currentposition:]
455 newaddlist += addlist[currentposition:]
456
456
457 deltatext = "".join(struct.pack(">lll", start, end, len(content))
457 deltatext = "".join(struct.pack(">lll", start, end, len(content))
458 + content for start, end, content in x)
458 + content for start, end, content in x)
459 return deltatext, newaddlist
459 return deltatext, newaddlist
460
460
461 def _splittopdir(f):
461 def _splittopdir(f):
462 if '/' in f:
462 if '/' in f:
463 dir, subpath = f.split('/', 1)
463 dir, subpath = f.split('/', 1)
464 return dir + '/', subpath
464 return dir + '/', subpath
465 else:
465 else:
466 return '', f
466 return '', f
467
467
468 _noop = lambda s: None
468 _noop = lambda s: None
469
469
470 class treemanifest(object):
470 class treemanifest(object):
471 def __init__(self, dir='', text=''):
471 def __init__(self, dir='', text=''):
472 self._dir = dir
472 self._dir = dir
473 self._node = revlog.nullid
473 self._node = revlog.nullid
474 self._loadfunc = _noop
474 self._loadfunc = _noop
475 self._copyfunc = _noop
475 self._copyfunc = _noop
476 self._dirty = False
476 self._dirty = False
477 self._dirs = {}
477 self._dirs = {}
478 # Using _lazymanifest here is a little slower than plain old dicts
478 # Using _lazymanifest here is a little slower than plain old dicts
479 self._files = {}
479 self._files = {}
480 self._flags = {}
480 self._flags = {}
481 if text:
481 if text:
482 def readsubtree(subdir, subm):
482 def readsubtree(subdir, subm):
483 raise AssertionError('treemanifest constructor only accepts '
483 raise AssertionError('treemanifest constructor only accepts '
484 'flat manifests')
484 'flat manifests')
485 self.parse(text, readsubtree)
485 self.parse(text, readsubtree)
486 self._dirty = True # Mark flat manifest dirty after parsing
486 self._dirty = True # Mark flat manifest dirty after parsing
487
487
488 def _subpath(self, path):
488 def _subpath(self, path):
489 return self._dir + path
489 return self._dir + path
490
490
491 def __len__(self):
491 def __len__(self):
492 self._load()
492 self._load()
493 size = len(self._files)
493 size = len(self._files)
494 for m in self._dirs.values():
494 for m in self._dirs.values():
495 size += m.__len__()
495 size += m.__len__()
496 return size
496 return size
497
497
498 def _isempty(self):
498 def _isempty(self):
499 self._load() # for consistency; already loaded by all callers
499 self._load() # for consistency; already loaded by all callers
500 return (not self._files and (not self._dirs or
500 return (not self._files and (not self._dirs or
501 all(m._isempty() for m in self._dirs.values())))
501 all(m._isempty() for m in self._dirs.values())))
502
502
503 def __repr__(self):
503 def __repr__(self):
504 return ('<treemanifest dir=%s, node=%s, loaded=%s, dirty=%s at 0x%x>' %
504 return ('<treemanifest dir=%s, node=%s, loaded=%s, dirty=%s at 0x%x>' %
505 (self._dir, revlog.hex(self._node),
505 (self._dir, revlog.hex(self._node),
506 bool(self._loadfunc is _noop),
506 bool(self._loadfunc is _noop),
507 self._dirty, id(self)))
507 self._dirty, id(self)))
508
508
509 def dir(self):
509 def dir(self):
510 '''The directory that this tree manifest represents, including a
510 '''The directory that this tree manifest represents, including a
511 trailing '/'. Empty string for the repo root directory.'''
511 trailing '/'. Empty string for the repo root directory.'''
512 return self._dir
512 return self._dir
513
513
514 def node(self):
514 def node(self):
515 '''This node of this instance. nullid for unsaved instances. Should
515 '''This node of this instance. nullid for unsaved instances. Should
516 be updated when the instance is read or written from a revlog.
516 be updated when the instance is read or written from a revlog.
517 '''
517 '''
518 assert not self._dirty
518 assert not self._dirty
519 return self._node
519 return self._node
520
520
521 def setnode(self, node):
521 def setnode(self, node):
522 self._node = node
522 self._node = node
523 self._dirty = False
523 self._dirty = False
524
524
525 def iterentries(self):
525 def iterentries(self):
526 self._load()
526 self._load()
527 for p, n in sorted(self._dirs.items() + self._files.items()):
527 for p, n in sorted(self._dirs.items() + self._files.items()):
528 if p in self._files:
528 if p in self._files:
529 yield self._subpath(p), n, self._flags.get(p, '')
529 yield self._subpath(p), n, self._flags.get(p, '')
530 else:
530 else:
531 for x in n.iterentries():
531 for x in n.iterentries():
532 yield x
532 yield x
533
533
534 def iteritems(self):
534 def iteritems(self):
535 self._load()
535 self._load()
536 for p, n in sorted(self._dirs.items() + self._files.items()):
536 for p, n in sorted(self._dirs.items() + self._files.items()):
537 if p in self._files:
537 if p in self._files:
538 yield self._subpath(p), n
538 yield self._subpath(p), n
539 else:
539 else:
540 for f, sn in n.iteritems():
540 for f, sn in n.iteritems():
541 yield f, sn
541 yield f, sn
542
542
543 def iterkeys(self):
543 def iterkeys(self):
544 self._load()
544 self._load()
545 for p in sorted(self._dirs.keys() + self._files.keys()):
545 for p in sorted(self._dirs.keys() + self._files.keys()):
546 if p in self._files:
546 if p in self._files:
547 yield self._subpath(p)
547 yield self._subpath(p)
548 else:
548 else:
549 for f in self._dirs[p].iterkeys():
549 for f in self._dirs[p].iterkeys():
550 yield f
550 yield f
551
551
552 def keys(self):
552 def keys(self):
553 return list(self.iterkeys())
553 return list(self.iterkeys())
554
554
555 def __iter__(self):
555 def __iter__(self):
556 return self.iterkeys()
556 return self.iterkeys()
557
557
558 def __contains__(self, f):
558 def __contains__(self, f):
559 if f is None:
559 if f is None:
560 return False
560 return False
561 self._load()
561 self._load()
562 dir, subpath = _splittopdir(f)
562 dir, subpath = _splittopdir(f)
563 if dir:
563 if dir:
564 if dir not in self._dirs:
564 if dir not in self._dirs:
565 return False
565 return False
566 return self._dirs[dir].__contains__(subpath)
566 return self._dirs[dir].__contains__(subpath)
567 else:
567 else:
568 return f in self._files
568 return f in self._files
569
569
570 def get(self, f, default=None):
570 def get(self, f, default=None):
571 self._load()
571 self._load()
572 dir, subpath = _splittopdir(f)
572 dir, subpath = _splittopdir(f)
573 if dir:
573 if dir:
574 if dir not in self._dirs:
574 if dir not in self._dirs:
575 return default
575 return default
576 return self._dirs[dir].get(subpath, default)
576 return self._dirs[dir].get(subpath, default)
577 else:
577 else:
578 return self._files.get(f, default)
578 return self._files.get(f, default)
579
579
580 def __getitem__(self, f):
580 def __getitem__(self, f):
581 self._load()
581 self._load()
582 dir, subpath = _splittopdir(f)
582 dir, subpath = _splittopdir(f)
583 if dir:
583 if dir:
584 return self._dirs[dir].__getitem__(subpath)
584 return self._dirs[dir].__getitem__(subpath)
585 else:
585 else:
586 return self._files[f]
586 return self._files[f]
587
587
588 def flags(self, f):
588 def flags(self, f):
589 self._load()
589 self._load()
590 dir, subpath = _splittopdir(f)
590 dir, subpath = _splittopdir(f)
591 if dir:
591 if dir:
592 if dir not in self._dirs:
592 if dir not in self._dirs:
593 return ''
593 return ''
594 return self._dirs[dir].flags(subpath)
594 return self._dirs[dir].flags(subpath)
595 else:
595 else:
596 if f in self._dirs:
596 if f in self._dirs:
597 return ''
597 return ''
598 return self._flags.get(f, '')
598 return self._flags.get(f, '')
599
599
600 def find(self, f):
600 def find(self, f):
601 self._load()
601 self._load()
602 dir, subpath = _splittopdir(f)
602 dir, subpath = _splittopdir(f)
603 if dir:
603 if dir:
604 return self._dirs[dir].find(subpath)
604 return self._dirs[dir].find(subpath)
605 else:
605 else:
606 return self._files[f], self._flags.get(f, '')
606 return self._files[f], self._flags.get(f, '')
607
607
608 def __delitem__(self, f):
608 def __delitem__(self, f):
609 self._load()
609 self._load()
610 dir, subpath = _splittopdir(f)
610 dir, subpath = _splittopdir(f)
611 if dir:
611 if dir:
612 self._dirs[dir].__delitem__(subpath)
612 self._dirs[dir].__delitem__(subpath)
613 # If the directory is now empty, remove it
613 # If the directory is now empty, remove it
614 if self._dirs[dir]._isempty():
614 if self._dirs[dir]._isempty():
615 del self._dirs[dir]
615 del self._dirs[dir]
616 else:
616 else:
617 del self._files[f]
617 del self._files[f]
618 if f in self._flags:
618 if f in self._flags:
619 del self._flags[f]
619 del self._flags[f]
620 self._dirty = True
620 self._dirty = True
621
621
622 def __setitem__(self, f, n):
622 def __setitem__(self, f, n):
623 assert n is not None
623 assert n is not None
624 self._load()
624 self._load()
625 dir, subpath = _splittopdir(f)
625 dir, subpath = _splittopdir(f)
626 if dir:
626 if dir:
627 if dir not in self._dirs:
627 if dir not in self._dirs:
628 self._dirs[dir] = treemanifest(self._subpath(dir))
628 self._dirs[dir] = treemanifest(self._subpath(dir))
629 self._dirs[dir].__setitem__(subpath, n)
629 self._dirs[dir].__setitem__(subpath, n)
630 else:
630 else:
631 self._files[f] = n[:21] # to match manifestdict's behavior
631 self._files[f] = n[:21] # to match manifestdict's behavior
632 self._dirty = True
632 self._dirty = True
633
633
634 def _load(self):
634 def _load(self):
635 if self._loadfunc is not _noop:
635 if self._loadfunc is not _noop:
636 lf, self._loadfunc = self._loadfunc, _noop
636 lf, self._loadfunc = self._loadfunc, _noop
637 lf(self)
637 lf(self)
638 elif self._copyfunc is not _noop:
638 elif self._copyfunc is not _noop:
639 cf, self._copyfunc = self._copyfunc, _noop
639 cf, self._copyfunc = self._copyfunc, _noop
640 cf(self)
640 cf(self)
641
641
642 def setflag(self, f, flags):
642 def setflag(self, f, flags):
643 """Set the flags (symlink, executable) for path f."""
643 """Set the flags (symlink, executable) for path f."""
644 self._load()
644 self._load()
645 dir, subpath = _splittopdir(f)
645 dir, subpath = _splittopdir(f)
646 if dir:
646 if dir:
647 if dir not in self._dirs:
647 if dir not in self._dirs:
648 self._dirs[dir] = treemanifest(self._subpath(dir))
648 self._dirs[dir] = treemanifest(self._subpath(dir))
649 self._dirs[dir].setflag(subpath, flags)
649 self._dirs[dir].setflag(subpath, flags)
650 else:
650 else:
651 self._flags[f] = flags
651 self._flags[f] = flags
652 self._dirty = True
652 self._dirty = True
653
653
654 def copy(self):
654 def copy(self):
655 copy = treemanifest(self._dir)
655 copy = treemanifest(self._dir)
656 copy._node = self._node
656 copy._node = self._node
657 copy._dirty = self._dirty
657 copy._dirty = self._dirty
658 if self._copyfunc is _noop:
658 if self._copyfunc is _noop:
659 def _copyfunc(s):
659 def _copyfunc(s):
660 self._load()
660 self._load()
661 for d in self._dirs:
661 for d in self._dirs:
662 s._dirs[d] = self._dirs[d].copy()
662 s._dirs[d] = self._dirs[d].copy()
663 s._files = dict.copy(self._files)
663 s._files = dict.copy(self._files)
664 s._flags = dict.copy(self._flags)
664 s._flags = dict.copy(self._flags)
665 if self._loadfunc is _noop:
665 if self._loadfunc is _noop:
666 _copyfunc(copy)
666 _copyfunc(copy)
667 else:
667 else:
668 copy._copyfunc = _copyfunc
668 copy._copyfunc = _copyfunc
669 else:
669 else:
670 copy._copyfunc = self._copyfunc
670 copy._copyfunc = self._copyfunc
671 return copy
671 return copy
672
672
673 def filesnotin(self, m2):
673 def filesnotin(self, m2):
674 '''Set of files in this manifest that are not in the other'''
674 '''Set of files in this manifest that are not in the other'''
675 files = set()
675 files = set()
676 def _filesnotin(t1, t2):
676 def _filesnotin(t1, t2):
677 if t1._node == t2._node and not t1._dirty and not t2._dirty:
677 if t1._node == t2._node and not t1._dirty and not t2._dirty:
678 return
678 return
679 t1._load()
679 t1._load()
680 t2._load()
680 t2._load()
681 for d, m1 in t1._dirs.iteritems():
681 for d, m1 in t1._dirs.iteritems():
682 if d in t2._dirs:
682 if d in t2._dirs:
683 m2 = t2._dirs[d]
683 m2 = t2._dirs[d]
684 _filesnotin(m1, m2)
684 _filesnotin(m1, m2)
685 else:
685 else:
686 files.update(m1.iterkeys())
686 files.update(m1.iterkeys())
687
687
688 for fn in t1._files.iterkeys():
688 for fn in t1._files.iterkeys():
689 if fn not in t2._files:
689 if fn not in t2._files:
690 files.add(t1._subpath(fn))
690 files.add(t1._subpath(fn))
691
691
692 _filesnotin(self, m2)
692 _filesnotin(self, m2)
693 return files
693 return files
694
694
695 @propertycache
695 @propertycache
696 def _alldirs(self):
696 def _alldirs(self):
697 return util.dirs(self)
697 return util.dirs(self)
698
698
699 def dirs(self):
699 def dirs(self):
700 return self._alldirs
700 return self._alldirs
701
701
702 def hasdir(self, dir):
702 def hasdir(self, dir):
703 self._load()
703 self._load()
704 topdir, subdir = _splittopdir(dir)
704 topdir, subdir = _splittopdir(dir)
705 if topdir:
705 if topdir:
706 if topdir in self._dirs:
706 if topdir in self._dirs:
707 return self._dirs[topdir].hasdir(subdir)
707 return self._dirs[topdir].hasdir(subdir)
708 return False
708 return False
709 return (dir + '/') in self._dirs
709 return (dir + '/') in self._dirs
710
710
711 def walk(self, match):
711 def walk(self, match):
712 '''Generates matching file names.
712 '''Generates matching file names.
713
713
714 Equivalent to manifest.matches(match).iterkeys(), but without creating
714 Equivalent to manifest.matches(match).iterkeys(), but without creating
715 an entirely new manifest.
715 an entirely new manifest.
716
716
717 It also reports nonexistent files by marking them bad with match.bad().
717 It also reports nonexistent files by marking them bad with match.bad().
718 '''
718 '''
719 if match.always():
719 if match.always():
720 for f in iter(self):
720 for f in iter(self):
721 yield f
721 yield f
722 return
722 return
723
723
724 fset = set(match.files())
724 fset = set(match.files())
725
725
726 for fn in self._walk(match):
726 for fn in self._walk(match):
727 if fn in fset:
727 if fn in fset:
728 # specified pattern is the exact name
728 # specified pattern is the exact name
729 fset.remove(fn)
729 fset.remove(fn)
730 yield fn
730 yield fn
731
731
732 # for dirstate.walk, files=['.'] means "walk the whole tree".
732 # for dirstate.walk, files=['.'] means "walk the whole tree".
733 # follow that here, too
733 # follow that here, too
734 fset.discard('.')
734 fset.discard('.')
735
735
736 for fn in sorted(fset):
736 for fn in sorted(fset):
737 if not self.hasdir(fn):
737 if not self.hasdir(fn):
738 match.bad(fn, None)
738 match.bad(fn, None)
739
739
740 def _walk(self, match):
740 def _walk(self, match):
741 '''Recursively generates matching file names for walk().'''
741 '''Recursively generates matching file names for walk().'''
742 if not match.visitdir(self._dir[:-1] or '.'):
742 if not match.visitdir(self._dir[:-1] or '.'):
743 return
743 return
744
744
745 # yield this dir's files and walk its submanifests
745 # yield this dir's files and walk its submanifests
746 self._load()
746 self._load()
747 for p in sorted(self._dirs.keys() + self._files.keys()):
747 for p in sorted(self._dirs.keys() + self._files.keys()):
748 if p in self._files:
748 if p in self._files:
749 fullp = self._subpath(p)
749 fullp = self._subpath(p)
750 if match(fullp):
750 if match(fullp):
751 yield fullp
751 yield fullp
752 else:
752 else:
753 for f in self._dirs[p]._walk(match):
753 for f in self._dirs[p]._walk(match):
754 yield f
754 yield f
755
755
756 def matches(self, match):
756 def matches(self, match):
757 '''generate a new manifest filtered by the match argument'''
757 '''generate a new manifest filtered by the match argument'''
758 if match.always():
758 if match.always():
759 return self.copy()
759 return self.copy()
760
760
761 return self._matches(match)
761 return self._matches(match)
762
762
763 def _matches(self, match):
763 def _matches(self, match):
764 '''recursively generate a new manifest filtered by the match argument.
764 '''recursively generate a new manifest filtered by the match argument.
765 '''
765 '''
766
766
767 visit = match.visitdir(self._dir[:-1] or '.')
767 visit = match.visitdir(self._dir[:-1] or '.')
768 if visit == 'all':
768 if visit == 'all':
769 return self.copy()
769 return self.copy()
770 ret = treemanifest(self._dir)
770 ret = treemanifest(self._dir)
771 if not visit:
771 if not visit:
772 return ret
772 return ret
773
773
774 self._load()
774 self._load()
775 for fn in self._files:
775 for fn in self._files:
776 fullp = self._subpath(fn)
776 fullp = self._subpath(fn)
777 if not match(fullp):
777 if not match(fullp):
778 continue
778 continue
779 ret._files[fn] = self._files[fn]
779 ret._files[fn] = self._files[fn]
780 if fn in self._flags:
780 if fn in self._flags:
781 ret._flags[fn] = self._flags[fn]
781 ret._flags[fn] = self._flags[fn]
782
782
783 for dir, subm in self._dirs.iteritems():
783 for dir, subm in self._dirs.iteritems():
784 m = subm._matches(match)
784 m = subm._matches(match)
785 if not m._isempty():
785 if not m._isempty():
786 ret._dirs[dir] = m
786 ret._dirs[dir] = m
787
787
788 if not ret._isempty():
788 if not ret._isempty():
789 ret._dirty = True
789 ret._dirty = True
790 return ret
790 return ret
791
791
792 def diff(self, m2, clean=False):
792 def diff(self, m2, clean=False):
793 '''Finds changes between the current manifest and m2.
793 '''Finds changes between the current manifest and m2.
794
794
795 Args:
795 Args:
796 m2: the manifest to which this manifest should be compared.
796 m2: the manifest to which this manifest should be compared.
797 clean: if true, include files unchanged between these manifests
797 clean: if true, include files unchanged between these manifests
798 with a None value in the returned dictionary.
798 with a None value in the returned dictionary.
799
799
800 The result is returned as a dict with filename as key and
800 The result is returned as a dict with filename as key and
801 values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
801 values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
802 nodeid in the current/other manifest and fl1/fl2 is the flag
802 nodeid in the current/other manifest and fl1/fl2 is the flag
803 in the current/other manifest. Where the file does not exist,
803 in the current/other manifest. Where the file does not exist,
804 the nodeid will be None and the flags will be the empty
804 the nodeid will be None and the flags will be the empty
805 string.
805 string.
806 '''
806 '''
807 result = {}
807 result = {}
808 emptytree = treemanifest()
808 emptytree = treemanifest()
809 def _diff(t1, t2):
809 def _diff(t1, t2):
810 if t1._node == t2._node and not t1._dirty and not t2._dirty:
810 if t1._node == t2._node and not t1._dirty and not t2._dirty:
811 return
811 return
812 t1._load()
812 t1._load()
813 t2._load()
813 t2._load()
814 for d, m1 in t1._dirs.iteritems():
814 for d, m1 in t1._dirs.iteritems():
815 m2 = t2._dirs.get(d, emptytree)
815 m2 = t2._dirs.get(d, emptytree)
816 _diff(m1, m2)
816 _diff(m1, m2)
817
817
818 for d, m2 in t2._dirs.iteritems():
818 for d, m2 in t2._dirs.iteritems():
819 if d not in t1._dirs:
819 if d not in t1._dirs:
820 _diff(emptytree, m2)
820 _diff(emptytree, m2)
821
821
822 for fn, n1 in t1._files.iteritems():
822 for fn, n1 in t1._files.iteritems():
823 fl1 = t1._flags.get(fn, '')
823 fl1 = t1._flags.get(fn, '')
824 n2 = t2._files.get(fn, None)
824 n2 = t2._files.get(fn, None)
825 fl2 = t2._flags.get(fn, '')
825 fl2 = t2._flags.get(fn, '')
826 if n1 != n2 or fl1 != fl2:
826 if n1 != n2 or fl1 != fl2:
827 result[t1._subpath(fn)] = ((n1, fl1), (n2, fl2))
827 result[t1._subpath(fn)] = ((n1, fl1), (n2, fl2))
828 elif clean:
828 elif clean:
829 result[t1._subpath(fn)] = None
829 result[t1._subpath(fn)] = None
830
830
831 for fn, n2 in t2._files.iteritems():
831 for fn, n2 in t2._files.iteritems():
832 if fn not in t1._files:
832 if fn not in t1._files:
833 fl2 = t2._flags.get(fn, '')
833 fl2 = t2._flags.get(fn, '')
834 result[t2._subpath(fn)] = ((None, ''), (n2, fl2))
834 result[t2._subpath(fn)] = ((None, ''), (n2, fl2))
835
835
836 _diff(self, m2)
836 _diff(self, m2)
837 return result
837 return result
838
838
839 def unmodifiedsince(self, m2):
839 def unmodifiedsince(self, m2):
840 return not self._dirty and not m2._dirty and self._node == m2._node
840 return not self._dirty and not m2._dirty and self._node == m2._node
841
841
842 def parse(self, text, readsubtree):
842 def parse(self, text, readsubtree):
843 for f, n, fl in _parse(text):
843 for f, n, fl in _parse(text):
844 if fl == 't':
844 if fl == 't':
845 f = f + '/'
845 f = f + '/'
846 self._dirs[f] = readsubtree(self._subpath(f), n)
846 self._dirs[f] = readsubtree(self._subpath(f), n)
847 elif '/' in f:
847 elif '/' in f:
848 # This is a flat manifest, so use __setitem__ and setflag rather
848 # This is a flat manifest, so use __setitem__ and setflag rather
849 # than assigning directly to _files and _flags, so we can
849 # than assigning directly to _files and _flags, so we can
850 # assign a path in a subdirectory, and to mark dirty (compared
850 # assign a path in a subdirectory, and to mark dirty (compared
851 # to nullid).
851 # to nullid).
852 self[f] = n
852 self[f] = n
853 if fl:
853 if fl:
854 self.setflag(f, fl)
854 self.setflag(f, fl)
855 else:
855 else:
856 # Assigning to _files and _flags avoids marking as dirty,
856 # Assigning to _files and _flags avoids marking as dirty,
857 # and should be a little faster.
857 # and should be a little faster.
858 self._files[f] = n
858 self._files[f] = n
859 if fl:
859 if fl:
860 self._flags[f] = fl
860 self._flags[f] = fl
861
861
862 def text(self, usemanifestv2=False):
862 def text(self, usemanifestv2=False):
863 """Get the full data of this manifest as a bytestring."""
863 """Get the full data of this manifest as a bytestring."""
864 self._load()
864 self._load()
865 return _text(self.iterentries(), usemanifestv2)
865 return _text(self.iterentries(), usemanifestv2)
866
866
867 def dirtext(self, usemanifestv2=False):
867 def dirtext(self, usemanifestv2=False):
868 """Get the full data of this directory as a bytestring. Make sure that
868 """Get the full data of this directory as a bytestring. Make sure that
869 any submanifests have been written first, so their nodeids are correct.
869 any submanifests have been written first, so their nodeids are correct.
870 """
870 """
871 self._load()
871 self._load()
872 flags = self.flags
872 flags = self.flags
873 dirs = [(d[:-1], self._dirs[d]._node, 't') for d in self._dirs]
873 dirs = [(d[:-1], self._dirs[d]._node, 't') for d in self._dirs]
874 files = [(f, self._files[f], flags(f)) for f in self._files]
874 files = [(f, self._files[f], flags(f)) for f in self._files]
875 return _text(sorted(dirs + files), usemanifestv2)
875 return _text(sorted(dirs + files), usemanifestv2)
876
876
877 def read(self, gettext, readsubtree):
877 def read(self, gettext, readsubtree):
878 def _load_for_read(s):
878 def _load_for_read(s):
879 s.parse(gettext(), readsubtree)
879 s.parse(gettext(), readsubtree)
880 s._dirty = False
880 s._dirty = False
881 self._loadfunc = _load_for_read
881 self._loadfunc = _load_for_read
882
882
883 def writesubtrees(self, m1, m2, writesubtree):
883 def writesubtrees(self, m1, m2, writesubtree):
884 self._load() # for consistency; should never have any effect here
884 self._load() # for consistency; should never have any effect here
885 m1._load()
885 m1._load()
886 m2._load()
886 m2._load()
887 emptytree = treemanifest()
887 emptytree = treemanifest()
888 for d, subm in self._dirs.iteritems():
888 for d, subm in self._dirs.iteritems():
889 subp1 = m1._dirs.get(d, emptytree)._node
889 subp1 = m1._dirs.get(d, emptytree)._node
890 subp2 = m2._dirs.get(d, emptytree)._node
890 subp2 = m2._dirs.get(d, emptytree)._node
891 if subp1 == revlog.nullid:
891 if subp1 == revlog.nullid:
892 subp1, subp2 = subp2, subp1
892 subp1, subp2 = subp2, subp1
893 writesubtree(subm, subp1, subp2)
893 writesubtree(subm, subp1, subp2)
894
894
class manifestrevlog(revlog.revlog):
    '''A revlog that stores manifest texts. This is responsible for caching the
    full-text manifest contents.
    '''
    def __init__(self, opener, indexfile):
        super(manifestrevlog, self).__init__(opener, indexfile)

        # During normal operations, we expect to deal with not more than four
        # revs at a time (such as during commit --amend). When rebasing large
        # stacks of commits, the number can go up, hence the config knob below.
        opts = getattr(opener, 'options', None)
        if opts is not None:
            cachesize = opts.get('manifestcachesize', 4)
        else:
            cachesize = 4
        self._fulltextcache = util.lrucachedict(cachesize)

    @property
    def fulltextcache(self):
        # LRU cache mapping node -> full manifest text.
        return self._fulltextcache

    def clearcaches(self):
        """Drop the revlog's caches plus the manifest fulltext cache."""
        super(manifestrevlog, self).clearcaches()
        self._fulltextcache.clear()
918
918
class manifestlog(object):
    """A collection class representing the collection of manifest snapshots
    referenced by commits in the repository.

    In this situation, 'manifest' refers to the abstract concept of a snapshot
    of the list of files in the given commit. Consumers of the output of this
    class do not care about the implementation details of the actual manifests
    they receive (i.e. tree or flat or lazily loaded, etc)."""
    def __init__(self, opener, repo):
        self._repo = repo

        # We'll separate this into its own cache once oldmanifest is no longer
        # used
        self._mancache = repo.manifest._mancache

    @property
    def _revlog(self):
        return self._repo.manifest

    @property
    def _oldmanifest(self):
        # _revlog is the same as _oldmanifest right now, but we eventually want
        # to delete _oldmanifest while still allowing manifestlog to access the
        # revlog specific apis.
        return self._repo.manifest

    def __getitem__(self, node):
        """Retrieves the manifest instance for the given node. Throws a KeyError
        if not found.
        """
        if node in self._mancache:
            cached = self._mancache[node]
            # The old manifest may put non-ctx manifests in the cache, so skip
            # those since they don't implement the full api.
            if isinstance(cached, (manifestctx, treemanifestctx)):
                return cached

        if self._oldmanifest._treeinmem:
            m = treemanifestctx(self._revlog, '', node)
        else:
            m = manifestctx(self._revlog, node)
        # The null revision is cheap to rebuild; don't cache it.
        if node != revlog.nullid:
            self._mancache[node] = m
        return m
964
964
class manifestctx(object):
    """A class representing a single revision of a manifest, including its
    contents, its parent revs, and its linkrev.
    """
    def __init__(self, revlog, node):
        self._revlog = revlog
        self._data = None

        self._node = node

        # TODO: We eventually want p1, p2, and linkrev exposed on this class,
        # but let's add it later when something needs it and we can load it
        # lazily.
        #self.p1, self.p2 = revlog.parents(node)
        #rev = revlog.rev(node)
        #self.linkrev = revlog.linkrev(rev)

    def node(self):
        return self._node

    def read(self):
        """Return the full manifestdict for this revision (cached)."""
        if self._data:
            return self._data
        if self._node == revlog.nullid:
            self._data = manifestdict()
        else:
            text = self._revlog.revision(self._node)
            # Prime the revlog's fulltext cache with what we just read.
            arraytext = array.array('c', text)
            self._revlog._fulltextcache[self._node] = arraytext
            self._data = manifestdict(text)
        return self._data

    def readfast(self):
        """Return a manifest cheaply: the delta against a parent when the
        stored delta is relative to one, otherwise the full manifest."""
        rl = self._revlog
        r = rl.rev(self._node)
        deltaparent = rl.deltaparent(r)
        if deltaparent != revlog.nullrev and deltaparent in rl.parentrevs(r):
            return self.readdelta()
        return self.read()

    def readdelta(self):
        """Return a manifestdict holding the entries changed relative to
        this revision's delta parent."""
        rl = self._revlog
        if rl._usemanifestv2:
            # Need to perform a slow delta
            r0 = rl.deltaparent(rl.rev(self._node))
            m0 = manifestctx(rl, rl.node(r0)).read()
            m1 = self.read()
            md = manifestdict()
            for f, ((n0, fl0), (n1, fl1)) in m0.diff(m1).iteritems():
                if n1:
                    md[f] = n1
                    if fl1:
                        md.setflag(f, fl1)
            return md

        r = rl.rev(self._node)
        d = mdiff.patchtext(rl.revdiff(rl.deltaparent(r), r))
        return manifestdict(d)
1014
1022
class treemanifestctx(object):
    """A single revision of a tree manifest: lazily reads its contents from
    the per-directory revlog resolved at construction time.
    """
    def __init__(self, revlog, dir, node):
        # Resolve the revlog for this specific directory up front.
        revlog = revlog.dirlog(dir)
        self._revlog = revlog
        self._dir = dir
        self._data = None

        self._node = node

        # TODO: Load p1/p2/linkrev lazily. They need to be lazily loaded so that
        # we can instantiate treemanifestctx objects for directories we don't
        # have on disk.
        #self.p1, self.p2 = revlog.parents(node)
        #rev = revlog.rev(node)
        #self.linkrev = revlog.linkrev(rev)

    def read(self):
        """Return the treemanifest for this revision, caching the result."""
        if not self._data:
            if self._node == revlog.nullid:
                self._data = treemanifest()
            elif self._revlog._treeondisk:
                # Subtrees live in their own revlogs; wire up lazy readers.
                m = treemanifest(dir=self._dir)
                def gettext():
                    return self._revlog.revision(self._node)
                def readsubtree(dir, subm):
                    return treemanifestctx(self._revlog, dir, subm).read()
                m.read(gettext, readsubtree)
                m.setnode(self._node)
                self._data = m
            else:
                # Flat storage: parse the whole text and prime the cache.
                text = self._revlog.revision(self._node)
                arraytext = array.array('c', text)
                self._revlog.fulltextcache[self._node] = arraytext
                self._data = treemanifest(dir=self._dir, text=text)

        return self._data

    def node(self):
        return self._node

    def readdelta(self):
        """Return a treemanifest with the entries changed relative to this
        revision's delta parent.

        Tree manifests always take the slow, diff-based path here.
        """
        # Need to perform a slow delta
        revlog = self._revlog
        r0 = revlog.deltaparent(revlog.rev(self._node))
        # Bug fix: 'dir' is the second positional parameter of __init__, so
        # the previous call treemanifestctx(revlog, node, dir=...) bound
        # 'dir' twice and raised TypeError. Pass args in declaration order.
        m0 = treemanifestctx(revlog, self._dir, revlog.node(r0)).read()
        m1 = self.read()
        md = treemanifest(dir=self._dir)
        for f, ((n0, fl0), (n1, fl1)) in m0.diff(m1).iteritems():
            if n1:
                md[f] = n1
                if fl1:
                    md.setflag(f, fl1)
        return md

    def readfast(self):
        """Return a manifest cheaply: the delta against a parent when the
        stored delta is relative to one, otherwise the full manifest."""
        rl = self._revlog
        r = rl.rev(self._node)
        deltaparent = rl.deltaparent(r)
        if deltaparent != revlog.nullrev and deltaparent in rl.parentrevs(r):
            return self.readdelta()
        return self.read()
1084
1069 class manifest(manifestrevlog):
1085 class manifest(manifestrevlog):
1070 def __init__(self, opener, dir='', dirlogcache=None):
1086 def __init__(self, opener, dir='', dirlogcache=None):
1071 '''The 'dir' and 'dirlogcache' arguments are for internal use by
1087 '''The 'dir' and 'dirlogcache' arguments are for internal use by
1072 manifest.manifest only. External users should create a root manifest
1088 manifest.manifest only. External users should create a root manifest
1073 log with manifest.manifest(opener) and call dirlog() on it.
1089 log with manifest.manifest(opener) and call dirlog() on it.
1074 '''
1090 '''
1075 # During normal operations, we expect to deal with not more than four
1091 # During normal operations, we expect to deal with not more than four
1076 # revs at a time (such as during commit --amend). When rebasing large
1092 # revs at a time (such as during commit --amend). When rebasing large
1077 # stacks of commits, the number can go up, hence the config knob below.
1093 # stacks of commits, the number can go up, hence the config knob below.
1078 cachesize = 4
1094 cachesize = 4
1079 usetreemanifest = False
1095 usetreemanifest = False
1080 usemanifestv2 = False
1096 usemanifestv2 = False
1081 opts = getattr(opener, 'options', None)
1097 opts = getattr(opener, 'options', None)
1082 if opts is not None:
1098 if opts is not None:
1083 cachesize = opts.get('manifestcachesize', cachesize)
1099 cachesize = opts.get('manifestcachesize', cachesize)
1084 usetreemanifest = opts.get('treemanifest', usetreemanifest)
1100 usetreemanifest = opts.get('treemanifest', usetreemanifest)
1085 usemanifestv2 = opts.get('manifestv2', usemanifestv2)
1101 usemanifestv2 = opts.get('manifestv2', usemanifestv2)
1086 self._mancache = util.lrucachedict(cachesize)
1102 self._mancache = util.lrucachedict(cachesize)
1087 self._treeinmem = usetreemanifest
1103 self._treeinmem = usetreemanifest
1088 self._treeondisk = usetreemanifest
1104 self._treeondisk = usetreemanifest
1089 self._usemanifestv2 = usemanifestv2
1105 self._usemanifestv2 = usemanifestv2
1090 indexfile = "00manifest.i"
1106 indexfile = "00manifest.i"
1091 if dir:
1107 if dir:
1092 assert self._treeondisk, 'opts is %r' % opts
1108 assert self._treeondisk, 'opts is %r' % opts
1093 if not dir.endswith('/'):
1109 if not dir.endswith('/'):
1094 dir = dir + '/'
1110 dir = dir + '/'
1095 indexfile = "meta/" + dir + "00manifest.i"
1111 indexfile = "meta/" + dir + "00manifest.i"
1096 super(manifest, self).__init__(opener, indexfile)
1112 super(manifest, self).__init__(opener, indexfile)
1097 self._dir = dir
1113 self._dir = dir
1098 # The dirlogcache is kept on the root manifest log
1114 # The dirlogcache is kept on the root manifest log
1099 if dir:
1115 if dir:
1100 self._dirlogcache = dirlogcache
1116 self._dirlogcache = dirlogcache
1101 else:
1117 else:
1102 self._dirlogcache = {'': self}
1118 self._dirlogcache = {'': self}
1103
1119
1104 def _newmanifest(self, data=''):
1120 def _newmanifest(self, data=''):
1105 if self._treeinmem:
1121 if self._treeinmem:
1106 return treemanifest(self._dir, data)
1122 return treemanifest(self._dir, data)
1107 return manifestdict(data)
1123 return manifestdict(data)
1108
1124
1109 def dirlog(self, dir):
1125 def dirlog(self, dir):
1110 if dir:
1126 if dir:
1111 assert self._treeondisk
1127 assert self._treeondisk
1112 if dir not in self._dirlogcache:
1128 if dir not in self._dirlogcache:
1113 self._dirlogcache[dir] = manifest(self.opener, dir,
1129 self._dirlogcache[dir] = manifest(self.opener, dir,
1114 self._dirlogcache)
1130 self._dirlogcache)
1115 return self._dirlogcache[dir]
1131 return self._dirlogcache[dir]
1116
1132
1117 def _slowreaddelta(self, node):
1133 def _slowreaddelta(self, node):
1118 r0 = self.deltaparent(self.rev(node))
1134 r0 = self.deltaparent(self.rev(node))
1119 m0 = self.read(self.node(r0))
1135 m0 = self.read(self.node(r0))
1120 m1 = self.read(node)
1136 m1 = self.read(node)
1121 md = self._newmanifest()
1137 md = self._newmanifest()
1122 for f, ((n0, fl0), (n1, fl1)) in m0.diff(m1).iteritems():
1138 for f, ((n0, fl0), (n1, fl1)) in m0.diff(m1).iteritems():
1123 if n1:
1139 if n1:
1124 md[f] = n1
1140 md[f] = n1
1125 if fl1:
1141 if fl1:
1126 md.setflag(f, fl1)
1142 md.setflag(f, fl1)
1127 return md
1143 return md
1128
1144
1129 def readdelta(self, node):
1145 def readdelta(self, node):
1130 if self._usemanifestv2 or self._treeondisk:
1146 if self._usemanifestv2 or self._treeondisk:
1131 return self._slowreaddelta(node)
1147 return self._slowreaddelta(node)
1132 r = self.rev(node)
1148 r = self.rev(node)
1133 d = mdiff.patchtext(self.revdiff(self.deltaparent(r), r))
1149 d = mdiff.patchtext(self.revdiff(self.deltaparent(r), r))
1134 return self._newmanifest(d)
1150 return self._newmanifest(d)
1135
1151
1136 def readshallowdelta(self, node):
1152 def readshallowdelta(self, node):
1137 '''For flat manifests, this is the same as readdelta(). For
1153 '''For flat manifests, this is the same as readdelta(). For
1138 treemanifests, this will read the delta for this revlog's directory,
1154 treemanifests, this will read the delta for this revlog's directory,
1139 without recursively reading subdirectory manifests. Instead, any
1155 without recursively reading subdirectory manifests. Instead, any
1140 subdirectory entry will be reported as it appears in the manifests, i.e.
1156 subdirectory entry will be reported as it appears in the manifests, i.e.
1141 the subdirectory will be reported among files and distinguished only by
1157 the subdirectory will be reported among files and distinguished only by
1142 its 't' flag.'''
1158 its 't' flag.'''
1143 if not self._treeondisk:
1159 if not self._treeondisk:
1144 return self.readdelta(node)
1160 return self.readdelta(node)
1145 if self._usemanifestv2:
1161 if self._usemanifestv2:
1146 raise error.Abort(
1162 raise error.Abort(
1147 _("readshallowdelta() not implemented for manifestv2"))
1163 _("readshallowdelta() not implemented for manifestv2"))
1148 r = self.rev(node)
1164 r = self.rev(node)
1149 d = mdiff.patchtext(self.revdiff(self.deltaparent(r), r))
1165 d = mdiff.patchtext(self.revdiff(self.deltaparent(r), r))
1150 return manifestdict(d)
1166 return manifestdict(d)
1151
1167
1152 def readfast(self, node):
1153 '''use the faster of readdelta or read
1154
1155 This will return a manifest which is either only the files
1156 added/modified relative to p1, or all files in the
1157 manifest. Which one is returned depends on the codepath used
1158 to retrieve the data.
1159 '''
1160 r = self.rev(node)
1161 deltaparent = self.deltaparent(r)
1162 if deltaparent != revlog.nullrev and deltaparent in self.parentrevs(r):
1163 return self.readdelta(node)
1164 return self.read(node)
1165
1166 def readshallowfast(self, node):
1168 def readshallowfast(self, node):
1167 '''like readfast(), but calls readshallowdelta() instead of readdelta()
1169 '''like readfast(), but calls readshallowdelta() instead of readdelta()
1168 '''
1170 '''
1169 r = self.rev(node)
1171 r = self.rev(node)
1170 deltaparent = self.deltaparent(r)
1172 deltaparent = self.deltaparent(r)
1171 if deltaparent != revlog.nullrev and deltaparent in self.parentrevs(r):
1173 if deltaparent != revlog.nullrev and deltaparent in self.parentrevs(r):
1172 return self.readshallowdelta(node)
1174 return self.readshallowdelta(node)
1173 return self.readshallow(node)
1175 return self.readshallow(node)
1174
1176
1175 def read(self, node):
1177 def read(self, node):
1176 if node == revlog.nullid:
1178 if node == revlog.nullid:
1177 return self._newmanifest() # don't upset local cache
1179 return self._newmanifest() # don't upset local cache
1178 if node in self._mancache:
1180 if node in self._mancache:
1179 cached = self._mancache[node]
1181 cached = self._mancache[node]
1180 if (isinstance(cached, manifestctx) or
1182 if (isinstance(cached, manifestctx) or
1181 isinstance(cached, treemanifestctx)):
1183 isinstance(cached, treemanifestctx)):
1182 cached = cached.read()
1184 cached = cached.read()
1183 return cached
1185 return cached
1184 if self._treeondisk:
1186 if self._treeondisk:
1185 def gettext():
1187 def gettext():
1186 return self.revision(node)
1188 return self.revision(node)
1187 def readsubtree(dir, subm):
1189 def readsubtree(dir, subm):
1188 return self.dirlog(dir).read(subm)
1190 return self.dirlog(dir).read(subm)
1189 m = self._newmanifest()
1191 m = self._newmanifest()
1190 m.read(gettext, readsubtree)
1192 m.read(gettext, readsubtree)
1191 m.setnode(node)
1193 m.setnode(node)
1192 arraytext = None
1194 arraytext = None
1193 else:
1195 else:
1194 text = self.revision(node)
1196 text = self.revision(node)
1195 m = self._newmanifest(text)
1197 m = self._newmanifest(text)
1196 arraytext = array.array('c', text)
1198 arraytext = array.array('c', text)
1197 self._mancache[node] = m
1199 self._mancache[node] = m
1198 self.fulltextcache[node] = arraytext
1200 self.fulltextcache[node] = arraytext
1199 return m
1201 return m
1200
1202
1201 def readshallow(self, node):
1203 def readshallow(self, node):
1202 '''Reads the manifest in this directory. When using flat manifests,
1204 '''Reads the manifest in this directory. When using flat manifests,
1203 this manifest will generally have files in subdirectories in it. Does
1205 this manifest will generally have files in subdirectories in it. Does
1204 not cache the manifest as the callers generally do not read the same
1206 not cache the manifest as the callers generally do not read the same
1205 version twice.'''
1207 version twice.'''
1206 return manifestdict(self.revision(node))
1208 return manifestdict(self.revision(node))
1207
1209
1208 def find(self, node, f):
1210 def find(self, node, f):
1209 '''look up entry for a single file efficiently.
1211 '''look up entry for a single file efficiently.
1210 return (node, flags) pair if found, (None, None) if not.'''
1212 return (node, flags) pair if found, (None, None) if not.'''
1211 m = self.read(node)
1213 m = self.read(node)
1212 try:
1214 try:
1213 return m.find(f)
1215 return m.find(f)
1214 except KeyError:
1216 except KeyError:
1215 return None, None
1217 return None, None
1216
1218
1217 def add(self, m, transaction, link, p1, p2, added, removed):
1219 def add(self, m, transaction, link, p1, p2, added, removed):
1218 if (p1 in self.fulltextcache and not self._treeinmem
1220 if (p1 in self.fulltextcache and not self._treeinmem
1219 and not self._usemanifestv2):
1221 and not self._usemanifestv2):
1220 # If our first parent is in the manifest cache, we can
1222 # If our first parent is in the manifest cache, we can
1221 # compute a delta here using properties we know about the
1223 # compute a delta here using properties we know about the
1222 # manifest up-front, which may save time later for the
1224 # manifest up-front, which may save time later for the
1223 # revlog layer.
1225 # revlog layer.
1224
1226
1225 _checkforbidden(added)
1227 _checkforbidden(added)
1226 # combine the changed lists into one sorted iterator
1228 # combine the changed lists into one sorted iterator
1227 work = heapq.merge([(x, False) for x in added],
1229 work = heapq.merge([(x, False) for x in added],
1228 [(x, True) for x in removed])
1230 [(x, True) for x in removed])
1229
1231
1230 arraytext, deltatext = m.fastdelta(self.fulltextcache[p1], work)
1232 arraytext, deltatext = m.fastdelta(self.fulltextcache[p1], work)
1231 cachedelta = self.rev(p1), deltatext
1233 cachedelta = self.rev(p1), deltatext
1232 text = util.buffer(arraytext)
1234 text = util.buffer(arraytext)
1233 n = self.addrevision(text, transaction, link, p1, p2, cachedelta)
1235 n = self.addrevision(text, transaction, link, p1, p2, cachedelta)
1234 else:
1236 else:
1235 # The first parent manifest isn't already loaded, so we'll
1237 # The first parent manifest isn't already loaded, so we'll
1236 # just encode a fulltext of the manifest and pass that
1238 # just encode a fulltext of the manifest and pass that
1237 # through to the revlog layer, and let it handle the delta
1239 # through to the revlog layer, and let it handle the delta
1238 # process.
1240 # process.
1239 if self._treeondisk:
1241 if self._treeondisk:
1240 m1 = self.read(p1)
1242 m1 = self.read(p1)
1241 m2 = self.read(p2)
1243 m2 = self.read(p2)
1242 n = self._addtree(m, transaction, link, m1, m2)
1244 n = self._addtree(m, transaction, link, m1, m2)
1243 arraytext = None
1245 arraytext = None
1244 else:
1246 else:
1245 text = m.text(self._usemanifestv2)
1247 text = m.text(self._usemanifestv2)
1246 n = self.addrevision(text, transaction, link, p1, p2)
1248 n = self.addrevision(text, transaction, link, p1, p2)
1247 arraytext = array.array('c', text)
1249 arraytext = array.array('c', text)
1248
1250
1249 self._mancache[n] = m
1251 self._mancache[n] = m
1250 self.fulltextcache[n] = arraytext
1252 self.fulltextcache[n] = arraytext
1251
1253
1252 return n
1254 return n
1253
1255
1254 def _addtree(self, m, transaction, link, m1, m2):
1256 def _addtree(self, m, transaction, link, m1, m2):
1255 # If the manifest is unchanged compared to one parent,
1257 # If the manifest is unchanged compared to one parent,
1256 # don't write a new revision
1258 # don't write a new revision
1257 if m.unmodifiedsince(m1) or m.unmodifiedsince(m2):
1259 if m.unmodifiedsince(m1) or m.unmodifiedsince(m2):
1258 return m.node()
1260 return m.node()
1259 def writesubtree(subm, subp1, subp2):
1261 def writesubtree(subm, subp1, subp2):
1260 sublog = self.dirlog(subm.dir())
1262 sublog = self.dirlog(subm.dir())
1261 sublog.add(subm, transaction, link, subp1, subp2, None, None)
1263 sublog.add(subm, transaction, link, subp1, subp2, None, None)
1262 m.writesubtrees(m1, m2, writesubtree)
1264 m.writesubtrees(m1, m2, writesubtree)
1263 text = m.dirtext(self._usemanifestv2)
1265 text = m.dirtext(self._usemanifestv2)
1264 # Double-check whether contents are unchanged to one parent
1266 # Double-check whether contents are unchanged to one parent
1265 if text == m1.dirtext(self._usemanifestv2):
1267 if text == m1.dirtext(self._usemanifestv2):
1266 n = m1.node()
1268 n = m1.node()
1267 elif text == m2.dirtext(self._usemanifestv2):
1269 elif text == m2.dirtext(self._usemanifestv2):
1268 n = m2.node()
1270 n = m2.node()
1269 else:
1271 else:
1270 n = self.addrevision(text, transaction, link, m1.node(), m2.node())
1272 n = self.addrevision(text, transaction, link, m1.node(), m2.node())
1271 # Save nodeid so parent manifest can calculate its nodeid
1273 # Save nodeid so parent manifest can calculate its nodeid
1272 m.setnode(n)
1274 m.setnode(n)
1273 return n
1275 return n
1274
1276
1275 def clearcaches(self):
1277 def clearcaches(self):
1276 super(manifest, self).clearcaches()
1278 super(manifest, self).clearcaches()
1277 self._mancache.clear()
1279 self._mancache.clear()
1278 self._dirlogcache = {'': self}
1280 self._dirlogcache = {'': self}
General Comments 0
You need to be logged in to leave comments. Login now