##// END OF EJS Templates
mdiff: remove unused parameter 'refine' from allblocks()
Philippe Pepiot -
r30023:ff17dff9 default
parent child Browse files
Show More
@@ -1,1985 +1,1984
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import os
11 import os
12 import re
12 import re
13 import stat
13 import stat
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 bin,
17 bin,
18 hex,
18 hex,
19 nullid,
19 nullid,
20 nullrev,
20 nullrev,
21 short,
21 short,
22 wdirid,
22 wdirid,
23 )
23 )
24 from . import (
24 from . import (
25 encoding,
25 encoding,
26 error,
26 error,
27 fileset,
27 fileset,
28 match as matchmod,
28 match as matchmod,
29 mdiff,
29 mdiff,
30 obsolete as obsmod,
30 obsolete as obsmod,
31 patch,
31 patch,
32 phases,
32 phases,
33 repoview,
33 repoview,
34 revlog,
34 revlog,
35 scmutil,
35 scmutil,
36 subrepo,
36 subrepo,
37 util,
37 util,
38 )
38 )
39
39
40 propertycache = util.propertycache
40 propertycache = util.propertycache
41
41
42 # Phony node value to stand-in for new files in some uses of
42 # Phony node value to stand-in for new files in some uses of
43 # manifests. Manifests support 21-byte hashes for nodes which are
43 # manifests. Manifests support 21-byte hashes for nodes which are
44 # dirty in the working copy.
44 # dirty in the working copy.
45 _newnode = '!' * 21
45 _newnode = '!' * 21
46
46
47 nonascii = re.compile(r'[^\x21-\x7f]').search
47 nonascii = re.compile(r'[^\x21-\x7f]').search
48
48
class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
    be committed,
    memctx: a context that represents changes in-memory and can also
    be committed."""
    def __new__(cls, repo, changeid='', *args, **kwargs):
        # Contexts are immutable handles, so passing an existing context
        # through unchanged (repo[ctx]) is safe and avoids a new object.
        if isinstance(changeid, basectx):
            return changeid

        o = super(basectx, cls).__new__(cls)

        o._repo = repo
        o._rev = nullrev
        o._node = nullid

        return o

    def __str__(self):
        return short(self.node())

    def __int__(self):
        return self.rev()

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        # Equal only when the other object is the exact same context type
        # and addresses the same revision; anything else compares unequal.
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        return key in self._manifest

    def __getitem__(self, key):
        return self.filectx(key)

    def __iter__(self):
        return iter(self._manifest)

    def _manifestmatches(self, match, s):
        """generate a new manifest filtered by the match argument

        This method is for internal use only and mainly exists to provide an
        object oriented way for other contexts to customize the manifest
        generation.
        """
        return self.manifest().matches(match)

    def _matchstatus(self, other, match):
        """return match.always if match is none

        This internal method provides a way for child objects to override the
        match operator.
        """
        return match or matchmod.always(self._repo.root, self._repo.getcwd())

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        if self.rev() is not None and self.rev() < other.rev():
            self.manifest()
        mf1 = other._manifestmatches(match, s)
        mf2 = self._manifestmatches(match, s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, clean=listclean)
        for fn, value in d.iteritems():
            if fn in deletedset:
                continue
            if value is None:
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 != _newnode:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [fn for fn in unknown if fn not in mf1]
            ignored = [fn for fn in ignored if fn not in mf1]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(modified, added, removed, deleted, unknown,
                              ignored, clean)

    @propertycache
    def substate(self):
        # parsed .hgsubstate mapping for this context
        return subrepo.state(self, self._repo.ui)

    def subrev(self, subpath):
        return self.substate[subpath][1]

    def rev(self):
        return self._rev
    def node(self):
        return self._node
    def hex(self):
        return hex(self.node())
    def manifest(self):
        return self._manifest
    def repo(self):
        return self._repo
    def phasestr(self):
        return phases.phasenames[self.phase()]
    def mutable(self):
        return self.phase() > phases.public

    def getfileset(self, expr):
        return fileset.getfileset(self, expr)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, 'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, 'extinct')

    def unstable(self):
        """True if the changeset is not obsolete but it's ancestor are"""
        return self.rev() in obsmod.getrevs(self._repo, 'unstable')

    def bumped(self):
        """True if the changeset try to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be bumped.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'bumped')

    def divergent(self):
        """Is a successors of a changeset with multiple possible successors set

        Only non-public and non-obsolete changesets may be divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'divergent')

    def troubled(self):
        """True if the changeset is either unstable, bumped or divergent"""
        return self.unstable() or self.bumped() or self.divergent()

    def troubles(self):
        """return the list of troubles affecting this changesets.

        Troubles are returned as strings. possible values are:
        - unstable,
        - bumped,
        - divergent.
        """
        troubles = []
        if self.unstable():
            troubles.append('unstable')
        if self.bumped():
            troubles.append('bumped')
        if self.divergent():
            troubles.append('divergent')
        return troubles

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        # a missing second parent is reported as the null changeset
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        return changectx(self._repo, nullrev)

    def _fileinfo(self, path):
        """return (filenode, flags) for path, raising ManifestLookupError
        when the path is not in this context's manifest"""
        if '_manifest' in self.__dict__:
            # full manifest already loaded: use it directly
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(self._node, path,
                                                _('not found in manifest'))
        if '_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (self._manifestdelta[path],
                        self._manifestdelta.flags(path))
        node, flag = self._repo.manifest.find(self._changeset.manifest, path)
        if not node:
            raise error.ManifestLookupError(self._node, path,
                                            _('not found in manifest'))

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        # missing files have no flags rather than raising
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return ''

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        '''return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        '''
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        """return a matcher for this context's repo

        A None default (instead of the previous mutable [] default) avoids
        sharing one list object across every call to this method.
        """
        if pats is None:
            pats = []
        r = self._repo
        return matchmod.match(r.root, r.getcwd(), pats,
                              include, exclude, default,
                              auditor=r.nofsauditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn)

    def diff(self, ctx2=None, match=None, **opts):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        diffopts = patch.diffopts(self._repo.ui, opts)
        return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)

    def dirs(self):
        return self._manifest.dirs()

    def hasdir(self, dir):
        return self._manifest.hasdir(dir)

    def dirty(self, missing=False, merge=True, branch=True):
        # read-only contexts are never dirty; workingctx overrides this
        return False

    def status(self, other=None, match=None, listignored=False,
               listclean=False, listunknown=False, listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        reversed = False
        if (not isinstance(ctx1, changectx)
            and isinstance(ctx2, changectx)):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
                              listunknown)

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
                               r.clean)

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(rev2, match=submatch, ignored=listignored,
                               clean=listclean, unknown=listunknown,
                               listsubrepos=True)
                for rfiles, sfiles in zip(r, s):
                    rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)

        for l in r:
            l.sort()

        return r
379
379
380
380
def makememctx(repo, parents, text, user, date, branch, files, store,
               editor=None, extra=None):
    """Build a memctx whose file contents are fetched from *store*.

    *store* must provide getfile(path) -> (data, (islink, isexec), copied);
    a None data value means the file is absent. When *branch* is set it is
    recorded in *extra* (the passed-in dict is updated in place).
    """
    def getfilectx(repo, memctx, path):
        # Fetch one file from the backing store on demand.
        data, mode, copied = store.getfile(path)
        if data is None:
            return None
        islink, isexec = mode
        return memfilectx(repo, path, data, islink=islink, isexec=isexec,
                          copied=copied, memctx=memctx)

    if extra is None:
        extra = {}
    if branch:
        extra['branch'] = encoding.fromlocal(branch)
    return memctx(repo, parents, text, files, getfilectx, user,
                  date, extra, editor)
397
397
398 class changectx(basectx):
398 class changectx(basectx):
399 """A changecontext object makes access to data related to a particular
399 """A changecontext object makes access to data related to a particular
400 changeset convenient. It represents a read-only context already present in
400 changeset convenient. It represents a read-only context already present in
401 the repo."""
401 the repo."""
    def __init__(self, repo, changeid=''):
        """changeid is a revision number, node, or tag

        The lookup below is an ordered cascade: each recognizer either
        resolves self._node/self._rev and returns, or falls through to the
        next one. The final fallthrough raises RepoLookupError.
        """

        # since basectx.__new__ already took care of copying the object, we
        # don't need to do anything in __init__, so we just exit here
        if isinstance(changeid, basectx):
            return

        # empty changeid means the working directory's first parent
        if changeid == '':
            changeid = '.'
        self._repo = repo

        try:
            # integer: treat directly as a revision number
            if isinstance(changeid, int):
                self._node = repo.changelog.node(changeid)
                self._rev = changeid
                return
            # Python 2 long: normalize to a string and fall through to the
            # string-revision parsing below
            if isinstance(changeid, long):
                changeid = str(changeid)
            if changeid == 'null':
                self._node = nullid
                self._rev = nullrev
                return
            if changeid == 'tip':
                self._node = repo.changelog.tip()
                self._rev = repo.changelog.rev(self._node)
                return
            if changeid == '.' or changeid == repo.dirstate.p1():
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                self._node = repo.dirstate.p1()
                self._rev = repo.unfiltered().changelog.rev(self._node)
                return
            # 20-byte strings are binary nodeids
            if len(changeid) == 20:
                try:
                    self._node = changeid
                    self._rev = repo.changelog.rev(changeid)
                    return
                except error.FilteredRepoLookupError:
                    raise
                except LookupError:
                    pass

            # revision number spelled as a string (possibly negative,
            # counted from the end of the changelog)
            try:
                r = int(changeid)
                if str(r) != changeid:
                    raise ValueError
                l = len(repo.changelog)
                if r < 0:
                    r += l
                if r < 0 or r >= l:
                    raise ValueError
                self._rev = r
                self._node = repo.changelog.node(r)
                return
            except error.FilteredIndexError:
                raise
            except (ValueError, OverflowError, IndexError):
                pass

            # 40-character strings are full hex nodeids
            if len(changeid) == 40:
                try:
                    self._node = bin(changeid)
                    self._rev = repo.changelog.rev(self._node)
                    return
                except error.FilteredLookupError:
                    raise
                except (TypeError, LookupError):
                    pass

            # lookup bookmarks through the name interface
            try:
                self._node = repo.names.singlenode(repo, changeid)
                self._rev = repo.changelog.rev(self._node)
                return
            except KeyError:
                pass
            except error.FilteredRepoLookupError:
                raise
            except error.RepoLookupError:
                pass

            # last resort: unambiguous hex-nodeid prefix match
            self._node = repo.unfiltered().changelog._partialmatch(changeid)
            if self._node is not None:
                self._rev = repo.changelog.rev(self._node)
                return

            # lookup failed
            # check if it might have come from damaged dirstate
            #
            # XXX we could avoid the unfiltered if we had a recognizable
            # exception for filtered changeset access
            if changeid in repo.unfiltered().dirstate.parents():
                msg = _("working directory has unknown parent '%s'!")
                raise error.Abort(msg % short(changeid))
            try:
                # hexlify binary nodeids so the error message is printable
                if len(changeid) == 20 and nonascii(changeid):
                    changeid = hex(changeid)
            except TypeError:
                pass
        except (error.FilteredIndexError, error.FilteredLookupError,
                error.FilteredRepoLookupError):
            # the revision exists but is hidden by the current repo filter;
            # report that rather than a generic lookup failure
            if repo.filtername.startswith('visible'):
                msg = _("hidden revision '%s'") % changeid
                hint = _('use --hidden to access hidden revisions')
                raise error.FilteredRepoLookupError(msg, hint=hint)
            msg = _("filtered revision '%s' (not in '%s' subset)")
            msg %= (changeid, repo.filtername)
            raise error.FilteredRepoLookupError(msg)
        except IndexError:
            pass
        raise error.RepoLookupError(
            _("unknown revision '%s'") % changeid)
515
515
516 def __hash__(self):
516 def __hash__(self):
517 try:
517 try:
518 return hash(self._rev)
518 return hash(self._rev)
519 except AttributeError:
519 except AttributeError:
520 return id(self)
520 return id(self)
521
521
522 def __nonzero__(self):
522 def __nonzero__(self):
523 return self._rev != nullrev
523 return self._rev != nullrev
524
524
    @propertycache
    def _changeset(self):
        # lazily-loaded changelog entry for this revision
        return self._repo.changelog.changelogrevision(self.rev())
528
528
    @propertycache
    def _manifest(self):
        # full manifest for this changeset, read through the manifest log
        return self._repo.manifestlog[self._changeset.manifest].read()
532
532
    @propertycache
    def _manifestdelta(self):
        # delta form of this changeset's manifest (see manifestlog.readdelta);
        # _fileinfo consults it before falling back to the full manifest
        mfnode = self._changeset.manifest
        return self._repo.manifestlog[mfnode].readdelta()
537
537
@propertycache
def _parents(self):
    # parent changectxs; a one-element list when the second parent is null
    repo = self._repo
    p1, p2 = repo.changelog.parentrevs(self._rev)
    ctxs = [changectx(repo, p1)]
    if p2 != nullrev:
        ctxs.append(changectx(repo, p2))
    return ctxs
545
545
def changeset(self):
    # classic six-tuple view of the changelog entry, in the order
    # (manifest, user, date, files, description, extra)
    c = self._changeset
    return (c.manifest, c.user, c.date, c.files, c.description, c.extra)

def manifestnode(self):
    # node id of the manifest this changeset points at
    return self._changeset.manifest
558
558
# Thin accessors over the parsed changelog entry and repo-level state.
def user(self):
    return self._changeset.user
def date(self):
    return self._changeset.date
def files(self):
    return self._changeset.files
def description(self):
    return self._changeset.description
def branch(self):
    # branch name is stored in extra; convert to the local encoding
    return encoding.tolocal(self._changeset.extra.get("branch"))
def closesbranch(self):
    return 'close' in self._changeset.extra
def extra(self):
    return self._changeset.extra
def tags(self):
    return self._repo.nodetags(self._node)
def bookmarks(self):
    return self._repo.nodebookmarks(self._node)
def phase(self):
    return self._repo._phasecache.phase(self._repo, self._rev)
def hidden(self):
    # True when this revision is filtered out of the 'visible' view
    return self._rev in repoview.filterrevs(self._repo, 'visible')
581
581
def children(self):
    """return contexts for each child changeset"""
    return [changectx(self._repo, c)
            for c in self._repo.changelog.children(self._node)]

def ancestors(self):
    # lazily yield one changectx per ancestor revision
    repo = self._repo
    for rev in repo.changelog.ancestors([self._rev]):
        yield changectx(repo, rev)

def descendants(self):
    # lazily yield one changectx per descendant revision
    repo = self._repo
    for rev in repo.changelog.descendants([self._rev]):
        yield changectx(repo, rev)
594
594
def filectx(self, path, fileid=None, filelog=None):
    """get a file context from this changeset"""
    if fileid is None:
        # resolve the file node from this changeset's manifest
        fileid = self.filenode(path)
    return filectx(self._repo, path, fileid=fileid, changectx=self,
                   filelog=filelog)
601
601
def ancestor(self, c2, warn=False):
    """return the "best" ancestor context of self and c2

    If there are multiple candidates, it will show a message and check
    merge.preferancestor configuration before falling back to the
    revlog ancestor."""
    repo = self._repo
    # deal with workingctxs
    n2 = c2._node
    if n2 is None:
        n2 = c2._parents[0]._node
    cahs = repo.changelog.commonancestorsheads(self._node, n2)
    if not cahs:
        anc = nullid
    elif len(cahs) == 1:
        anc = cahs[0]
    else:
        # experimental config: merge.preferancestor
        for r in repo.ui.configlist('merge', 'preferancestor', ['*']):
            try:
                ctx = changectx(repo, r)
            except error.RepoLookupError:
                continue
            anc = ctx.node()
            if anc in cahs:
                break
        else:
            # no configured preference matched; use the revlog ancestor
            anc = repo.changelog.ancestor(self._node, n2)
        if warn:
            repo.ui.status(
                (_("note: using %s as ancestor of %s and %s\n") %
                 (short(anc), short(self._node), short(n2))) +
                ''.join(_(" alternatively, use --config "
                          "merge.preferancestor=%s\n") %
                        short(n) for n in sorted(cahs) if n != anc))
    return changectx(repo, anc)
637
637
def descendant(self, other):
    """True if other is descendant of this changeset"""
    return self._repo.changelog.descendant(self._rev, other._rev)

def walk(self, match):
    '''Generates matching file names.'''

    # Wrap match.bad method to have message with nodeid
    def bad(fn, msg):
        # The manifest doesn't know about subrepos, so don't complain
        # about paths into valid subrepos.
        if any(fn == s or fn.startswith(s + '/') for s in self.substate):
            return
        match.bad(fn, _('no such file in rev %s') % self)

    return self._manifest.walk(matchmod.badmatch(match, bad))

def matches(self, match):
    # walking the manifest already yields exactly the matched names
    return self.walk(match)
659
659
class basefilectx(object):
    """A filecontext object represents the common logic for its children:
    filectx: read-only access to a filerevision that is already present
             in the repo,
    workingfilectx: a filecontext that represents files from the working
                    directory,
    memfilectx: a filecontext that represents files in-memory."""
    def __new__(cls, repo, path, *args, **kwargs):
        # base implementation just allocates normally; subclasses may
        # override __new__ (signature kept for them) to return other
        # instances
        return super(basefilectx, cls).__new__(cls)
669
669
@propertycache
def _filelog(self):
    # filelog revlog for this path, opened via the repo
    return self._repo.file(self._path)

@propertycache
def _changeid(self):
    # changelog revision this file revision is attached to; resolution
    # order matters: an explicit id or context wins over lazy adjustment
    if '_changeid' in self.__dict__:
        return self._changeid
    elif '_changectx' in self.__dict__:
        return self._changectx.rev()
    elif '_descendantrev' in self.__dict__:
        # this file context was created from a revision with a known
        # descendant, we can (lazily) correct for linkrev aliases
        return self._adjustlinkrev(self._path, self._filelog,
                                   self._filenode, self._descendantrev)
    else:
        # no context available: trust the raw linkrev
        return self._filelog.linkrev(self._filerev)

@propertycache
def _filenode(self):
    # filelog node id, resolved from an explicit fileid when one was
    # given, otherwise looked up in the changeset's manifest
    if '_fileid' in self.__dict__:
        return self._filelog.lookup(self._fileid)
    else:
        return self._changectx.filenode(self._path)

@propertycache
def _filerev(self):
    # filelog revision number for this filenode
    return self._filelog.rev(self._filenode)

@propertycache
def _repopath(self):
    # path relative to the repository root (aliases _path here)
    return self._path
702
702
def __nonzero__(self):
    # a filectx is truthy iff its filenode can be resolved
    try:
        self._filenode
        return True
    except error.LookupError:
        # file is missing from the manifest
        return False

def __str__(self):
    return "%s@%s" % (self.path(), self._changectx)

def __repr__(self):
    return "<%s %s>" % (type(self).__name__, str(self))

def __hash__(self):
    # hash on (path, filenode); fall back to identity while unresolved
    try:
        key = (self._path, self._filenode)
    except AttributeError:
        return id(self)
    return hash(key)

def __eq__(self, other):
    # equal iff same concrete type, path and file node
    try:
        if type(self) != type(other):
            return False
        return (self._path == other._path
                and self._filenode == other._filenode)
    except AttributeError:
        return False

def __ne__(self, other):
    return not (self == other)
732
732
# Thin accessors delegating to the filelog or the owning changectx.
def filerev(self):
    # revision number within the filelog
    return self._filerev
def filenode(self):
    return self._filenode
def flags(self):
    # 'l' (symlink) and/or 'x' (executable) flags from the manifest
    return self._changectx.flags(self._path)
def filelog(self):
    return self._filelog
def rev(self):
    # changelog revision (may differ from linkrev for adjusted contexts)
    return self._changeid
def linkrev(self):
    # raw linkrev recorded in the filelog, without ancestry adjustment
    return self._filelog.linkrev(self._filerev)
def node(self):
    return self._changectx.node()
def hex(self):
    return self._changectx.hex()
def user(self):
    return self._changectx.user()
def date(self):
    return self._changectx.date()
def files(self):
    return self._changectx.files()
def description(self):
    return self._changectx.description()
def branch(self):
    return self._changectx.branch()
def extra(self):
    return self._changectx.extra()
def phase(self):
    return self._changectx.phase()
def phasestr(self):
    return self._changectx.phasestr()
def manifest(self):
    return self._changectx.manifest()
def changectx(self):
    return self._changectx
def repo(self):
    return self._repo

def path(self):
    return self._path
774
774
def isbinary(self):
    # unreadable data (e.g. file gone from the working dir) counts as
    # not binary
    try:
        d = self.data()
    except IOError:
        return False
    return util.binary(d)

def isexec(self):
    return 'x' in self.flags()

def islink(self):
    return 'l' in self.flags()

def isabsent(self):
    """whether this filectx represents a file not in self._changectx

    This is mainly for merge code to detect change/delete conflicts. This is
    expected to be True for all subclasses of basectx."""
    return False
791
791
# subclasses set this to take over comparison (see the fctx._customcmp
# check in cmp below)
_customcmp = False
def cmp(self, fctx):
    """compare with other file context

    returns True if different than fctx.
    """
    if fctx._customcmp:
        # the other context opted in to driving the comparison itself
        return fctx.cmp(self)

    # only fall through to a full data comparison when sizes match or
    # cannot be trusted; otherwise differing sizes imply differing data
    if (fctx._filenode is None
        and (self._repo._encodefilterpats
             # if file data starts with '\1\n', empty metadata block is
             # prepended, which adds 4 bytes to filelog.size().
             or self.size() - 4 == fctx.size())
        or self.size() == fctx.size()):
        return self._filelog.cmp(self._filenode, fctx.data())

    return True
810
810
def _adjustlinkrev(self, path, filelog, fnode, srcrev, inclusive=False):
    """return the first ancestor of <srcrev> introducing <fnode>

    If the linkrev of the file revision does not point to an ancestor of
    srcrev, we'll walk down the ancestors until we find one introducing
    this file revision.

    :repo: a localrepository object (used to access changelog and manifest)
    :path: the file path
    :fnode: the nodeid of the file revision
    :filelog: the filelog of this path
    :srcrev: the changeset revision we search ancestors from
    :inclusive: if true, the src revision will also be checked
    """
    repo = self._repo
    # use the unfiltered changelog: the linkrev may point at a revision
    # hidden in the current view
    cl = repo.unfiltered().changelog
    mfl = repo.manifestlog
    # fetch the linkrev
    fr = filelog.rev(fnode)
    lkr = filelog.linkrev(fr)
    # hack to reuse ancestor computation when searching for renames
    memberanc = getattr(self, '_ancestrycontext', None)
    iteranc = None
    if srcrev is None:
        # wctx case, used by workingfilectx during mergecopy
        revs = [p.rev() for p in self._repo[None].parents()]
        inclusive = True # we skipped the real (revless) source
    else:
        revs = [srcrev]
    if memberanc is None:
        memberanc = iteranc = cl.ancestors(revs, lkr,
                                           inclusive=inclusive)
    # check if this linkrev is an ancestor of srcrev
    if lkr not in memberanc:
        if iteranc is None:
            iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
        for a in iteranc:
            ac = cl.read(a) # get changeset data (we avoid object creation)
            if path in ac[3]: # checking the 'files' field.
                # The file has been touched, check if the content is
                # similar to the one we search for.
                if fnode == mfl[ac[0]].readfast().get(path):
                    return a
        # In theory, we should never get out of that loop without a result.
        # But if manifest uses a buggy file revision (not children of the
        # one it replaces) we could. Such a buggy situation will likely
        # result is crash somewhere else at to some point.
    return lkr
859
859
def introrev(self):
    """return the rev of the changeset which introduced this file revision

    This method is different from linkrev because it take into account the
    changeset the filectx was created from. It ensures the returned
    revision is one of its ancestors. This prevents bugs from
    'linkrev-shadowing' when a file revision is used by multiple
    changesets.
    """
    lkr = self.linkrev()
    attrs = vars(self)
    hasctx = '_changeid' in attrs or '_changectx' in attrs
    if not hasctx or lkr == self.rev():
        # no associated changeset, or the linkrev already matches it
        return lkr
    return self._adjustlinkrev(self._path, self._filelog, self._filenode,
                               self.rev(), inclusive=True)
876
876
def _parentfilectx(self, path, fileid, filelog):
    """create parent filectx keeping ancestry info for _adjustlinkrev()"""
    fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
    if '_changeid' in vars(self) or '_changectx' in vars(self):
        # If self is associated with a changeset (probably explicitly
        # fed), ensure the created filectx is associated with a
        # changeset that is an ancestor of self.changectx.
        # This lets us later use _adjustlinkrev to get a correct link.
        fctx._descendantrev = self.rev()
        fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
    elif '_descendantrev' in vars(self):
        # Otherwise propagate _descendantrev if we have one associated.
        fctx._descendantrev = self._descendantrev
        fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
    return fctx
892
892
def parents(self):
    # parent filectxs come from the filelog, plus rename metadata
    path = self._path
    fl = self._filelog
    pl = [(path, n, fl)
          for n in fl.parents(self._filenode) if n != nullid]

    r = fl.renamed(self._filenode)
    if r:
        # - In the simple rename case, both parent are nullid, pl is empty.
        # - In case of merge, only one of the parent is null id and should
        #   be replaced with the rename information. This parent is
        #   -always- the first one.
        #
        # As null id have always been filtered out in the previous list
        # comprehension, inserting to 0 will always result in "replacing
        # first nullid parent with rename information.
        pl.insert(0, (r[0], r[1], self._repo.file(r[0])))

    return [self._parentfilectx(p, n, l) for p, n, l in pl]
912
912
def p1(self):
    # first parent (carries the rename source when renamed)
    return self.parents()[0]

def p2(self):
    # second parent, or a null filectx when there is none
    parents = self.parents()
    if len(parents) == 2:
        return parents[1]
    return filectx(self._repo, self._path, fileid=-1,
                   filelog=self._filelog)
921
921
def annotate(self, follow=False, linenumber=False, diffopts=None):
    '''returns a list of tuples of ((ctx, number), line) for each line
    in the file, where ctx is the filectx of the node where
    that line was last changed; if linenumber parameter is true, number is
    the line number at the first appearance in the managed file, otherwise,
    number has a fixed value of False.
    '''

    def lines(text):
        # line count, counting a final unterminated line as one line
        if text.endswith("\n"):
            return text.count("\n")
        return text.count("\n") + 1

    if linenumber:
        def decorate(text, rev):
            return ([(rev, i) for i in xrange(1, lines(text) + 1)], text)
    else:
        def decorate(text, rev):
            return ([(rev, False)] * lines(text), text)

    def pair(parent, child):
        # propagate the parent's annotations into the child for all
        # unchanged ('=') blocks
        blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts)
        for (a1, a2, b1, b2), t in blocks:
            # Changed blocks ('!') or blocks made only of blank lines ('~')
            # belong to the child.
            if t == '=':
                child[0][b1:b2] = parent[0][a1:a2]
        return child

    getlog = util.lrucachefunc(lambda x: self._repo.file(x))

    def parents(f):
        # Cut _descendantrev here to mitigate the penalty of lazy linkrev
        # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
        # from the topmost introrev (= srcrev) down to p.linkrev() if it
        # isn't an ancestor of the srcrev.
        f._changeid
        pl = f.parents()

        # Don't return renamed parents if we aren't following.
        if not follow:
            pl = [p for p in pl if p.path() == f.path()]

        # renamed filectx won't have a filelog yet, so set it
        # from the cache to save time
        for p in pl:
            if not '_filelog' in p.__dict__:
                p._filelog = getlog(p.path())

        return pl

    # use linkrev to find the first changeset where self appeared
    base = self
    introrev = self.introrev()
    if self.rev() != introrev:
        base = self.filectx(self.filenode(), changeid=introrev)
    if getattr(base, '_ancestrycontext', None) is None:
        cl = self._repo.changelog
        if introrev is None:
            # wctx is not inclusive, but works because _ancestrycontext
            # is used to test filelog revisions
            ac = cl.ancestors([p.rev() for p in base.parents()],
                              inclusive=True)
        else:
            ac = cl.ancestors([introrev], inclusive=True)
        base._ancestrycontext = ac

    # This algorithm would prefer to be recursive, but Python is a
    # bit recursion-hostile. Instead we do an iterative
    # depth-first search.

    # 1st DFS pre-calculates pcache and needed
    visit = [base]
    pcache = {}
    needed = {base: 1}
    while visit:
        f = visit.pop()
        if f in pcache:
            continue
        pl = parents(f)
        pcache[f] = pl
        for p in pl:
            needed[p] = needed.get(p, 0) + 1
            if p not in pcache:
                visit.append(p)

    # 2nd DFS does the actual annotate
    visit[:] = [base]
    hist = {}
    while visit:
        f = visit[-1]
        if f in hist:
            visit.pop()
            continue

        ready = True
        pl = pcache[f]
        for p in pl:
            if p not in hist:
                ready = False
                visit.append(p)
        if ready:
            visit.pop()
            curr = decorate(f.data(), f)
            for p in pl:
                curr = pair(hist[p], curr)
                if needed[p] == 1:
                    # last consumer of this parent's history: free it
                    del hist[p]
                    del needed[p]
                else:
                    needed[p] -= 1

            hist[f] = curr
            del pcache[f]

    return zip(hist[base][0], hist[base][1].splitlines(True))
1039
1038
1040 def ancestors(self, followfirst=False):
1039 def ancestors(self, followfirst=False):
1041 visit = {}
1040 visit = {}
1042 c = self
1041 c = self
1043 if followfirst:
1042 if followfirst:
1044 cut = 1
1043 cut = 1
1045 else:
1044 else:
1046 cut = None
1045 cut = None
1047
1046
1048 while True:
1047 while True:
1049 for parent in c.parents()[:cut]:
1048 for parent in c.parents()[:cut]:
1050 visit[(parent.linkrev(), parent.filenode())] = parent
1049 visit[(parent.linkrev(), parent.filenode())] = parent
1051 if not visit:
1050 if not visit:
1052 break
1051 break
1053 c = visit.pop(max(visit))
1052 c = visit.pop(max(visit))
1054 yield c
1053 yield c
1055
1054
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        # at least one anchor is required to resolve the file revision
        assert (changeid is not None
                or fileid is not None
                or changectx is not None), \
                ("bad args: changeid=%r, fileid=%r, changectx=%r"
                 % (changeid, fileid, changectx))

        # attributes below are only assigned when provided; otherwise they
        # are computed lazily by @propertycache properties (defined on this
        # class or inherited from basefilectx)
        if filelog is not None:
            self._filelog = filelog

        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        # lazily build the owning changectx from self._changeid
        try:
            return changectx(self._repo, self._changeid)
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to build
            # `changectx` for filtered revision. In such case we fallback to
            # creating `changectx` on the unfiltered version of the reposition.
            # This fallback should not be an issue because `changectx` from
            # `filectx` are not used in complex operations that care about
            # filtering.
            #
            # This fallback is a cheap and dirty fix that prevent several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better as "crash"
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when solving linkrev issue are on the table.
            return changectx(self._repo.unfiltered(), self._changeid)

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog, changeid=changeid)

    def data(self):
        """Return the file content at this revision.

        Censored nodes either come back as "" (censor.policy=ignore) or
        abort with a hint about that setting.
        """
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config("censor", "policy", "abort") == "ignore":
                return ""
            raise error.Abort(_("censored node: %s") % short(self._filenode),
                              hint=_("set censor.policy to ignore errors"))

    def size(self):
        # size as recorded by the filelog for this file revision
        return self._filelog.size(self._filerev)

    def renamed(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return renamed

        if self.rev() == self.linkrev():
            return renamed

        # linkrev points elsewhere: only report the copy if neither parent
        # of the owning changeset already has this exact file revision
        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                # file does not exist in this parent; keep looking
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        """Initialize an uncommitted context.

        text - commit message; user/date default lazily to ui.username()
        and util.makedate() via @propertycache; changes - a status tuple
        to use instead of computing repo.status(); extra - extra commit
        metadata (a 'branch' entry is always filled in).
        """
        self._repo = repo
        # not committed yet, so there is no revision number or node
        self._rev = None
        self._node = None
        self._text = text
        if date:
            self._date = util.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise error.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
            if self._extra['branch'] == '':
                self._extra['branch'] = 'default'

    def __str__(self):
        # render as "<p1>+": the trailing '+' marks uncommitted changes
        return str(self._parents[0]) + "+"

    def __nonzero__(self):
        # Python 2 truth hook: a committable context is always truthy
        return True

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func

    @propertycache
    def _flagfunc(self):
        # prefer filesystem-provided flags; fall back to _buildflagfunc
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status

        This reuse the file nodeid from parent, but we append an extra letter
        when modified. Modified files get an extra 'm' while added files get
        an extra 'a'. This is used by manifests merge to see that files
        are different and by update logic to avoid deleting newly added files.
        """
        parents = self.parents()

        man1 = parents[0].manifest()
        man = man1.copy()
        if len(parents) > 1:
            man2 = self.p2().manifest()
            def getman(f):
                if f in man1:
                    return man1
                return man2
        else:
            getman = lambda f: man1

        copied = self._repo.dirstate.copies()
        ff = self._flagfunc
        for i, l in (("a", self._status.added), ("m", self._status.modified)):
            for f in l:
                # copied files reuse the node of their copy source
                orig = copied.get(f, f)
                man[f] = getman(orig).get(orig, nullid) + i
                try:
                    man.setflag(f, ff(f))
                except OSError:
                    # file vanished from disk; keep the parent's flags
                    pass

        for f in self._status.deleted + self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        # default status: working directory vs. its parent
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        return util.makedate()

    def subrev(self, subpath):
        # uncommitted: subrepo state has no recorded revision yet
        return None

    def manifestnode(self):
        # uncommitted: no manifest node has been written yet
        return None
    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        # note: deleted files are intentionally not part of files()
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)

    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def tags(self):
        # an uncommitted context carries no tags
        return []

    def bookmarks(self):
        # inherit the union of the parents' bookmarks
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        phase = phases.draft # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        # nothing can descend from an uncommitted context
        return []

    def flags(self, path):
        # if the manifest was already built, it is authoritative
        if '_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def walk(self, match):
        '''Generates matching file names.'''
        return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
                                               True, False))

    def matches(self, match):
        return sorted(self._repo.dirstate.matches(match))

    def ancestors(self):
        # parents first, then everything reachable through the changelog
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
                [p.rev() for p in self._parents]):
            yield changectx(self._repo, a)

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

        self._repo.dirstate.beginparentchange()
        for f in self.modified() + self.added():
            self._repo.dirstate.normal(f)
        for f in self.removed():
            self._repo.dirstate.drop(f)
        self._repo.dirstate.setparents(node)
        self._repo.dirstate.endparentchange()

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())
1379
1378
1380 class workingctx(committablectx):
1379 class workingctx(committablectx):
1381 """A workingctx object makes access to data related to
1380 """A workingctx object makes access to data related to
1382 the current working directory convenient.
1381 the current working directory convenient.
1383 date - any valid date string or (unixtime, offset), or None.
1382 date - any valid date string or (unixtime, offset), or None.
1384 user - username string, or None.
1383 user - username string, or None.
1385 extra - a dictionary of extra values, or None.
1384 extra - a dictionary of extra values, or None.
1386 changes - a list of file lists as returned by localrepo.status()
1385 changes - a list of file lists as returned by localrepo.status()
1387 or None to use the repository status.
1386 or None to use the repository status.
1388 """
1387 """
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        # workingctx adds no state of its own; all bookkeeping lives in
        # committablectx, which this simply forwards to
        super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1392
1391
1393 def __iter__(self):
1392 def __iter__(self):
1394 d = self._repo.dirstate
1393 d = self._repo.dirstate
1395 for f in d:
1394 for f in d:
1396 if d[f] != 'r':
1395 if d[f] != 'r':
1397 yield f
1396 yield f
1398
1397
1399 def __contains__(self, key):
1398 def __contains__(self, key):
1400 return self._repo.dirstate[key] not in "?r"
1399 return self._repo.dirstate[key] not in "?r"
1401
1400
    def hex(self):
        # the working directory has no real node; use the wdir pseudo-id
        return hex(wdirid)
1404
1403
1405 @propertycache
1404 @propertycache
1406 def _parents(self):
1405 def _parents(self):
1407 p = self._repo.dirstate.parents()
1406 p = self._repo.dirstate.parents()
1408 if p[1] == nullid:
1407 if p[1] == nullid:
1409 p = p[:-1]
1408 p = p[:-1]
1410 return [changectx(self._repo, x) for x in p]
1409 return [changectx(self._repo, x) for x in p]
1411
1410
    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        # passing filelog avoids reopening it when the caller already has one
        return workingfilectx(self._repo, path, workingctx=self,
                              filelog=filelog)
1416
1415
    def dirty(self, missing=False, merge=True, branch=True):
        "check whether a working directory is modified"
        # check subrepos first
        for s in sorted(self.substate):
            if self.sub(s).dirty():
                return True
        # check current working dir
        # NOTE: this returns a truthy *object* (e.g. the p2 changectx or a
        # file list), not necessarily a bool — callers use it in boolean
        # context only
        return ((merge and self.p2()) or
                (branch and self.branch() != self.p1().branch()) or
                self.modified() or self.added() or self.removed() or
                (missing and self.deleted()))
1428
1427
    def add(self, list, prefix=""):
        """Schedule the given files for addition; return the rejected ones.

        prefix is prepended to names in warning messages only (used when
        called on behalf of a subrepo).
        """
        join = lambda f: os.path.join(prefix, f)
        with self._repo.wlock():
            ui, ds = self._repo.ui, self._repo.dirstate
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                scmutil.checkportable(ui, join(f))
                try:
                    st = lstat(f)
                except OSError:
                    # missing on disk: warn and reject, keep processing
                    ui.warn(_("%s does not exist!\n") % join(f))
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    # warn only; the file is still added below
                    ui.warn(_("%s: up to %d MB of RAM may be required "
                              "to manage this file\n"
                              "(use 'hg revert %s' to cancel the "
                              "pending addition)\n")
                            % (f, 3 * st.st_size // 1000000, join(f)))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(_("%s not added: only files and symlinks "
                              "supported currently\n") % join(f))
                    rejected.append(f)
                elif ds[f] in 'amn':
                    # already added/merged/normal
                    ui.warn(_("%s already tracked!\n") % join(f))
                elif ds[f] == 'r':
                    # was removed: re-adding means "look it up again"
                    ds.normallookup(f)
                else:
                    ds.add(f)
            return rejected
1460
1459
1461 def forget(self, files, prefix=""):
1460 def forget(self, files, prefix=""):
1462 join = lambda f: os.path.join(prefix, f)
1461 join = lambda f: os.path.join(prefix, f)
1463 with self._repo.wlock():
1462 with self._repo.wlock():
1464 rejected = []
1463 rejected = []
1465 for f in files:
1464 for f in files:
1466 if f not in self._repo.dirstate:
1465 if f not in self._repo.dirstate:
1467 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1466 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1468 rejected.append(f)
1467 rejected.append(f)
1469 elif self._repo.dirstate[f] != 'a':
1468 elif self._repo.dirstate[f] != 'a':
1470 self._repo.dirstate.remove(f)
1469 self._repo.dirstate.remove(f)
1471 else:
1470 else:
1472 self._repo.dirstate.drop(f)
1471 self._repo.dirstate.drop(f)
1473 return rejected
1472 return rejected
1474
1473
1475 def undelete(self, list):
1474 def undelete(self, list):
1476 pctxs = self.parents()
1475 pctxs = self.parents()
1477 with self._repo.wlock():
1476 with self._repo.wlock():
1478 for f in list:
1477 for f in list:
1479 if self._repo.dirstate[f] != 'r':
1478 if self._repo.dirstate[f] != 'r':
1480 self._repo.ui.warn(_("%s not removed!\n") % f)
1479 self._repo.ui.warn(_("%s not removed!\n") % f)
1481 else:
1480 else:
1482 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1481 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1483 t = fctx.data()
1482 t = fctx.data()
1484 self._repo.wwrite(f, t, fctx.flags())
1483 self._repo.wwrite(f, t, fctx.flags())
1485 self._repo.dirstate.normal(f)
1484 self._repo.dirstate.normal(f)
1486
1485
    def copy(self, source, dest):
        """Record in the dirstate that *dest* was copied from *source*.

        dest must already exist in the working directory as a regular file
        or symlink; otherwise a warning is emitted and nothing is recorded.
        """
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError as err:
            # re-raise anything other than "file not found"
            if err.errno != errno.ENOENT:
                raise
            self._repo.ui.warn(_("%s does not exist!\n") % dest)
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(_("copy failed: %s is not a file or a "
                                 "symbolic link\n") % dest)
        else:
            with self._repo.wlock():
                if self._repo.dirstate[dest] in '?':
                    # untracked destination: start tracking it
                    self._repo.dirstate.add(dest)
                elif self._repo.dirstate[dest] in 'r':
                    # previously removed: resurrect via lookup
                    self._repo.dirstate.normallookup(dest)
                self._repo.dirstate.copy(source, dest)
1505
1504
1506 def match(self, pats=[], include=None, exclude=None, default='glob',
1505 def match(self, pats=[], include=None, exclude=None, default='glob',
1507 listsubrepos=False, badfn=None):
1506 listsubrepos=False, badfn=None):
1508 r = self._repo
1507 r = self._repo
1509
1508
1510 # Only a case insensitive filesystem needs magic to translate user input
1509 # Only a case insensitive filesystem needs magic to translate user input
1511 # to actual case in the filesystem.
1510 # to actual case in the filesystem.
1512 if not util.fscasesensitive(r.root):
1511 if not util.fscasesensitive(r.root):
1513 return matchmod.icasefsmatcher(r.root, r.getcwd(), pats, include,
1512 return matchmod.icasefsmatcher(r.root, r.getcwd(), pats, include,
1514 exclude, default, r.auditor, self,
1513 exclude, default, r.auditor, self,
1515 listsubrepos=listsubrepos,
1514 listsubrepos=listsubrepos,
1516 badfn=badfn)
1515 badfn=badfn)
1517 return matchmod.match(r.root, r.getcwd(), pats,
1516 return matchmod.match(r.root, r.getcwd(), pats,
1518 include, exclude, default,
1517 include, exclude, default,
1519 auditor=r.auditor, ctx=self,
1518 auditor=r.auditor, ctx=self,
1520 listsubrepos=listsubrepos, badfn=badfn)
1519 listsubrepos=listsubrepos, badfn=badfn)
1521
1520
    def _filtersuspectsymlink(self, files):
        """Drop files flagged 'l' whose content does not look like a symlink.

        On filesystems that support symlinks (dirstate._checklink) the list
        is returned unchanged.
        """
        if not files or self._repo.dirstate._checklink:
            return files

        # Symlink placeholders may get non-symlink-like contents
        # via user error or dereferencing by NFS or Samba servers,
        # so we filter out any placeholders that don't look like a
        # symlink
        sane = []
        for f in files:
            if self.flags(f) == 'l':
                d = self[f].data()
                # a plausible symlink target: non-empty, short, single
                # line, text-only
                if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
                    self._repo.ui.debug('ignoring suspect symlink placeholder'
                                        ' "%s"\n' % f)
                    continue
            sane.append(f)
        return sane
1540
1539
    def _checklookup(self, files):
        """Resolve files whose clean/modified state is uncertain.

        Returns a (modified, fixup) pair: *modified* are files that really
        differ from the first parent, *fixup* are files that turned out to
        be clean (their dirstate entries are opportunistically refreshed).
        """
        # check for any possibly clean files
        if not files:
            return [], []

        modified = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            if (f not in pctx or self.flags(f) != pctx.flags(f)
                or pctx[f].cmp(self[f])):
                modified.append(f)
            else:
                fixup.append(f)

        # update dirstate for files that are actually clean
        if fixup:
            try:
                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                with self._repo.wlock(False):
                    normal = self._repo.dirstate.normal
                    for f in fixup:
                        normal(f)
                    # write changes out explicitly, because nesting
                    # wlock at runtime may prevent 'wlock.release()'
                    # after this block from doing so for subsequent
                    # changing files
                    self._repo.dirstate.write(self._repo.currenttransaction())
            except error.LockError:
                # couldn't get the lock: skip the optional fixup entirely
                pass
        return modified, fixup
1576
1575
1577 def _manifestmatches(self, match, s):
1576 def _manifestmatches(self, match, s):
1578 """Slow path for workingctx
1577 """Slow path for workingctx
1579
1578
1580 The fast path is when we compare the working directory to its parent
1579 The fast path is when we compare the working directory to its parent
1581 which means this function is comparing with a non-parent; therefore we
1580 which means this function is comparing with a non-parent; therefore we
1582 need to build a manifest and return what matches.
1581 need to build a manifest and return what matches.
1583 """
1582 """
1584 mf = self._repo['.']._manifestmatches(match, s)
1583 mf = self._repo['.']._manifestmatches(match, s)
1585 for f in s.modified + s.added:
1584 for f in s.modified + s.added:
1586 mf[f] = _newnode
1585 mf[f] = _newnode
1587 mf.setflag(f, self.flags(f))
1586 mf.setflag(f, self.flags(f))
1588 for f in s.removed:
1587 for f in s.removed:
1589 if f in mf:
1588 if f in mf:
1590 del mf[f]
1589 del mf[f]
1591 return mf
1590 return mf
1592
1591
    def _dirstatestatus(self, match=None, ignored=False, clean=False,
                        unknown=False):
        '''Gets the status from the dirstate -- internal use only.

        Returns a scmutil.status tuple; as a side effect it may refresh
        the dirstate for files found to be clean and cache the result on
        ``self._status`` when no match filter is in effect.
        '''
        listignored, listclean, listunknown = ignored, clean, unknown
        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        subrepos = []
        if '.hgsub' in self:
            subrepos = sorted(self.substate)
        # 'cmp' is the list of files the dirstate could not decide about
        cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
                                            listclean, listunknown)

        # check for any possibly clean files
        if cmp:
            modified2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)

            # update dirstate for files that are actually clean
            if fixup and listclean:
                s.clean.extend(fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(s.modified, s.added, s.removed,
                                              s.deleted, [], [], [])
            else:
                self._status = s

        return s
1623
1622
    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo['.']:
            # comparing against a non-parent: fall back to the generic
            # manifest-based status computation in the superclass
            s = super(workingctx, self)._buildstatus(other, s, match,
                                                     listignored, listclean,
                                                     listunknown)
        return s
1643
1642
    def _matchstatus(self, other, match):
        """override the match method with a filter for directory patterns

        We use inheritance to customize the match.bad method only in cases of
        workingctx since it belongs only to the working directory when
        comparing against the parent changeset.

        If we aren't comparing against the working directory's parent, then we
        just use the default match object sent to us.
        """
        superself = super(workingctx, self)
        match = superself._matchstatus(other, match)
        if other != self._repo['.']:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in other and not other.hasdir(f):
                    self._repo.ui.warn('%s: %s\n' %
                                       (self._repo.dirstate.pathto(f), msg))
            # replace the default bad-file callback; 'bad' closes over
            # both 'self' and 'other'
            match.bad = bad
        return match
1665
1664
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""
    def __init__(self, repo, path, filelog=None, ctx=None):
        # Minimal state; revision-related fields stay None until a
        # commit assigns them.
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        # an uncommitted file context always "exists"
        return True

    # Python 3 spells the truthiness hook __bool__; alias it so the
    # object is not falsy there.
    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def filenode(ctx, path):
            # nullid means "not present in this changeset"
            return ctx._manifest.get(path, nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            # copy source becomes the first parent; filelog unknown (None)
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        # drop parents where the file does not exist (nullid)
        return [self._parentfilectx(p, fileid=n, filelog=l)
                for p, n, l in pl if n != nullid]

    def children(self):
        # an uncommitted file has no committed children
        return []
1710
1709
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""

    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        # lazily bind this file to a fresh working directory context
        return workingctx(self._repo)

    def data(self):
        """Return the file's current content from the working directory."""
        return self._repo.wread(self._path)

    def renamed(self):
        """Return (copy source, parent filenode) if copied, else None."""
        source = self._repo.dirstate.copied(self._path)
        if not source:
            return None
        return source, self._changectx._parents[0]._manifest.get(source,
                                                                 nullid)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size

    def date(self):
        """Return (mtime, tzoffset) of the on-disk file.

        Falls back to the changectx date when the file is missing.
        """
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)

    def write(self, data, flags):
        """wraps repo.wwrite"""
        self._repo.wwrite(self._path, data, flags)
1756
1755
class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """
    def __init__(self, repo, changes,
                 text="", user=None, date=None, extra=None):
        # bypass workingctx.__init__: seed committablectx directly with
        # the precomputed 'changes' status
        super(workingctx, self).__init__(repo, text, user, date, extra,
                                         changes)

    def _dirstatestatus(self, match=None, ignored=False, clean=False,
                        unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        if clean:
            # clean = every tracked file not touched by this commit
            clean = [f for f in self._manifest if f not in self._changedset]
        else:
            clean = []
        return scmutil.status([f for f in self._status.modified if match(f)],
                              [f for f in self._status.added if match(f)],
                              [f for f in self._status.removed if match(f)],
                              [], [], [], clean)

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        changed = set(self._status.modified)
        changed.update(self._status.added)
        changed.update(self._status.removed)
        return changed
1794
1793
def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    cache = {}

    def getfilectx(repo, memctx, path):
        # EAFP: hit the cache first, compute and store on a miss
        try:
            return cache[path]
        except KeyError:
            result = cache[path] = func(repo, memctx, path)
            return result

    return getfilectx
1810
1809
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time while
    related files data and is made available through a callback
    mechanism.  'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn raises an
    IOError. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by util.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        # normalize missing parents (None) to nullid
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [changectx(self._repo, p) for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        self.substate = {}

        # if store is not callable, wrap it in a function
        if not callable(filectxfn):
            # filectxfn is a mapping of path -> filectx-like objects
            def getfilectx(repo, memctx, path):
                fctx = filectxfn[path]
                # this is weird but apparently we only keep track of one parent
                # (why not only store that instead of a tuple?)
                copied = fctx.renamed()
                if copied:
                    copied = copied[0]
                return memfilectx(repo, path, fctx.data(),
                                  islink=fctx.islink(), isexec=fctx.isexec(),
                                  copied=copied, memctx=memctx)
            self._filectxfn = getfilectx
        else:
            # memoizing increases performance for e.g. vcs convert scenarios.
            self._filectxfn = makecachingfilectxfn(filectxfn)

        if extra:
            self._extra = extra.copy()
        else:
            self._extra = {}

        # an unset or empty branch always means 'default'
        if self._extra.get('branch', '') == '':
            self._extra['branch'] = 'default'

        if editor:
            # let the editor callback (re)write the commit message and
            # persist it for recovery
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            p1node = nullid
            p2node = nullid
            p = pctx[f].parents() # if file isn't in pctx, check p2?
            if len(p) > 0:
                p1node = p[0].filenode()
                if len(p) > 1:
                    p2node = p[1].filenode()
            man[f] = revlog.hash(self[f].data(), p1node, p2node)

        for f in self._status.added:
            # new file: no file-level parents
            man[f] = revlog.hash(self[f].data(), nullid, nullid)

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                # not in either parent: newly added
                added.append(f)
            elif self[f]:
                # filectxfn returned a file context: content change
                modified.append(f)
            else:
                # filectxfn returned nothing: file was removed
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
1947
1946
class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, path, data, islink=False,
                 isexec=False, copied=None, memctx=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, memctx)
        self._data = data
        # flags string: 'l' for symlink, 'x' for executable, '' for plain
        self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
        self._copied = None
        if copied:
            # nullid stands in for the (unknown) source filenode
            self._copied = (copied, nullid)

    def data(self):
        return self._data
    def size(self):
        return len(self.data())
    def flags(self):
        return self._flags
    def renamed(self):
        # (source path, nullid) if this file was copied, else None
        return self._copied

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        # NOTE(review): nothing here shows changectx supporting item
        # deletion -- confirm this statement is actually reachable/valid
        del self._changectx[self._path]

    def write(self, data, flags):
        """wraps repo.wwrite"""
        # NOTE(review): 'flags' is accepted for interface parity but not
        # stored -- confirm whether self._flags should be updated too
        self._data = data
@@ -1,384 +1,383
1 # mdiff.py - diff and patch routines for mercurial
1 # mdiff.py - diff and patch routines for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import re
10 import re
11 import struct
11 import struct
12 import zlib
12 import zlib
13
13
14 from .i18n import _
14 from .i18n import _
15 from . import (
15 from . import (
16 base85,
16 base85,
17 bdiff,
17 bdiff,
18 error,
18 error,
19 mpatch,
19 mpatch,
20 util,
20 util,
21 )
21 )
22
22
23 def splitnewlines(text):
23 def splitnewlines(text):
24 '''like str.splitlines, but only split on newlines.'''
24 '''like str.splitlines, but only split on newlines.'''
25 lines = [l + '\n' for l in text.split('\n')]
25 lines = [l + '\n' for l in text.split('\n')]
26 if lines:
26 if lines:
27 if lines[-1] == '\n':
27 if lines[-1] == '\n':
28 lines.pop()
28 lines.pop()
29 else:
29 else:
30 lines[-1] = lines[-1][:-1]
30 lines[-1] = lines[-1][:-1]
31 return lines
31 return lines
32
32
33 class diffopts(object):
33 class diffopts(object):
34 '''context is the number of context lines
34 '''context is the number of context lines
35 text treats all files as text
35 text treats all files as text
36 showfunc enables diff -p output
36 showfunc enables diff -p output
37 git enables the git extended patch format
37 git enables the git extended patch format
38 nodates removes dates from diff headers
38 nodates removes dates from diff headers
39 nobinary ignores binary files
39 nobinary ignores binary files
40 noprefix disables the 'a/' and 'b/' prefixes (ignored in plain mode)
40 noprefix disables the 'a/' and 'b/' prefixes (ignored in plain mode)
41 ignorews ignores all whitespace changes in the diff
41 ignorews ignores all whitespace changes in the diff
42 ignorewsamount ignores changes in the amount of whitespace
42 ignorewsamount ignores changes in the amount of whitespace
43 ignoreblanklines ignores changes whose lines are all blank
43 ignoreblanklines ignores changes whose lines are all blank
44 upgrade generates git diffs to avoid data loss
44 upgrade generates git diffs to avoid data loss
45 '''
45 '''
46
46
47 defaults = {
47 defaults = {
48 'context': 3,
48 'context': 3,
49 'text': False,
49 'text': False,
50 'showfunc': False,
50 'showfunc': False,
51 'git': False,
51 'git': False,
52 'nodates': False,
52 'nodates': False,
53 'nobinary': False,
53 'nobinary': False,
54 'noprefix': False,
54 'noprefix': False,
55 'ignorews': False,
55 'ignorews': False,
56 'ignorewsamount': False,
56 'ignorewsamount': False,
57 'ignoreblanklines': False,
57 'ignoreblanklines': False,
58 'upgrade': False,
58 'upgrade': False,
59 }
59 }
60
60
61 def __init__(self, **opts):
61 def __init__(self, **opts):
62 for k in self.defaults.keys():
62 for k in self.defaults.keys():
63 v = opts.get(k)
63 v = opts.get(k)
64 if v is None:
64 if v is None:
65 v = self.defaults[k]
65 v = self.defaults[k]
66 setattr(self, k, v)
66 setattr(self, k, v)
67
67
68 try:
68 try:
69 self.context = int(self.context)
69 self.context = int(self.context)
70 except ValueError:
70 except ValueError:
71 raise error.Abort(_('diff context lines count must be '
71 raise error.Abort(_('diff context lines count must be '
72 'an integer, not %r') % self.context)
72 'an integer, not %r') % self.context)
73
73
74 def copy(self, **kwargs):
74 def copy(self, **kwargs):
75 opts = dict((k, getattr(self, k)) for k in self.defaults)
75 opts = dict((k, getattr(self, k)) for k in self.defaults)
76 opts.update(kwargs)
76 opts.update(kwargs)
77 return diffopts(**opts)
77 return diffopts(**opts)
78
78
79 defaultopts = diffopts()
79 defaultopts = diffopts()
80
80
81 def wsclean(opts, text, blank=True):
81 def wsclean(opts, text, blank=True):
82 if opts.ignorews:
82 if opts.ignorews:
83 text = bdiff.fixws(text, 1)
83 text = bdiff.fixws(text, 1)
84 elif opts.ignorewsamount:
84 elif opts.ignorewsamount:
85 text = bdiff.fixws(text, 0)
85 text = bdiff.fixws(text, 0)
86 if blank and opts.ignoreblanklines:
86 if blank and opts.ignoreblanklines:
87 text = re.sub('\n+', '\n', text).strip('\n')
87 text = re.sub('\n+', '\n', text).strip('\n')
88 return text
88 return text
89
89
90 def splitblock(base1, lines1, base2, lines2, opts):
90 def splitblock(base1, lines1, base2, lines2, opts):
91 # The input lines matches except for interwoven blank lines. We
91 # The input lines matches except for interwoven blank lines. We
92 # transform it into a sequence of matching blocks and blank blocks.
92 # transform it into a sequence of matching blocks and blank blocks.
93 lines1 = [(wsclean(opts, l) and 1 or 0) for l in lines1]
93 lines1 = [(wsclean(opts, l) and 1 or 0) for l in lines1]
94 lines2 = [(wsclean(opts, l) and 1 or 0) for l in lines2]
94 lines2 = [(wsclean(opts, l) and 1 or 0) for l in lines2]
95 s1, e1 = 0, len(lines1)
95 s1, e1 = 0, len(lines1)
96 s2, e2 = 0, len(lines2)
96 s2, e2 = 0, len(lines2)
97 while s1 < e1 or s2 < e2:
97 while s1 < e1 or s2 < e2:
98 i1, i2, btype = s1, s2, '='
98 i1, i2, btype = s1, s2, '='
99 if (i1 >= e1 or lines1[i1] == 0
99 if (i1 >= e1 or lines1[i1] == 0
100 or i2 >= e2 or lines2[i2] == 0):
100 or i2 >= e2 or lines2[i2] == 0):
101 # Consume the block of blank lines
101 # Consume the block of blank lines
102 btype = '~'
102 btype = '~'
103 while i1 < e1 and lines1[i1] == 0:
103 while i1 < e1 and lines1[i1] == 0:
104 i1 += 1
104 i1 += 1
105 while i2 < e2 and lines2[i2] == 0:
105 while i2 < e2 and lines2[i2] == 0:
106 i2 += 1
106 i2 += 1
107 else:
107 else:
108 # Consume the matching lines
108 # Consume the matching lines
109 while i1 < e1 and lines1[i1] == 1 and lines2[i2] == 1:
109 while i1 < e1 and lines1[i1] == 1 and lines2[i2] == 1:
110 i1 += 1
110 i1 += 1
111 i2 += 1
111 i2 += 1
112 yield [base1 + s1, base1 + i1, base2 + s2, base2 + i2], btype
112 yield [base1 + s1, base1 + i1, base2 + s2, base2 + i2], btype
113 s1 = i1
113 s1 = i1
114 s2 = i2
114 s2 = i2
115
115
116 def allblocks(text1, text2, opts=None, lines1=None, lines2=None, refine=False):
116 def allblocks(text1, text2, opts=None, lines1=None, lines2=None):
117 """Return (block, type) tuples, where block is an mdiff.blocks
117 """Return (block, type) tuples, where block is an mdiff.blocks
118 line entry. type is '=' for blocks matching exactly one another
118 line entry. type is '=' for blocks matching exactly one another
119 (bdiff blocks), '!' for non-matching blocks and '~' for blocks
119 (bdiff blocks), '!' for non-matching blocks and '~' for blocks
120 matching only after having filtered blank lines. If refine is True,
120 matching only after having filtered blank lines.
121 then '~' blocks are refined and are only made of blank lines.
122 line1 and line2 are text1 and text2 split with splitnewlines() if
121 line1 and line2 are text1 and text2 split with splitnewlines() if
123 they are already available.
122 they are already available.
124 """
123 """
125 if opts is None:
124 if opts is None:
126 opts = defaultopts
125 opts = defaultopts
127 if opts.ignorews or opts.ignorewsamount:
126 if opts.ignorews or opts.ignorewsamount:
128 text1 = wsclean(opts, text1, False)
127 text1 = wsclean(opts, text1, False)
129 text2 = wsclean(opts, text2, False)
128 text2 = wsclean(opts, text2, False)
130 diff = bdiff.blocks(text1, text2)
129 diff = bdiff.blocks(text1, text2)
131 for i, s1 in enumerate(diff):
130 for i, s1 in enumerate(diff):
132 # The first match is special.
131 # The first match is special.
133 # we've either found a match starting at line 0 or a match later
132 # we've either found a match starting at line 0 or a match later
134 # in the file. If it starts later, old and new below will both be
133 # in the file. If it starts later, old and new below will both be
135 # empty and we'll continue to the next match.
134 # empty and we'll continue to the next match.
136 if i > 0:
135 if i > 0:
137 s = diff[i - 1]
136 s = diff[i - 1]
138 else:
137 else:
139 s = [0, 0, 0, 0]
138 s = [0, 0, 0, 0]
140 s = [s[1], s1[0], s[3], s1[2]]
139 s = [s[1], s1[0], s[3], s1[2]]
141
140
142 # bdiff sometimes gives huge matches past eof, this check eats them,
141 # bdiff sometimes gives huge matches past eof, this check eats them,
143 # and deals with the special first match case described above
142 # and deals with the special first match case described above
144 if s[0] != s[1] or s[2] != s[3]:
143 if s[0] != s[1] or s[2] != s[3]:
145 type = '!'
144 type = '!'
146 if opts.ignoreblanklines:
145 if opts.ignoreblanklines:
147 if lines1 is None:
146 if lines1 is None:
148 lines1 = splitnewlines(text1)
147 lines1 = splitnewlines(text1)
149 if lines2 is None:
148 if lines2 is None:
150 lines2 = splitnewlines(text2)
149 lines2 = splitnewlines(text2)
151 old = wsclean(opts, "".join(lines1[s[0]:s[1]]))
150 old = wsclean(opts, "".join(lines1[s[0]:s[1]]))
152 new = wsclean(opts, "".join(lines2[s[2]:s[3]]))
151 new = wsclean(opts, "".join(lines2[s[2]:s[3]]))
153 if old == new:
152 if old == new:
154 type = '~'
153 type = '~'
155 yield s, type
154 yield s, type
156 yield s1, '='
155 yield s1, '='
157
156
158 def unidiff(a, ad, b, bd, fn1, fn2, opts=defaultopts):
157 def unidiff(a, ad, b, bd, fn1, fn2, opts=defaultopts):
159 def datetag(date, fn=None):
158 def datetag(date, fn=None):
160 if not opts.git and not opts.nodates:
159 if not opts.git and not opts.nodates:
161 return '\t%s\n' % date
160 return '\t%s\n' % date
162 if fn and ' ' in fn:
161 if fn and ' ' in fn:
163 return '\t\n'
162 return '\t\n'
164 return '\n'
163 return '\n'
165
164
166 if not a and not b:
165 if not a and not b:
167 return ""
166 return ""
168
167
169 if opts.noprefix:
168 if opts.noprefix:
170 aprefix = bprefix = ''
169 aprefix = bprefix = ''
171 else:
170 else:
172 aprefix = 'a/'
171 aprefix = 'a/'
173 bprefix = 'b/'
172 bprefix = 'b/'
174
173
175 epoch = util.datestr((0, 0))
174 epoch = util.datestr((0, 0))
176
175
177 fn1 = util.pconvert(fn1)
176 fn1 = util.pconvert(fn1)
178 fn2 = util.pconvert(fn2)
177 fn2 = util.pconvert(fn2)
179
178
180 if not opts.text and (util.binary(a) or util.binary(b)):
179 if not opts.text and (util.binary(a) or util.binary(b)):
181 if a and b and len(a) == len(b) and a == b:
180 if a and b and len(a) == len(b) and a == b:
182 return ""
181 return ""
183 l = ['Binary file %s has changed\n' % fn1]
182 l = ['Binary file %s has changed\n' % fn1]
184 elif not a:
183 elif not a:
185 b = splitnewlines(b)
184 b = splitnewlines(b)
186 if a is None:
185 if a is None:
187 l1 = '--- /dev/null%s' % datetag(epoch)
186 l1 = '--- /dev/null%s' % datetag(epoch)
188 else:
187 else:
189 l1 = "--- %s%s%s" % (aprefix, fn1, datetag(ad, fn1))
188 l1 = "--- %s%s%s" % (aprefix, fn1, datetag(ad, fn1))
190 l2 = "+++ %s%s" % (bprefix + fn2, datetag(bd, fn2))
189 l2 = "+++ %s%s" % (bprefix + fn2, datetag(bd, fn2))
191 l3 = "@@ -0,0 +1,%d @@\n" % len(b)
190 l3 = "@@ -0,0 +1,%d @@\n" % len(b)
192 l = [l1, l2, l3] + ["+" + e for e in b]
191 l = [l1, l2, l3] + ["+" + e for e in b]
193 elif not b:
192 elif not b:
194 a = splitnewlines(a)
193 a = splitnewlines(a)
195 l1 = "--- %s%s%s" % (aprefix, fn1, datetag(ad, fn1))
194 l1 = "--- %s%s%s" % (aprefix, fn1, datetag(ad, fn1))
196 if b is None:
195 if b is None:
197 l2 = '+++ /dev/null%s' % datetag(epoch)
196 l2 = '+++ /dev/null%s' % datetag(epoch)
198 else:
197 else:
199 l2 = "+++ %s%s%s" % (bprefix, fn2, datetag(bd, fn2))
198 l2 = "+++ %s%s%s" % (bprefix, fn2, datetag(bd, fn2))
200 l3 = "@@ -1,%d +0,0 @@\n" % len(a)
199 l3 = "@@ -1,%d +0,0 @@\n" % len(a)
201 l = [l1, l2, l3] + ["-" + e for e in a]
200 l = [l1, l2, l3] + ["-" + e for e in a]
202 else:
201 else:
203 al = splitnewlines(a)
202 al = splitnewlines(a)
204 bl = splitnewlines(b)
203 bl = splitnewlines(b)
205 l = list(_unidiff(a, b, al, bl, opts=opts))
204 l = list(_unidiff(a, b, al, bl, opts=opts))
206 if not l:
205 if not l:
207 return ""
206 return ""
208
207
209 l.insert(0, "--- %s%s%s" % (aprefix, fn1, datetag(ad, fn1)))
208 l.insert(0, "--- %s%s%s" % (aprefix, fn1, datetag(ad, fn1)))
210 l.insert(1, "+++ %s%s%s" % (bprefix, fn2, datetag(bd, fn2)))
209 l.insert(1, "+++ %s%s%s" % (bprefix, fn2, datetag(bd, fn2)))
211
210
212 for ln in xrange(len(l)):
211 for ln in xrange(len(l)):
213 if l[ln][-1] != '\n':
212 if l[ln][-1] != '\n':
214 l[ln] += "\n\ No newline at end of file\n"
213 l[ln] += "\n\ No newline at end of file\n"
215
214
216 return "".join(l)
215 return "".join(l)
217
216
218 # creates a headerless unified diff
217 # creates a headerless unified diff
219 # t1 and t2 are the text to be diffed
218 # t1 and t2 are the text to be diffed
220 # l1 and l2 are the text broken up into lines
219 # l1 and l2 are the text broken up into lines
221 def _unidiff(t1, t2, l1, l2, opts=defaultopts):
220 def _unidiff(t1, t2, l1, l2, opts=defaultopts):
222 def contextend(l, len):
221 def contextend(l, len):
223 ret = l + opts.context
222 ret = l + opts.context
224 if ret > len:
223 if ret > len:
225 ret = len
224 ret = len
226 return ret
225 return ret
227
226
228 def contextstart(l):
227 def contextstart(l):
229 ret = l - opts.context
228 ret = l - opts.context
230 if ret < 0:
229 if ret < 0:
231 return 0
230 return 0
232 return ret
231 return ret
233
232
234 lastfunc = [0, '']
233 lastfunc = [0, '']
235 def yieldhunk(hunk):
234 def yieldhunk(hunk):
236 (astart, a2, bstart, b2, delta) = hunk
235 (astart, a2, bstart, b2, delta) = hunk
237 aend = contextend(a2, len(l1))
236 aend = contextend(a2, len(l1))
238 alen = aend - astart
237 alen = aend - astart
239 blen = b2 - bstart + aend - a2
238 blen = b2 - bstart + aend - a2
240
239
241 func = ""
240 func = ""
242 if opts.showfunc:
241 if opts.showfunc:
243 lastpos, func = lastfunc
242 lastpos, func = lastfunc
244 # walk backwards from the start of the context up to the start of
243 # walk backwards from the start of the context up to the start of
245 # the previous hunk context until we find a line starting with an
244 # the previous hunk context until we find a line starting with an
246 # alphanumeric char.
245 # alphanumeric char.
247 for i in xrange(astart - 1, lastpos - 1, -1):
246 for i in xrange(astart - 1, lastpos - 1, -1):
248 if l1[i][0].isalnum():
247 if l1[i][0].isalnum():
249 func = ' ' + l1[i].rstrip()[:40]
248 func = ' ' + l1[i].rstrip()[:40]
250 lastfunc[1] = func
249 lastfunc[1] = func
251 break
250 break
252 # by recording this hunk's starting point as the next place to
251 # by recording this hunk's starting point as the next place to
253 # start looking for function lines, we avoid reading any line in
252 # start looking for function lines, we avoid reading any line in
254 # the file more than once.
253 # the file more than once.
255 lastfunc[0] = astart
254 lastfunc[0] = astart
256
255
257 # zero-length hunk ranges report their start line as one less
256 # zero-length hunk ranges report their start line as one less
258 if alen:
257 if alen:
259 astart += 1
258 astart += 1
260 if blen:
259 if blen:
261 bstart += 1
260 bstart += 1
262
261
263 yield "@@ -%d,%d +%d,%d @@%s\n" % (astart, alen,
262 yield "@@ -%d,%d +%d,%d @@%s\n" % (astart, alen,
264 bstart, blen, func)
263 bstart, blen, func)
265 for x in delta:
264 for x in delta:
266 yield x
265 yield x
267 for x in xrange(a2, aend):
266 for x in xrange(a2, aend):
268 yield ' ' + l1[x]
267 yield ' ' + l1[x]
269
268
270 # bdiff.blocks gives us the matching sequences in the files. The loop
269 # bdiff.blocks gives us the matching sequences in the files. The loop
271 # below finds the spaces between those matching sequences and translates
270 # below finds the spaces between those matching sequences and translates
272 # them into diff output.
271 # them into diff output.
273 #
272 #
274 hunk = None
273 hunk = None
275 ignoredlines = 0
274 ignoredlines = 0
276 for s, stype in allblocks(t1, t2, opts, l1, l2):
275 for s, stype in allblocks(t1, t2, opts, l1, l2):
277 a1, a2, b1, b2 = s
276 a1, a2, b1, b2 = s
278 if stype != '!':
277 if stype != '!':
279 if stype == '~':
278 if stype == '~':
280 # The diff context lines are based on t1 content. When
279 # The diff context lines are based on t1 content. When
281 # blank lines are ignored, the new lines offsets must
280 # blank lines are ignored, the new lines offsets must
282 # be adjusted as if equivalent blocks ('~') had the
281 # be adjusted as if equivalent blocks ('~') had the
283 # same sizes on both sides.
282 # same sizes on both sides.
284 ignoredlines += (b2 - b1) - (a2 - a1)
283 ignoredlines += (b2 - b1) - (a2 - a1)
285 continue
284 continue
286 delta = []
285 delta = []
287 old = l1[a1:a2]
286 old = l1[a1:a2]
288 new = l2[b1:b2]
287 new = l2[b1:b2]
289
288
290 b1 -= ignoredlines
289 b1 -= ignoredlines
291 b2 -= ignoredlines
290 b2 -= ignoredlines
292 astart = contextstart(a1)
291 astart = contextstart(a1)
293 bstart = contextstart(b1)
292 bstart = contextstart(b1)
294 prev = None
293 prev = None
295 if hunk:
294 if hunk:
296 # join with the previous hunk if it falls inside the context
295 # join with the previous hunk if it falls inside the context
297 if astart < hunk[1] + opts.context + 1:
296 if astart < hunk[1] + opts.context + 1:
298 prev = hunk
297 prev = hunk
299 astart = hunk[1]
298 astart = hunk[1]
300 bstart = hunk[3]
299 bstart = hunk[3]
301 else:
300 else:
302 for x in yieldhunk(hunk):
301 for x in yieldhunk(hunk):
303 yield x
302 yield x
304 if prev:
303 if prev:
305 # we've joined the previous hunk, record the new ending points.
304 # we've joined the previous hunk, record the new ending points.
306 hunk[1] = a2
305 hunk[1] = a2
307 hunk[3] = b2
306 hunk[3] = b2
308 delta = hunk[4]
307 delta = hunk[4]
309 else:
308 else:
310 # create a new hunk
309 # create a new hunk
311 hunk = [astart, a2, bstart, b2, delta]
310 hunk = [astart, a2, bstart, b2, delta]
312
311
313 delta[len(delta):] = [' ' + x for x in l1[astart:a1]]
312 delta[len(delta):] = [' ' + x for x in l1[astart:a1]]
314 delta[len(delta):] = ['-' + x for x in old]
313 delta[len(delta):] = ['-' + x for x in old]
315 delta[len(delta):] = ['+' + x for x in new]
314 delta[len(delta):] = ['+' + x for x in new]
316
315
317 if hunk:
316 if hunk:
318 for x in yieldhunk(hunk):
317 for x in yieldhunk(hunk):
319 yield x
318 yield x
320
319
321 def b85diff(to, tn):
320 def b85diff(to, tn):
322 '''print base85-encoded binary diff'''
321 '''print base85-encoded binary diff'''
323 def fmtline(line):
322 def fmtline(line):
324 l = len(line)
323 l = len(line)
325 if l <= 26:
324 if l <= 26:
326 l = chr(ord('A') + l - 1)
325 l = chr(ord('A') + l - 1)
327 else:
326 else:
328 l = chr(l - 26 + ord('a') - 1)
327 l = chr(l - 26 + ord('a') - 1)
329 return '%c%s\n' % (l, base85.b85encode(line, True))
328 return '%c%s\n' % (l, base85.b85encode(line, True))
330
329
331 def chunk(text, csize=52):
330 def chunk(text, csize=52):
332 l = len(text)
331 l = len(text)
333 i = 0
332 i = 0
334 while i < l:
333 while i < l:
335 yield text[i:i + csize]
334 yield text[i:i + csize]
336 i += csize
335 i += csize
337
336
338 if to is None:
337 if to is None:
339 to = ''
338 to = ''
340 if tn is None:
339 if tn is None:
341 tn = ''
340 tn = ''
342
341
343 if to == tn:
342 if to == tn:
344 return ''
343 return ''
345
344
346 # TODO: deltas
345 # TODO: deltas
347 ret = []
346 ret = []
348 ret.append('GIT binary patch\n')
347 ret.append('GIT binary patch\n')
349 ret.append('literal %s\n' % len(tn))
348 ret.append('literal %s\n' % len(tn))
350 for l in chunk(zlib.compress(tn)):
349 for l in chunk(zlib.compress(tn)):
351 ret.append(fmtline(l))
350 ret.append(fmtline(l))
352 ret.append('\n')
351 ret.append('\n')
353
352
354 return ''.join(ret)
353 return ''.join(ret)
355
354
356 def patchtext(bin):
355 def patchtext(bin):
357 pos = 0
356 pos = 0
358 t = []
357 t = []
359 while pos < len(bin):
358 while pos < len(bin):
360 p1, p2, l = struct.unpack(">lll", bin[pos:pos + 12])
359 p1, p2, l = struct.unpack(">lll", bin[pos:pos + 12])
361 pos += 12
360 pos += 12
362 t.append(bin[pos:pos + l])
361 t.append(bin[pos:pos + l])
363 pos += l
362 pos += l
364 return "".join(t)
363 return "".join(t)
365
364
366 def patch(a, bin):
365 def patch(a, bin):
367 if len(a) == 0:
366 if len(a) == 0:
368 # skip over trivial delta header
367 # skip over trivial delta header
369 return util.buffer(bin, 12)
368 return util.buffer(bin, 12)
370 return mpatch.patches(a, [bin])
369 return mpatch.patches(a, [bin])
371
370
372 # similar to difflib.SequenceMatcher.get_matching_blocks
371 # similar to difflib.SequenceMatcher.get_matching_blocks
373 def get_matching_blocks(a, b):
372 def get_matching_blocks(a, b):
374 return [(d[0], d[2], d[1] - d[0]) for d in bdiff.blocks(a, b)]
373 return [(d[0], d[2], d[1] - d[0]) for d in bdiff.blocks(a, b)]
375
374
376 def trivialdiffheader(length):
375 def trivialdiffheader(length):
377 return struct.pack(">lll", 0, 0, length) if length else ''
376 return struct.pack(">lll", 0, 0, length) if length else ''
378
377
379 def replacediffheader(oldlen, newlen):
378 def replacediffheader(oldlen, newlen):
380 return struct.pack(">lll", 0, oldlen, newlen)
379 return struct.pack(">lll", 0, oldlen, newlen)
381
380
382 patches = mpatch.patches
381 patches = mpatch.patches
383 patchedsize = mpatch.patchedsize
382 patchedsize = mpatch.patchedsize
384 textdiff = bdiff.bdiff
383 textdiff = bdiff.bdiff
General Comments 0
You need to be logged in to leave comments. Login now