##// END OF EJS Templates
patch: replace "prefix" and "relroot" arguments by "pathfn" (API)...
Martin von Zweigbergk -
r41795:d4c9eebd default
parent child Browse files
Show More
@@ -1,2486 +1,2485 b''
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import filecmp
11 import filecmp
12 import os
12 import os
13 import stat
13 import stat
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 addednodeid,
17 addednodeid,
18 hex,
18 hex,
19 modifiednodeid,
19 modifiednodeid,
20 nullid,
20 nullid,
21 nullrev,
21 nullrev,
22 short,
22 short,
23 wdirfilenodeids,
23 wdirfilenodeids,
24 wdirid,
24 wdirid,
25 )
25 )
26 from . import (
26 from . import (
27 dagop,
27 dagop,
28 encoding,
28 encoding,
29 error,
29 error,
30 fileset,
30 fileset,
31 match as matchmod,
31 match as matchmod,
32 obsolete as obsmod,
32 obsolete as obsmod,
33 patch,
33 patch,
34 pathutil,
34 pathutil,
35 phases,
35 phases,
36 pycompat,
36 pycompat,
37 repoview,
37 repoview,
38 scmutil,
38 scmutil,
39 sparse,
39 sparse,
40 subrepo,
40 subrepo,
41 subrepoutil,
41 subrepoutil,
42 util,
42 util,
43 )
43 )
44 from .utils import (
44 from .utils import (
45 dateutil,
45 dateutil,
46 stringutil,
46 stringutil,
47 )
47 )
48
48
49 propertycache = util.propertycache
49 propertycache = util.propertycache
50
50
51 class basectx(object):
51 class basectx(object):
52 """A basectx object represents the common logic for its children:
52 """A basectx object represents the common logic for its children:
53 changectx: read-only context that is already present in the repo,
53 changectx: read-only context that is already present in the repo,
54 workingctx: a context that represents the working directory and can
54 workingctx: a context that represents the working directory and can
55 be committed,
55 be committed,
56 memctx: a context that represents changes in-memory and can also
56 memctx: a context that represents changes in-memory and can also
57 be committed."""
57 be committed."""
58
58
59 def __init__(self, repo):
59 def __init__(self, repo):
60 self._repo = repo
60 self._repo = repo
61
61
62 def __bytes__(self):
62 def __bytes__(self):
63 return short(self.node())
63 return short(self.node())
64
64
65 __str__ = encoding.strmethod(__bytes__)
65 __str__ = encoding.strmethod(__bytes__)
66
66
67 def __repr__(self):
67 def __repr__(self):
68 return r"<%s %s>" % (type(self).__name__, str(self))
68 return r"<%s %s>" % (type(self).__name__, str(self))
69
69
70 def __eq__(self, other):
70 def __eq__(self, other):
71 try:
71 try:
72 return type(self) == type(other) and self._rev == other._rev
72 return type(self) == type(other) and self._rev == other._rev
73 except AttributeError:
73 except AttributeError:
74 return False
74 return False
75
75
76 def __ne__(self, other):
76 def __ne__(self, other):
77 return not (self == other)
77 return not (self == other)
78
78
79 def __contains__(self, key):
79 def __contains__(self, key):
80 return key in self._manifest
80 return key in self._manifest
81
81
82 def __getitem__(self, key):
82 def __getitem__(self, key):
83 return self.filectx(key)
83 return self.filectx(key)
84
84
85 def __iter__(self):
85 def __iter__(self):
86 return iter(self._manifest)
86 return iter(self._manifest)
87
87
88 def _buildstatusmanifest(self, status):
88 def _buildstatusmanifest(self, status):
89 """Builds a manifest that includes the given status results, if this is
89 """Builds a manifest that includes the given status results, if this is
90 a working copy context. For non-working copy contexts, it just returns
90 a working copy context. For non-working copy contexts, it just returns
91 the normal manifest."""
91 the normal manifest."""
92 return self.manifest()
92 return self.manifest()
93
93
94 def _matchstatus(self, other, match):
94 def _matchstatus(self, other, match):
95 """This internal method provides a way for child objects to override the
95 """This internal method provides a way for child objects to override the
96 match operator.
96 match operator.
97 """
97 """
98 return match
98 return match
99
99
100 def _buildstatus(self, other, s, match, listignored, listclean,
100 def _buildstatus(self, other, s, match, listignored, listclean,
101 listunknown):
101 listunknown):
102 """build a status with respect to another context"""
102 """build a status with respect to another context"""
103 # Load earliest manifest first for caching reasons. More specifically,
103 # Load earliest manifest first for caching reasons. More specifically,
104 # if you have revisions 1000 and 1001, 1001 is probably stored as a
104 # if you have revisions 1000 and 1001, 1001 is probably stored as a
105 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
105 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
106 # 1000 and cache it so that when you read 1001, we just need to apply a
106 # 1000 and cache it so that when you read 1001, we just need to apply a
107 # delta to what's in the cache. So that's one full reconstruction + one
107 # delta to what's in the cache. So that's one full reconstruction + one
108 # delta application.
108 # delta application.
109 mf2 = None
109 mf2 = None
110 if self.rev() is not None and self.rev() < other.rev():
110 if self.rev() is not None and self.rev() < other.rev():
111 mf2 = self._buildstatusmanifest(s)
111 mf2 = self._buildstatusmanifest(s)
112 mf1 = other._buildstatusmanifest(s)
112 mf1 = other._buildstatusmanifest(s)
113 if mf2 is None:
113 if mf2 is None:
114 mf2 = self._buildstatusmanifest(s)
114 mf2 = self._buildstatusmanifest(s)
115
115
116 modified, added = [], []
116 modified, added = [], []
117 removed = []
117 removed = []
118 clean = []
118 clean = []
119 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
119 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
120 deletedset = set(deleted)
120 deletedset = set(deleted)
121 d = mf1.diff(mf2, match=match, clean=listclean)
121 d = mf1.diff(mf2, match=match, clean=listclean)
122 for fn, value in d.iteritems():
122 for fn, value in d.iteritems():
123 if fn in deletedset:
123 if fn in deletedset:
124 continue
124 continue
125 if value is None:
125 if value is None:
126 clean.append(fn)
126 clean.append(fn)
127 continue
127 continue
128 (node1, flag1), (node2, flag2) = value
128 (node1, flag1), (node2, flag2) = value
129 if node1 is None:
129 if node1 is None:
130 added.append(fn)
130 added.append(fn)
131 elif node2 is None:
131 elif node2 is None:
132 removed.append(fn)
132 removed.append(fn)
133 elif flag1 != flag2:
133 elif flag1 != flag2:
134 modified.append(fn)
134 modified.append(fn)
135 elif node2 not in wdirfilenodeids:
135 elif node2 not in wdirfilenodeids:
136 # When comparing files between two commits, we save time by
136 # When comparing files between two commits, we save time by
137 # not comparing the file contents when the nodeids differ.
137 # not comparing the file contents when the nodeids differ.
138 # Note that this means we incorrectly report a reverted change
138 # Note that this means we incorrectly report a reverted change
139 # to a file as a modification.
139 # to a file as a modification.
140 modified.append(fn)
140 modified.append(fn)
141 elif self[fn].cmp(other[fn]):
141 elif self[fn].cmp(other[fn]):
142 modified.append(fn)
142 modified.append(fn)
143 else:
143 else:
144 clean.append(fn)
144 clean.append(fn)
145
145
146 if removed:
146 if removed:
147 # need to filter files if they are already reported as removed
147 # need to filter files if they are already reported as removed
148 unknown = [fn for fn in unknown if fn not in mf1 and
148 unknown = [fn for fn in unknown if fn not in mf1 and
149 (not match or match(fn))]
149 (not match or match(fn))]
150 ignored = [fn for fn in ignored if fn not in mf1 and
150 ignored = [fn for fn in ignored if fn not in mf1 and
151 (not match or match(fn))]
151 (not match or match(fn))]
152 # if they're deleted, don't report them as removed
152 # if they're deleted, don't report them as removed
153 removed = [fn for fn in removed if fn not in deletedset]
153 removed = [fn for fn in removed if fn not in deletedset]
154
154
155 return scmutil.status(modified, added, removed, deleted, unknown,
155 return scmutil.status(modified, added, removed, deleted, unknown,
156 ignored, clean)
156 ignored, clean)
157
157
158 @propertycache
158 @propertycache
159 def substate(self):
159 def substate(self):
160 return subrepoutil.state(self, self._repo.ui)
160 return subrepoutil.state(self, self._repo.ui)
161
161
162 def subrev(self, subpath):
162 def subrev(self, subpath):
163 return self.substate[subpath][1]
163 return self.substate[subpath][1]
164
164
165 def rev(self):
165 def rev(self):
166 return self._rev
166 return self._rev
167 def node(self):
167 def node(self):
168 return self._node
168 return self._node
169 def hex(self):
169 def hex(self):
170 return hex(self.node())
170 return hex(self.node())
171 def manifest(self):
171 def manifest(self):
172 return self._manifest
172 return self._manifest
173 def manifestctx(self):
173 def manifestctx(self):
174 return self._manifestctx
174 return self._manifestctx
175 def repo(self):
175 def repo(self):
176 return self._repo
176 return self._repo
177 def phasestr(self):
177 def phasestr(self):
178 return phases.phasenames[self.phase()]
178 return phases.phasenames[self.phase()]
179 def mutable(self):
179 def mutable(self):
180 return self.phase() > phases.public
180 return self.phase() > phases.public
181
181
182 def matchfileset(self, expr, badfn=None):
182 def matchfileset(self, expr, badfn=None):
183 return fileset.match(self, expr, badfn=badfn)
183 return fileset.match(self, expr, badfn=badfn)
184
184
185 def obsolete(self):
185 def obsolete(self):
186 """True if the changeset is obsolete"""
186 """True if the changeset is obsolete"""
187 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
187 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
188
188
189 def extinct(self):
189 def extinct(self):
190 """True if the changeset is extinct"""
190 """True if the changeset is extinct"""
191 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
191 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
192
192
193 def orphan(self):
193 def orphan(self):
194 """True if the changeset is not obsolete, but its ancestor is"""
194 """True if the changeset is not obsolete, but its ancestor is"""
195 return self.rev() in obsmod.getrevs(self._repo, 'orphan')
195 return self.rev() in obsmod.getrevs(self._repo, 'orphan')
196
196
197 def phasedivergent(self):
197 def phasedivergent(self):
198 """True if the changeset tries to be a successor of a public changeset
198 """True if the changeset tries to be a successor of a public changeset
199
199
200 Only non-public and non-obsolete changesets may be phase-divergent.
200 Only non-public and non-obsolete changesets may be phase-divergent.
201 """
201 """
202 return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')
202 return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')
203
203
204 def contentdivergent(self):
204 def contentdivergent(self):
205 """Is a successor of a changeset with multiple possible successor sets
205 """Is a successor of a changeset with multiple possible successor sets
206
206
207 Only non-public and non-obsolete changesets may be content-divergent.
207 Only non-public and non-obsolete changesets may be content-divergent.
208 """
208 """
209 return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')
209 return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')
210
210
211 def isunstable(self):
211 def isunstable(self):
212 """True if the changeset is either orphan, phase-divergent or
212 """True if the changeset is either orphan, phase-divergent or
213 content-divergent"""
213 content-divergent"""
214 return self.orphan() or self.phasedivergent() or self.contentdivergent()
214 return self.orphan() or self.phasedivergent() or self.contentdivergent()
215
215
216 def instabilities(self):
216 def instabilities(self):
217 """return the list of instabilities affecting this changeset.
217 """return the list of instabilities affecting this changeset.
218
218
219 Instabilities are returned as strings. possible values are:
219 Instabilities are returned as strings. possible values are:
220 - orphan,
220 - orphan,
221 - phase-divergent,
221 - phase-divergent,
222 - content-divergent.
222 - content-divergent.
223 """
223 """
224 instabilities = []
224 instabilities = []
225 if self.orphan():
225 if self.orphan():
226 instabilities.append('orphan')
226 instabilities.append('orphan')
227 if self.phasedivergent():
227 if self.phasedivergent():
228 instabilities.append('phase-divergent')
228 instabilities.append('phase-divergent')
229 if self.contentdivergent():
229 if self.contentdivergent():
230 instabilities.append('content-divergent')
230 instabilities.append('content-divergent')
231 return instabilities
231 return instabilities
232
232
233 def parents(self):
233 def parents(self):
234 """return contexts for each parent changeset"""
234 """return contexts for each parent changeset"""
235 return self._parents
235 return self._parents
236
236
237 def p1(self):
237 def p1(self):
238 return self._parents[0]
238 return self._parents[0]
239
239
240 def p2(self):
240 def p2(self):
241 parents = self._parents
241 parents = self._parents
242 if len(parents) == 2:
242 if len(parents) == 2:
243 return parents[1]
243 return parents[1]
244 return self._repo[nullrev]
244 return self._repo[nullrev]
245
245
246 def _fileinfo(self, path):
246 def _fileinfo(self, path):
247 if r'_manifest' in self.__dict__:
247 if r'_manifest' in self.__dict__:
248 try:
248 try:
249 return self._manifest[path], self._manifest.flags(path)
249 return self._manifest[path], self._manifest.flags(path)
250 except KeyError:
250 except KeyError:
251 raise error.ManifestLookupError(self._node, path,
251 raise error.ManifestLookupError(self._node, path,
252 _('not found in manifest'))
252 _('not found in manifest'))
253 if r'_manifestdelta' in self.__dict__ or path in self.files():
253 if r'_manifestdelta' in self.__dict__ or path in self.files():
254 if path in self._manifestdelta:
254 if path in self._manifestdelta:
255 return (self._manifestdelta[path],
255 return (self._manifestdelta[path],
256 self._manifestdelta.flags(path))
256 self._manifestdelta.flags(path))
257 mfl = self._repo.manifestlog
257 mfl = self._repo.manifestlog
258 try:
258 try:
259 node, flag = mfl[self._changeset.manifest].find(path)
259 node, flag = mfl[self._changeset.manifest].find(path)
260 except KeyError:
260 except KeyError:
261 raise error.ManifestLookupError(self._node, path,
261 raise error.ManifestLookupError(self._node, path,
262 _('not found in manifest'))
262 _('not found in manifest'))
263
263
264 return node, flag
264 return node, flag
265
265
266 def filenode(self, path):
266 def filenode(self, path):
267 return self._fileinfo(path)[0]
267 return self._fileinfo(path)[0]
268
268
269 def flags(self, path):
269 def flags(self, path):
270 try:
270 try:
271 return self._fileinfo(path)[1]
271 return self._fileinfo(path)[1]
272 except error.LookupError:
272 except error.LookupError:
273 return ''
273 return ''
274
274
275 def sub(self, path, allowcreate=True):
275 def sub(self, path, allowcreate=True):
276 '''return a subrepo for the stored revision of path, never wdir()'''
276 '''return a subrepo for the stored revision of path, never wdir()'''
277 return subrepo.subrepo(self, path, allowcreate=allowcreate)
277 return subrepo.subrepo(self, path, allowcreate=allowcreate)
278
278
279 def nullsub(self, path, pctx):
279 def nullsub(self, path, pctx):
280 return subrepo.nullsubrepo(self, path, pctx)
280 return subrepo.nullsubrepo(self, path, pctx)
281
281
282 def workingsub(self, path):
282 def workingsub(self, path):
283 '''return a subrepo for the stored revision, or wdir if this is a wdir
283 '''return a subrepo for the stored revision, or wdir if this is a wdir
284 context.
284 context.
285 '''
285 '''
286 return subrepo.subrepo(self, path, allowwdir=True)
286 return subrepo.subrepo(self, path, allowwdir=True)
287
287
288 def match(self, pats=None, include=None, exclude=None, default='glob',
288 def match(self, pats=None, include=None, exclude=None, default='glob',
289 listsubrepos=False, badfn=None):
289 listsubrepos=False, badfn=None):
290 r = self._repo
290 r = self._repo
291 return matchmod.match(r.root, r.getcwd(), pats,
291 return matchmod.match(r.root, r.getcwd(), pats,
292 include, exclude, default,
292 include, exclude, default,
293 auditor=r.nofsauditor, ctx=self,
293 auditor=r.nofsauditor, ctx=self,
294 listsubrepos=listsubrepos, badfn=badfn)
294 listsubrepos=listsubrepos, badfn=badfn)
295
295
296 def diff(self, ctx2=None, match=None, changes=None, opts=None,
296 def diff(self, ctx2=None, match=None, changes=None, opts=None,
297 losedatafn=None, prefix='', relroot='', copy=None,
297 losedatafn=None, pathfn=None, copy=None,
298 copysourcematch=None, hunksfilterfn=None):
298 copysourcematch=None, hunksfilterfn=None):
299 """Returns a diff generator for the given contexts and matcher"""
299 """Returns a diff generator for the given contexts and matcher"""
300 if ctx2 is None:
300 if ctx2 is None:
301 ctx2 = self.p1()
301 ctx2 = self.p1()
302 if ctx2 is not None:
302 if ctx2 is not None:
303 ctx2 = self._repo[ctx2]
303 ctx2 = self._repo[ctx2]
304 return patch.diff(self._repo, ctx2, self, match=match, changes=changes,
304 return patch.diff(self._repo, ctx2, self, match=match, changes=changes,
305 opts=opts, losedatafn=losedatafn, prefix=prefix,
305 opts=opts, losedatafn=losedatafn, pathfn=pathfn,
306 relroot=relroot, copy=copy,
306 copy=copy, copysourcematch=copysourcematch,
307 copysourcematch=copysourcematch,
308 hunksfilterfn=hunksfilterfn)
307 hunksfilterfn=hunksfilterfn)
309
308
310 def dirs(self):
309 def dirs(self):
311 return self._manifest.dirs()
310 return self._manifest.dirs()
312
311
313 def hasdir(self, dir):
312 def hasdir(self, dir):
314 return self._manifest.hasdir(dir)
313 return self._manifest.hasdir(dir)
315
314
316 def status(self, other=None, match=None, listignored=False,
315 def status(self, other=None, match=None, listignored=False,
317 listclean=False, listunknown=False, listsubrepos=False):
316 listclean=False, listunknown=False, listsubrepos=False):
318 """return status of files between two nodes or node and working
317 """return status of files between two nodes or node and working
319 directory.
318 directory.
320
319
321 If other is None, compare this node with working directory.
320 If other is None, compare this node with working directory.
322
321
323 returns (modified, added, removed, deleted, unknown, ignored, clean)
322 returns (modified, added, removed, deleted, unknown, ignored, clean)
324 """
323 """
325
324
326 ctx1 = self
325 ctx1 = self
327 ctx2 = self._repo[other]
326 ctx2 = self._repo[other]
328
327
329 # This next code block is, admittedly, fragile logic that tests for
328 # This next code block is, admittedly, fragile logic that tests for
330 # reversing the contexts and wouldn't need to exist if it weren't for
329 # reversing the contexts and wouldn't need to exist if it weren't for
331 # the fast (and common) code path of comparing the working directory
330 # the fast (and common) code path of comparing the working directory
332 # with its first parent.
331 # with its first parent.
333 #
332 #
334 # What we're aiming for here is the ability to call:
333 # What we're aiming for here is the ability to call:
335 #
334 #
336 # workingctx.status(parentctx)
335 # workingctx.status(parentctx)
337 #
336 #
338 # If we always built the manifest for each context and compared those,
337 # If we always built the manifest for each context and compared those,
339 # then we'd be done. But the special case of the above call means we
338 # then we'd be done. But the special case of the above call means we
340 # just copy the manifest of the parent.
339 # just copy the manifest of the parent.
341 reversed = False
340 reversed = False
342 if (not isinstance(ctx1, changectx)
341 if (not isinstance(ctx1, changectx)
343 and isinstance(ctx2, changectx)):
342 and isinstance(ctx2, changectx)):
344 reversed = True
343 reversed = True
345 ctx1, ctx2 = ctx2, ctx1
344 ctx1, ctx2 = ctx2, ctx1
346
345
347 match = self._repo.narrowmatch(match)
346 match = self._repo.narrowmatch(match)
348 match = ctx2._matchstatus(ctx1, match)
347 match = ctx2._matchstatus(ctx1, match)
349 r = scmutil.status([], [], [], [], [], [], [])
348 r = scmutil.status([], [], [], [], [], [], [])
350 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
349 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
351 listunknown)
350 listunknown)
352
351
353 if reversed:
352 if reversed:
354 # Reverse added and removed. Clear deleted, unknown and ignored as
353 # Reverse added and removed. Clear deleted, unknown and ignored as
355 # these make no sense to reverse.
354 # these make no sense to reverse.
356 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
355 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
357 r.clean)
356 r.clean)
358
357
359 if listsubrepos:
358 if listsubrepos:
360 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
359 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
361 try:
360 try:
362 rev2 = ctx2.subrev(subpath)
361 rev2 = ctx2.subrev(subpath)
363 except KeyError:
362 except KeyError:
364 # A subrepo that existed in node1 was deleted between
363 # A subrepo that existed in node1 was deleted between
365 # node1 and node2 (inclusive). Thus, ctx2's substate
364 # node1 and node2 (inclusive). Thus, ctx2's substate
366 # won't contain that subpath. The best we can do is ignore it.
365 # won't contain that subpath. The best we can do is ignore it.
367 rev2 = None
366 rev2 = None
368 submatch = matchmod.subdirmatcher(subpath, match)
367 submatch = matchmod.subdirmatcher(subpath, match)
369 s = sub.status(rev2, match=submatch, ignored=listignored,
368 s = sub.status(rev2, match=submatch, ignored=listignored,
370 clean=listclean, unknown=listunknown,
369 clean=listclean, unknown=listunknown,
371 listsubrepos=True)
370 listsubrepos=True)
372 for rfiles, sfiles in zip(r, s):
371 for rfiles, sfiles in zip(r, s):
373 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
372 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
374
373
375 for l in r:
374 for l in r:
376 l.sort()
375 l.sort()
377
376
378 return r
377 return r
379
378
380 class changectx(basectx):
379 class changectx(basectx):
381 """A changecontext object makes access to data related to a particular
380 """A changecontext object makes access to data related to a particular
382 changeset convenient. It represents a read-only context already present in
381 changeset convenient. It represents a read-only context already present in
383 the repo."""
382 the repo."""
384 def __init__(self, repo, rev, node):
383 def __init__(self, repo, rev, node):
385 super(changectx, self).__init__(repo)
384 super(changectx, self).__init__(repo)
386 self._rev = rev
385 self._rev = rev
387 self._node = node
386 self._node = node
388
387
389 def __hash__(self):
388 def __hash__(self):
390 try:
389 try:
391 return hash(self._rev)
390 return hash(self._rev)
392 except AttributeError:
391 except AttributeError:
393 return id(self)
392 return id(self)
394
393
395 def __nonzero__(self):
394 def __nonzero__(self):
396 return self._rev != nullrev
395 return self._rev != nullrev
397
396
398 __bool__ = __nonzero__
397 __bool__ = __nonzero__
399
398
400 @propertycache
399 @propertycache
401 def _changeset(self):
400 def _changeset(self):
402 return self._repo.changelog.changelogrevision(self.rev())
401 return self._repo.changelog.changelogrevision(self.rev())
403
402
404 @propertycache
403 @propertycache
405 def _manifest(self):
404 def _manifest(self):
406 return self._manifestctx.read()
405 return self._manifestctx.read()
407
406
408 @property
407 @property
409 def _manifestctx(self):
408 def _manifestctx(self):
410 return self._repo.manifestlog[self._changeset.manifest]
409 return self._repo.manifestlog[self._changeset.manifest]
411
410
412 @propertycache
411 @propertycache
413 def _manifestdelta(self):
412 def _manifestdelta(self):
414 return self._manifestctx.readdelta()
413 return self._manifestctx.readdelta()
415
414
416 @propertycache
415 @propertycache
417 def _parents(self):
416 def _parents(self):
418 repo = self._repo
417 repo = self._repo
419 p1, p2 = repo.changelog.parentrevs(self._rev)
418 p1, p2 = repo.changelog.parentrevs(self._rev)
420 if p2 == nullrev:
419 if p2 == nullrev:
421 return [repo[p1]]
420 return [repo[p1]]
422 return [repo[p1], repo[p2]]
421 return [repo[p1], repo[p2]]
423
422
424 def changeset(self):
423 def changeset(self):
425 c = self._changeset
424 c = self._changeset
426 return (
425 return (
427 c.manifest,
426 c.manifest,
428 c.user,
427 c.user,
429 c.date,
428 c.date,
430 c.files,
429 c.files,
431 c.description,
430 c.description,
432 c.extra,
431 c.extra,
433 )
432 )
434 def manifestnode(self):
433 def manifestnode(self):
435 return self._changeset.manifest
434 return self._changeset.manifest
436
435
437 def user(self):
436 def user(self):
438 return self._changeset.user
437 return self._changeset.user
439 def date(self):
438 def date(self):
440 return self._changeset.date
439 return self._changeset.date
441 def files(self):
440 def files(self):
442 return self._changeset.files
441 return self._changeset.files
443 def description(self):
442 def description(self):
444 return self._changeset.description
443 return self._changeset.description
445 def branch(self):
444 def branch(self):
446 return encoding.tolocal(self._changeset.extra.get("branch"))
445 return encoding.tolocal(self._changeset.extra.get("branch"))
447 def closesbranch(self):
446 def closesbranch(self):
448 return 'close' in self._changeset.extra
447 return 'close' in self._changeset.extra
449 def extra(self):
448 def extra(self):
450 """Return a dict of extra information."""
449 """Return a dict of extra information."""
451 return self._changeset.extra
450 return self._changeset.extra
452 def tags(self):
451 def tags(self):
453 """Return a list of byte tag names"""
452 """Return a list of byte tag names"""
454 return self._repo.nodetags(self._node)
453 return self._repo.nodetags(self._node)
455 def bookmarks(self):
454 def bookmarks(self):
456 """Return a list of byte bookmark names."""
455 """Return a list of byte bookmark names."""
457 return self._repo.nodebookmarks(self._node)
456 return self._repo.nodebookmarks(self._node)
458 def phase(self):
457 def phase(self):
459 return self._repo._phasecache.phase(self._repo, self._rev)
458 return self._repo._phasecache.phase(self._repo, self._rev)
460 def hidden(self):
459 def hidden(self):
461 return self._rev in repoview.filterrevs(self._repo, 'visible')
460 return self._rev in repoview.filterrevs(self._repo, 'visible')
462
461
463 def isinmemory(self):
462 def isinmemory(self):
464 return False
463 return False
465
464
466 def children(self):
465 def children(self):
467 """return list of changectx contexts for each child changeset.
466 """return list of changectx contexts for each child changeset.
468
467
469 This returns only the immediate child changesets. Use descendants() to
468 This returns only the immediate child changesets. Use descendants() to
470 recursively walk children.
469 recursively walk children.
471 """
470 """
472 c = self._repo.changelog.children(self._node)
471 c = self._repo.changelog.children(self._node)
473 return [self._repo[x] for x in c]
472 return [self._repo[x] for x in c]
474
473
475 def ancestors(self):
474 def ancestors(self):
476 for a in self._repo.changelog.ancestors([self._rev]):
475 for a in self._repo.changelog.ancestors([self._rev]):
477 yield self._repo[a]
476 yield self._repo[a]
478
477
479 def descendants(self):
478 def descendants(self):
480 """Recursively yield all children of the changeset.
479 """Recursively yield all children of the changeset.
481
480
482 For just the immediate children, use children()
481 For just the immediate children, use children()
483 """
482 """
484 for d in self._repo.changelog.descendants([self._rev]):
483 for d in self._repo.changelog.descendants([self._rev]):
485 yield self._repo[d]
484 yield self._repo[d]
486
485
487 def filectx(self, path, fileid=None, filelog=None):
486 def filectx(self, path, fileid=None, filelog=None):
488 """get a file context from this changeset"""
487 """get a file context from this changeset"""
489 if fileid is None:
488 if fileid is None:
490 fileid = self.filenode(path)
489 fileid = self.filenode(path)
491 return filectx(self._repo, path, fileid=fileid,
490 return filectx(self._repo, path, fileid=fileid,
492 changectx=self, filelog=filelog)
491 changectx=self, filelog=filelog)
493
492
494 def ancestor(self, c2, warn=False):
493 def ancestor(self, c2, warn=False):
495 """return the "best" ancestor context of self and c2
494 """return the "best" ancestor context of self and c2
496
495
497 If there are multiple candidates, it will show a message and check
496 If there are multiple candidates, it will show a message and check
498 merge.preferancestor configuration before falling back to the
497 merge.preferancestor configuration before falling back to the
499 revlog ancestor."""
498 revlog ancestor."""
500 # deal with workingctxs
499 # deal with workingctxs
501 n2 = c2._node
500 n2 = c2._node
502 if n2 is None:
501 if n2 is None:
503 n2 = c2._parents[0]._node
502 n2 = c2._parents[0]._node
504 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
503 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
505 if not cahs:
504 if not cahs:
506 anc = nullid
505 anc = nullid
507 elif len(cahs) == 1:
506 elif len(cahs) == 1:
508 anc = cahs[0]
507 anc = cahs[0]
509 else:
508 else:
510 # experimental config: merge.preferancestor
509 # experimental config: merge.preferancestor
511 for r in self._repo.ui.configlist('merge', 'preferancestor'):
510 for r in self._repo.ui.configlist('merge', 'preferancestor'):
512 try:
511 try:
513 ctx = scmutil.revsymbol(self._repo, r)
512 ctx = scmutil.revsymbol(self._repo, r)
514 except error.RepoLookupError:
513 except error.RepoLookupError:
515 continue
514 continue
516 anc = ctx.node()
515 anc = ctx.node()
517 if anc in cahs:
516 if anc in cahs:
518 break
517 break
519 else:
518 else:
520 anc = self._repo.changelog.ancestor(self._node, n2)
519 anc = self._repo.changelog.ancestor(self._node, n2)
521 if warn:
520 if warn:
522 self._repo.ui.status(
521 self._repo.ui.status(
523 (_("note: using %s as ancestor of %s and %s\n") %
522 (_("note: using %s as ancestor of %s and %s\n") %
524 (short(anc), short(self._node), short(n2))) +
523 (short(anc), short(self._node), short(n2))) +
525 ''.join(_(" alternatively, use --config "
524 ''.join(_(" alternatively, use --config "
526 "merge.preferancestor=%s\n") %
525 "merge.preferancestor=%s\n") %
527 short(n) for n in sorted(cahs) if n != anc))
526 short(n) for n in sorted(cahs) if n != anc))
528 return self._repo[anc]
527 return self._repo[anc]
529
528
530 def isancestorof(self, other):
529 def isancestorof(self, other):
531 """True if this changeset is an ancestor of other"""
530 """True if this changeset is an ancestor of other"""
532 return self._repo.changelog.isancestorrev(self._rev, other._rev)
531 return self._repo.changelog.isancestorrev(self._rev, other._rev)
533
532
    def walk(self, match):
        '''Generates matching file names.'''

        # Wrap match.bad method to have message with nodeid
        def bad(fn, msg):
            # The manifest doesn't know about subrepos, so don't complain about
            # paths into valid subrepos.
            if any(fn == s or fn.startswith(s + '/')
                   for s in self.substate):
                return
            match.bad(fn, _('no such file in rev %s') % self)

        # narrowmatch() restricts the matcher before walking the manifest;
        # badmatch() swaps in our subrepo-aware bad() callback.
        m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
        return self._manifest.walk(m)
548
547
    def matches(self, match):
        # Convenience alias: walk() already generates exactly the matching
        # file names for this changeset.
        return self.walk(match)
551
550
class basefilectx(object):
    """A filecontext object represents the common logic for its children:
    filectx: read-only access to a filerevision that is already present
             in the repo,
    workingfilectx: a filecontext that represents files from the working
                    directory,
    memfilectx: a filecontext that represents files in-memory,
    """
    @propertycache
    def _filelog(self):
        # Lazily open the filelog for this path.
        return self._repo.file(self._path)

    @propertycache
    def _changeid(self):
        # Prefer information attached at construction time over the raw
        # linkrev stored in the filelog, which may be an alias (point at a
        # changeset that is not an ancestor of the one we came from).
        if r'_changectx' in self.__dict__:
            return self._changectx.rev()
        elif r'_descendantrev' in self.__dict__:
            # this file context was created from a revision with a known
            # descendant, we can (lazily) correct for linkrev aliases
            return self._adjustlinkrev(self._descendantrev)
        else:
            return self._filelog.linkrev(self._filerev)

    @propertycache
    def _filenode(self):
        # Resolve an explicitly supplied fileid if we have one; otherwise
        # ask the changectx which filenode this path has there.
        if r'_fileid' in self.__dict__:
            return self._filelog.lookup(self._fileid)
        else:
            return self._changectx.filenode(self._path)

    @propertycache
    def _filerev(self):
        return self._filelog.rev(self._filenode)

    @propertycache
    def _repopath(self):
        return self._path
589
588
    def __nonzero__(self):
        """Truthiness: True when the file revision actually exists."""
        try:
            self._filenode
            return True
        except error.LookupError:
            # file is missing
            return False

    __bool__ = __nonzero__

    def __bytes__(self):
        try:
            return "%s@%s" % (self.path(), self._changectx)
        except error.LookupError:
            # the changeset backing this filectx could not be resolved
            return "%s@???" % self.path()

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return r"<%s %s>" % (type(self).__name__, str(self))

    def __hash__(self):
        # Hash on (path, filenode) when available; fall back to identity
        # for contexts that have no filenode attribute yet.
        try:
            return hash((self._path, self._filenode))
        except AttributeError:
            return id(self)

    def __eq__(self, other):
        # Equal iff same concrete type, same path and same file revision.
        try:
            return (type(self) == type(other) and self._path == other._path
                    and self._filenode == other._filenode)
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)
626
625
    # Simple delegating accessors: file-level data comes from the filelog
    # entry, changeset-level metadata from the associated changectx.
    def filerev(self):
        return self._filerev
    def filenode(self):
        return self._filenode
    @propertycache
    def _flags(self):
        return self._changectx.flags(self._path)
    def flags(self):
        return self._flags
    def filelog(self):
        return self._filelog
    def rev(self):
        return self._changeid
    def linkrev(self):
        # Raw linkrev from the filelog; may point at a changeset that is
        # not an ancestor of ours (see _adjustlinkrev()).
        return self._filelog.linkrev(self._filerev)
    def node(self):
        return self._changectx.node()
    def hex(self):
        return self._changectx.hex()
    def user(self):
        return self._changectx.user()
    def date(self):
        return self._changectx.date()
    def files(self):
        return self._changectx.files()
    def description(self):
        return self._changectx.description()
    def branch(self):
        return self._changectx.branch()
    def extra(self):
        return self._changectx.extra()
    def phase(self):
        return self._changectx.phase()
    def phasestr(self):
        return self._changectx.phasestr()
    def obsolete(self):
        return self._changectx.obsolete()
    def instabilities(self):
        return self._changectx.instabilities()
    def manifest(self):
        return self._changectx.manifest()
    def changectx(self):
        return self._changectx
    def renamed(self):
        return self._copied
    def repo(self):
        return self._repo
    def size(self):
        return len(self.data())

    def path(self):
        return self._path
679
678
680 def isbinary(self):
679 def isbinary(self):
681 try:
680 try:
682 return stringutil.binary(self.data())
681 return stringutil.binary(self.data())
683 except IOError:
682 except IOError:
684 return False
683 return False
685 def isexec(self):
684 def isexec(self):
686 return 'x' in self.flags()
685 return 'x' in self.flags()
687 def islink(self):
686 def islink(self):
688 return 'l' in self.flags()
687 return 'l' in self.flags()
689
688
690 def isabsent(self):
689 def isabsent(self):
691 """whether this filectx represents a file not in self._changectx
690 """whether this filectx represents a file not in self._changectx
692
691
693 This is mainly for merge code to detect change/delete conflicts. This is
692 This is mainly for merge code to detect change/delete conflicts. This is
694 expected to be True for all subclasses of basectx."""
693 expected to be True for all subclasses of basectx."""
695 return False
694 return False
696
695
    # Subclasses with their own comparison semantics set this to True so
    # that cmp() defers to them regardless of which side it is called on.
    _customcmp = False
    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        if fctx._customcmp:
            # let the context with custom comparison logic take over
            return fctx.cmp(self)

        if self._filenode is None:
            raise error.ProgrammingError(
                'filectx.cmp() must be reimplemented if not backed by revlog')

        if fctx._filenode is None:
            # the other side has no filenode: compare against its raw data
            if self._repo._encodefilterpats:
                # can't rely on size() because wdir content may be decoded
                return self._filelog.cmp(self._filenode, fctx.data())
            if self.size() - 4 == fctx.size():
                # size() can match:
                # if file data starts with '\1\n', empty metadata block is
                # prepended, which adds 4 bytes to filelog.size().
                return self._filelog.cmp(self._filenode, fctx.data())
            if self.size() == fctx.size():
                # size() matches: need to compare content
                return self._filelog.cmp(self._filenode, fctx.data())

        # size() differs
        return True
725
724
    def _adjustlinkrev(self, srcrev, inclusive=False, stoprev=None):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked
        :stoprev: an optional revision to stop the walk at. If no introduction
                  of this file content could be found before this floor
                  revision, the function will returns "None" and stops its
                  iteration.
        """
        repo = self._repo
        # use the unfiltered changelog: linkrevs may point at filtered revs
        cl = repo.unfiltered().changelog
        mfl = repo.manifestlog
        # fetch the linkrev
        lkr = self.linkrev()
        if srcrev == lkr:
            # fast path: the stored linkrev is exactly the source revision
            return lkr
        # hack to reuse ancestor computation when searching for renames
        memberanc = getattr(self, '_ancestrycontext', None)
        iteranc = None
        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True  # we skipped the real (revless) source
        else:
            revs = [srcrev]
        if memberanc is None:
            memberanc = iteranc = cl.ancestors(revs, lkr,
                                               inclusive=inclusive)
        # check if this linkrev is an ancestor of srcrev
        if lkr not in memberanc:
            if iteranc is None:
                iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
            fnode = self._filenode
            path = self._path
            for a in iteranc:
                if stoprev is not None and a < stoprev:
                    # walked past the floor without finding an introduction
                    return None
                ac = cl.read(a)  # get changeset data (we avoid object creation)
                if path in ac[3]:  # checking the 'files' field.
                    # The file has been touched, check if the content is
                    # similar to the one we search for.
                    if fnode == mfl[ac[0]].readfast().get(path):
                        return a
            # In theory, we should never get out of that loop without a result.
            # But if manifest uses a buggy file revision (not children of the
            # one it replaces) we could. Such a buggy situation will likely
            # result is crash somewhere else at to some point.
        return lkr
779
778
780 def isintroducedafter(self, changelogrev):
779 def isintroducedafter(self, changelogrev):
781 """True if a filectx has been introduced after a given floor revision
780 """True if a filectx has been introduced after a given floor revision
782 """
781 """
783 if self.linkrev() >= changelogrev:
782 if self.linkrev() >= changelogrev:
784 return True
783 return True
785 introrev = self._introrev(stoprev=changelogrev)
784 introrev = self._introrev(stoprev=changelogrev)
786 if introrev is None:
785 if introrev is None:
787 return False
786 return False
788 return introrev >= changelogrev
787 return introrev >= changelogrev
789
788
    def introrev(self):
        """return the rev of the changeset which introduced this file revision

        This method is different from linkrev because it take into account the
        changeset the filectx was created from. It ensures the returned
        revision is one of its ancestors. This prevents bugs from
        'linkrev-shadowing' when a file revision is used by multiple
        changesets.
        """
        return self._introrev()

    def _introrev(self, stoprev=None):
        """
        Same as `introrev` but, with an extra argument to limit changelog
        iteration range in some internal usecase.

        If `stoprev` is set, the `introrev` will not be searched past that
        `stoprev` revision and "None" might be returned. This is useful to
        limit the iteration range.
        """
        # Work out which changeset to anchor the linkrev adjustment on,
        # preferring already-known information to avoid extra computation.
        toprev = None
        attrs = vars(self)
        if r'_changeid' in attrs:
            # We have a cached value already
            toprev = self._changeid
        elif r'_changectx' in attrs:
            # We know which changelog entry we are coming from
            toprev = self._changectx.rev()

        if toprev is not None:
            return self._adjustlinkrev(toprev, inclusive=True, stoprev=stoprev)
        elif r'_descendantrev' in attrs:
            introrev = self._adjustlinkrev(self._descendantrev, stoprev=stoprev)
            # be nice and cache the result of the computation
            if introrev is not None:
                self._changeid = introrev
            return introrev
        else:
            # no anchoring information: trust the raw linkrev
            return self.linkrev()
829
828
    def introfilectx(self):
        """Return filectx having identical contents, but pointing to the
        changeset revision where this filectx was introduced"""
        introrev = self.introrev()
        if self.rev() == introrev:
            # already anchored at the introducing changeset
            return self
        return self.filectx(self.filenode(), changeid=introrev)

    def _parentfilectx(self, path, fileid, filelog):
        """create parent filectx keeping ancestry info for _adjustlinkrev()"""
        fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
        if r'_changeid' in vars(self) or r'_changectx' in vars(self):
            # If self is associated with a changeset (probably explicitly
            # fed), ensure the created filectx is associated with a
            # changeset that is an ancestor of self.changectx.
            # This lets us later use _adjustlinkrev to get a correct link.
            fctx._descendantrev = self.rev()
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        elif r'_descendantrev' in vars(self):
            # Otherwise propagate _descendantrev if we have one associated.
            fctx._descendantrev = self._descendantrev
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        return fctx
853
852
    def parents(self):
        """Return the parent filectxs, substituting rename source info."""
        _path = self._path
        fl = self._filelog
        parents = self._filelog.parents(self._filenode)
        # drop null parents; each entry is (path, filenode, filelog)
        pl = [(_path, node, fl) for node in parents if node != nullid]

        r = fl.renamed(self._filenode)
        if r:
            # - In the simple rename case, both parent are nullid, pl is empty.
            # - In case of merge, only one of the parent is null id and should
            # be replaced with the rename information. This parent is -always-
            # the first one.
            #
            # As null id have always been filtered out in the previous list
            # comprehension, inserting to 0 will always result in "replacing
            # first nullid parent with rename information.
            pl.insert(0, (r[0], r[1], self._repo.file(r[0])))

        return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
873
872
874 def p1(self):
873 def p1(self):
875 return self.parents()[0]
874 return self.parents()[0]
876
875
877 def p2(self):
876 def p2(self):
878 p = self.parents()
877 p = self.parents()
879 if len(p) == 2:
878 if len(p) == 2:
880 return p[1]
879 return p[1]
881 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
880 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
882
881
    def annotate(self, follow=False, skiprevs=None, diffopts=None):
        """Returns a list of annotateline objects for each line in the file

        - line.fctx is the filectx of the node where that line was last changed
        - line.lineno is the line number at the first appearance in the managed
          file
        - line.text is the data on that line (including newline character)
        """
        # cache filelogs so each path is opened at most once during the walk
        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            # Cut _descendantrev here to mitigate the penalty of lazy linkrev
            # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
            # from the topmost introrev (= srcrev) down to p.linkrev() if it
            # isn't an ancestor of the srcrev.
            f._changeid
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
            # from the cache to save time
            for p in pl:
                if not r'_filelog' in p.__dict__:
                    p._filelog = getlog(p.path())

            return pl

        # use linkrev to find the first changeset where self appeared
        base = self.introfilectx()
        if getattr(base, '_ancestrycontext', None) is None:
            cl = self._repo.changelog
            if base.rev() is None:
                # wctx is not inclusive, but works because _ancestrycontext
                # is used to test filelog revisions
                ac = cl.ancestors([p.rev() for p in base.parents()],
                                  inclusive=True)
            else:
                ac = cl.ancestors([base.rev()], inclusive=True)
            base._ancestrycontext = ac

        return dagop.annotate(base, parents, skiprevs=skiprevs,
                              diffopts=diffopts)
928
927
    def ancestors(self, followfirst=False):
        """Generate the ancestor filectxs of this filectx.

        At each step the candidate with the highest (linkrev, filenode)
        key is yielded next. If followfirst is true, only first parents
        are followed.
        """
        visit = {}
        c = self
        if followfirst:
            cut = 1
        else:
            cut = None

        while True:
            for parent in c.parents()[:cut]:
                visit[(parent.linkrev(), parent.filenode())] = parent
            if not visit:
                break
            # pop the pending ancestor with the largest key
            c = visit.pop(max(visit))
            yield c
944
943
    def decodeddata(self):
        """Returns `data()` after running repository decoding filters.

        This is often equivalent to how the data would be expressed on disk.
        """
        return self._repo.wwritedata(self.path(), self.data())
951
950
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid must be a revision number, if specified.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        # at least one way to locate the file revision must be supplied
        assert (changeid is not None
                or fileid is not None
                or changectx is not None), \
                ("bad args: changeid=%r, fileid=%r, changectx=%r"
                 % (changeid, fileid, changectx))

        # pre-seed the propertycaches for any values the caller already has
        if filelog is not None:
            self._filelog = filelog

        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid
977
976
    @propertycache
    def _changectx(self):
        try:
            return self._repo[self._changeid]
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to build
            # `changectx` for filtered revision. In such case we fallback to
            # creating `changectx` on the unfiltered version of the reposition.
            # This fallback should not be an issue because `changectx` from
            # `filectx` are not used in complex operations that care about
            # filtering.
            #
            # This fallback is a cheap and dirty fix that prevent several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better as "crash"
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when solving linkrev issue are on the table.
            return self._repo.unfiltered()[self._changeid]
1000
999
    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        # reuse self._filelog so the new context shares the open filelog
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog, changeid=changeid)
1006
1005
    def rawdata(self):
        """Return the revision data as stored in the revlog (no filters)."""
        return self._filelog.revision(self._filenode, raw=True)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        """Return the file content, honoring the censor policy on
        censored nodes."""
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            # censored content: return empty data or abort, per config
            if self._repo.ui.config("censor", "policy") == "ignore":
                return ""
            raise error.Abort(_("censored node: %s") % short(self._filenode),
                              hint=_("set censor.policy to ignore errors"))

    def size(self):
        """Return the size recorded in the filelog for this revision."""
        return self._filelog.size(self._filerev)
1025
1024
1026 @propertycache
1025 @propertycache
1027 def _copied(self):
1026 def _copied(self):
1028 """check if file was actually renamed in this changeset revision
1027 """check if file was actually renamed in this changeset revision
1029
1028
1030 If rename logged in file revision, we report copy for changeset only
1029 If rename logged in file revision, we report copy for changeset only
1031 if file revisions linkrev points back to the changeset in question
1030 if file revisions linkrev points back to the changeset in question
1032 or both changeset parents contain different file revisions.
1031 or both changeset parents contain different file revisions.
1033 """
1032 """
1034
1033
1035 renamed = self._filelog.renamed(self._filenode)
1034 renamed = self._filelog.renamed(self._filenode)
1036 if not renamed:
1035 if not renamed:
1037 return None
1036 return None
1038
1037
1039 if self.rev() == self.linkrev():
1038 if self.rev() == self.linkrev():
1040 return renamed
1039 return renamed
1041
1040
1042 name = self.path()
1041 name = self.path()
1043 fnode = self._filenode
1042 fnode = self._filenode
1044 for p in self._changectx.parents():
1043 for p in self._changectx.parents():
1045 try:
1044 try:
1046 if fnode == p.filenode(name):
1045 if fnode == p.filenode(name):
1047 return None
1046 return None
1048 except error.LookupError:
1047 except error.LookupError:
1049 pass
1048 pass
1050 return renamed
1049 return renamed
1051
1050
1052 def children(self):
1051 def children(self):
1053 # hard for renames
1052 # hard for renames
1054 c = self._filelog.children(self._filenode)
1053 c = self._filelog.children(self._filenode)
1055 return [filectx(self._repo, self._path, fileid=x,
1054 return [filectx(self._repo, self._path, fileid=x,
1056 filelog=self._filelog) for x in c]
1055 filelog=self._filelog) for x in c]
1057
1056
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""

    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        super(committablectx, self).__init__(repo)
        # An uncommitted context has no revision number or node yet.
        self._rev = None
        self._node = None
        self._text = text
        # Only pin user/date/status when given explicitly; otherwise the
        # propertycaches below compute defaults lazily.
        if date:
            self._date = dateutil.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise error.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
        if self._extra['branch'] == '':
            self._extra['branch'] = 'default'

    def __bytes__(self):
        # Render as "<first parent>+" to mark the pending state.
        return bytes(self._parents[0]) + "+"

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()

            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func

    @propertycache
    def _flagfunc(self):
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    @propertycache
    def _status(self):
        # Default when no explicit 'changes' was given at construction.
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        ui = self._repo.ui
        date = ui.configdate('devel', 'default-date')
        if date is None:
            date = dateutil.makedate()
        return date

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        return None

    def user(self):
        return self._user or self._repo.ui.username()

    def date(self):
        return self._date

    def description(self):
        return self._text

    def files(self):
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)

    def modified(self):
        return self._status.modified

    def added(self):
        return self._status.added

    def removed(self):
        return self._status.removed

    def deleted(self):
        return self._status.deleted

    def branch(self):
        return encoding.tolocal(self._extra['branch'])

    def closesbranch(self):
        return 'close' in self._extra

    def extra(self):
        return self._extra

    def isinmemory(self):
        return False

    def tags(self):
        return []

    def bookmarks(self):
        # A pending commit carries the bookmarks of all its parents.
        marks = []
        for p in self.parents():
            marks.extend(p.bookmarks())
        return marks

    def phase(self):
        # Default to draft, but never report a phase lower than any parent's.
        phase = phases.draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        # Prefer the cached manifest when one was already materialized.
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def walk(self, match):
        '''Generates matching file names.'''
        return sorted(self._repo.dirstate.walk(self._repo.narrowmatch(match),
                                               subrepos=sorted(self.substate),
                                               unknown=True, ignored=False))

    def matches(self, match):
        match = self._repo.narrowmatch(match)
        ds = self._repo.dirstate
        # Skip files marked as removed ('r') in the dirstate.
        return sorted(f for f in ds.matches(match) if ds[f] != 'r')

    def ancestors(self):
        # Parents first, then everything reachable from them.
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]):
            yield self._repo[a]

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.normal(f)
            for f in self.removed():
                self._repo.dirstate.drop(f)
            self._repo.dirstate.setparents(node)

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())

    def dirty(self, missing=False, merge=True, branch=True):
        return False
1260
1259
1261 class workingctx(committablectx):
1260 class workingctx(committablectx):
1262 """A workingctx object makes access to data related to
1261 """A workingctx object makes access to data related to
1263 the current working directory convenient.
1262 the current working directory convenient.
1264 date - any valid date string or (unixtime, offset), or None.
1263 date - any valid date string or (unixtime, offset), or None.
1265 user - username string, or None.
1264 user - username string, or None.
1266 extra - a dictionary of extra values, or None.
1265 extra - a dictionary of extra values, or None.
1267 changes - a list of file lists as returned by localrepo.status()
1266 changes - a list of file lists as returned by localrepo.status()
1268 or None to use the repository status.
1267 or None to use the repository status.
1269 """
1268 """
1270 def __init__(self, repo, text="", user=None, date=None, extra=None,
1269 def __init__(self, repo, text="", user=None, date=None, extra=None,
1271 changes=None):
1270 changes=None):
1272 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1271 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1273
1272
1274 def __iter__(self):
1273 def __iter__(self):
1275 d = self._repo.dirstate
1274 d = self._repo.dirstate
1276 for f in d:
1275 for f in d:
1277 if d[f] != 'r':
1276 if d[f] != 'r':
1278 yield f
1277 yield f
1279
1278
1280 def __contains__(self, key):
1279 def __contains__(self, key):
1281 return self._repo.dirstate[key] not in "?r"
1280 return self._repo.dirstate[key] not in "?r"
1282
1281
1283 def hex(self):
1282 def hex(self):
1284 return hex(wdirid)
1283 return hex(wdirid)
1285
1284
1286 @propertycache
1285 @propertycache
1287 def _parents(self):
1286 def _parents(self):
1288 p = self._repo.dirstate.parents()
1287 p = self._repo.dirstate.parents()
1289 if p[1] == nullid:
1288 if p[1] == nullid:
1290 p = p[:-1]
1289 p = p[:-1]
1291 # use unfiltered repo to delay/avoid loading obsmarkers
1290 # use unfiltered repo to delay/avoid loading obsmarkers
1292 unfi = self._repo.unfiltered()
1291 unfi = self._repo.unfiltered()
1293 return [changectx(self._repo, unfi.changelog.rev(n), n) for n in p]
1292 return [changectx(self._repo, unfi.changelog.rev(n), n) for n in p]
1294
1293
1295 def _fileinfo(self, path):
1294 def _fileinfo(self, path):
1296 # populate __dict__['_manifest'] as workingctx has no _manifestdelta
1295 # populate __dict__['_manifest'] as workingctx has no _manifestdelta
1297 self._manifest
1296 self._manifest
1298 return super(workingctx, self)._fileinfo(path)
1297 return super(workingctx, self)._fileinfo(path)
1299
1298
1300 def filectx(self, path, filelog=None):
1299 def filectx(self, path, filelog=None):
1301 """get a file context from the working directory"""
1300 """get a file context from the working directory"""
1302 return workingfilectx(self._repo, path, workingctx=self,
1301 return workingfilectx(self._repo, path, workingctx=self,
1303 filelog=filelog)
1302 filelog=filelog)
1304
1303
1305 def dirty(self, missing=False, merge=True, branch=True):
1304 def dirty(self, missing=False, merge=True, branch=True):
1306 "check whether a working directory is modified"
1305 "check whether a working directory is modified"
1307 # check subrepos first
1306 # check subrepos first
1308 for s in sorted(self.substate):
1307 for s in sorted(self.substate):
1309 if self.sub(s).dirty(missing=missing):
1308 if self.sub(s).dirty(missing=missing):
1310 return True
1309 return True
1311 # check current working dir
1310 # check current working dir
1312 return ((merge and self.p2()) or
1311 return ((merge and self.p2()) or
1313 (branch and self.branch() != self.p1().branch()) or
1312 (branch and self.branch() != self.p1().branch()) or
1314 self.modified() or self.added() or self.removed() or
1313 self.modified() or self.added() or self.removed() or
1315 (missing and self.deleted()))
1314 (missing and self.deleted()))
1316
1315
1317 def add(self, list, prefix=""):
1316 def add(self, list, prefix=""):
1318 with self._repo.wlock():
1317 with self._repo.wlock():
1319 ui, ds = self._repo.ui, self._repo.dirstate
1318 ui, ds = self._repo.ui, self._repo.dirstate
1320 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1319 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1321 rejected = []
1320 rejected = []
1322 lstat = self._repo.wvfs.lstat
1321 lstat = self._repo.wvfs.lstat
1323 for f in list:
1322 for f in list:
1324 # ds.pathto() returns an absolute file when this is invoked from
1323 # ds.pathto() returns an absolute file when this is invoked from
1325 # the keyword extension. That gets flagged as non-portable on
1324 # the keyword extension. That gets flagged as non-portable on
1326 # Windows, since it contains the drive letter and colon.
1325 # Windows, since it contains the drive letter and colon.
1327 scmutil.checkportable(ui, os.path.join(prefix, f))
1326 scmutil.checkportable(ui, os.path.join(prefix, f))
1328 try:
1327 try:
1329 st = lstat(f)
1328 st = lstat(f)
1330 except OSError:
1329 except OSError:
1331 ui.warn(_("%s does not exist!\n") % uipath(f))
1330 ui.warn(_("%s does not exist!\n") % uipath(f))
1332 rejected.append(f)
1331 rejected.append(f)
1333 continue
1332 continue
1334 limit = ui.configbytes('ui', 'large-file-limit')
1333 limit = ui.configbytes('ui', 'large-file-limit')
1335 if limit != 0 and st.st_size > limit:
1334 if limit != 0 and st.st_size > limit:
1336 ui.warn(_("%s: up to %d MB of RAM may be required "
1335 ui.warn(_("%s: up to %d MB of RAM may be required "
1337 "to manage this file\n"
1336 "to manage this file\n"
1338 "(use 'hg revert %s' to cancel the "
1337 "(use 'hg revert %s' to cancel the "
1339 "pending addition)\n")
1338 "pending addition)\n")
1340 % (f, 3 * st.st_size // 1000000, uipath(f)))
1339 % (f, 3 * st.st_size // 1000000, uipath(f)))
1341 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1340 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1342 ui.warn(_("%s not added: only files and symlinks "
1341 ui.warn(_("%s not added: only files and symlinks "
1343 "supported currently\n") % uipath(f))
1342 "supported currently\n") % uipath(f))
1344 rejected.append(f)
1343 rejected.append(f)
1345 elif ds[f] in 'amn':
1344 elif ds[f] in 'amn':
1346 ui.warn(_("%s already tracked!\n") % uipath(f))
1345 ui.warn(_("%s already tracked!\n") % uipath(f))
1347 elif ds[f] == 'r':
1346 elif ds[f] == 'r':
1348 ds.normallookup(f)
1347 ds.normallookup(f)
1349 else:
1348 else:
1350 ds.add(f)
1349 ds.add(f)
1351 return rejected
1350 return rejected
1352
1351
1353 def forget(self, files, prefix=""):
1352 def forget(self, files, prefix=""):
1354 with self._repo.wlock():
1353 with self._repo.wlock():
1355 ds = self._repo.dirstate
1354 ds = self._repo.dirstate
1356 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1355 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1357 rejected = []
1356 rejected = []
1358 for f in files:
1357 for f in files:
1359 if f not in ds:
1358 if f not in ds:
1360 self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
1359 self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
1361 rejected.append(f)
1360 rejected.append(f)
1362 elif ds[f] != 'a':
1361 elif ds[f] != 'a':
1363 ds.remove(f)
1362 ds.remove(f)
1364 else:
1363 else:
1365 ds.drop(f)
1364 ds.drop(f)
1366 return rejected
1365 return rejected
1367
1366
1368 def copy(self, source, dest):
1367 def copy(self, source, dest):
1369 try:
1368 try:
1370 st = self._repo.wvfs.lstat(dest)
1369 st = self._repo.wvfs.lstat(dest)
1371 except OSError as err:
1370 except OSError as err:
1372 if err.errno != errno.ENOENT:
1371 if err.errno != errno.ENOENT:
1373 raise
1372 raise
1374 self._repo.ui.warn(_("%s does not exist!\n")
1373 self._repo.ui.warn(_("%s does not exist!\n")
1375 % self._repo.dirstate.pathto(dest))
1374 % self._repo.dirstate.pathto(dest))
1376 return
1375 return
1377 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1376 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1378 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1377 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1379 "symbolic link\n")
1378 "symbolic link\n")
1380 % self._repo.dirstate.pathto(dest))
1379 % self._repo.dirstate.pathto(dest))
1381 else:
1380 else:
1382 with self._repo.wlock():
1381 with self._repo.wlock():
1383 ds = self._repo.dirstate
1382 ds = self._repo.dirstate
1384 if ds[dest] in '?':
1383 if ds[dest] in '?':
1385 ds.add(dest)
1384 ds.add(dest)
1386 elif ds[dest] in 'r':
1385 elif ds[dest] in 'r':
1387 ds.normallookup(dest)
1386 ds.normallookup(dest)
1388 ds.copy(source, dest)
1387 ds.copy(source, dest)
1389
1388
1390 def match(self, pats=None, include=None, exclude=None, default='glob',
1389 def match(self, pats=None, include=None, exclude=None, default='glob',
1391 listsubrepos=False, badfn=None):
1390 listsubrepos=False, badfn=None):
1392 r = self._repo
1391 r = self._repo
1393
1392
1394 # Only a case insensitive filesystem needs magic to translate user input
1393 # Only a case insensitive filesystem needs magic to translate user input
1395 # to actual case in the filesystem.
1394 # to actual case in the filesystem.
1396 icasefs = not util.fscasesensitive(r.root)
1395 icasefs = not util.fscasesensitive(r.root)
1397 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1396 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1398 default, auditor=r.auditor, ctx=self,
1397 default, auditor=r.auditor, ctx=self,
1399 listsubrepos=listsubrepos, badfn=badfn,
1398 listsubrepos=listsubrepos, badfn=badfn,
1400 icasefs=icasefs)
1399 icasefs=icasefs)
1401
1400
1402 def _filtersuspectsymlink(self, files):
1401 def _filtersuspectsymlink(self, files):
1403 if not files or self._repo.dirstate._checklink:
1402 if not files or self._repo.dirstate._checklink:
1404 return files
1403 return files
1405
1404
1406 # Symlink placeholders may get non-symlink-like contents
1405 # Symlink placeholders may get non-symlink-like contents
1407 # via user error or dereferencing by NFS or Samba servers,
1406 # via user error or dereferencing by NFS or Samba servers,
1408 # so we filter out any placeholders that don't look like a
1407 # so we filter out any placeholders that don't look like a
1409 # symlink
1408 # symlink
1410 sane = []
1409 sane = []
1411 for f in files:
1410 for f in files:
1412 if self.flags(f) == 'l':
1411 if self.flags(f) == 'l':
1413 d = self[f].data()
1412 d = self[f].data()
1414 if (d == '' or len(d) >= 1024 or '\n' in d
1413 if (d == '' or len(d) >= 1024 or '\n' in d
1415 or stringutil.binary(d)):
1414 or stringutil.binary(d)):
1416 self._repo.ui.debug('ignoring suspect symlink placeholder'
1415 self._repo.ui.debug('ignoring suspect symlink placeholder'
1417 ' "%s"\n' % f)
1416 ' "%s"\n' % f)
1418 continue
1417 continue
1419 sane.append(f)
1418 sane.append(f)
1420 return sane
1419 return sane
1421
1420
1422 def _checklookup(self, files):
1421 def _checklookup(self, files):
1423 # check for any possibly clean files
1422 # check for any possibly clean files
1424 if not files:
1423 if not files:
1425 return [], [], []
1424 return [], [], []
1426
1425
1427 modified = []
1426 modified = []
1428 deleted = []
1427 deleted = []
1429 fixup = []
1428 fixup = []
1430 pctx = self._parents[0]
1429 pctx = self._parents[0]
1431 # do a full compare of any files that might have changed
1430 # do a full compare of any files that might have changed
1432 for f in sorted(files):
1431 for f in sorted(files):
1433 try:
1432 try:
1434 # This will return True for a file that got replaced by a
1433 # This will return True for a file that got replaced by a
1435 # directory in the interim, but fixing that is pretty hard.
1434 # directory in the interim, but fixing that is pretty hard.
1436 if (f not in pctx or self.flags(f) != pctx.flags(f)
1435 if (f not in pctx or self.flags(f) != pctx.flags(f)
1437 or pctx[f].cmp(self[f])):
1436 or pctx[f].cmp(self[f])):
1438 modified.append(f)
1437 modified.append(f)
1439 else:
1438 else:
1440 fixup.append(f)
1439 fixup.append(f)
1441 except (IOError, OSError):
1440 except (IOError, OSError):
1442 # A file become inaccessible in between? Mark it as deleted,
1441 # A file become inaccessible in between? Mark it as deleted,
1443 # matching dirstate behavior (issue5584).
1442 # matching dirstate behavior (issue5584).
1444 # The dirstate has more complex behavior around whether a
1443 # The dirstate has more complex behavior around whether a
1445 # missing file matches a directory, etc, but we don't need to
1444 # missing file matches a directory, etc, but we don't need to
1446 # bother with that: if f has made it to this point, we're sure
1445 # bother with that: if f has made it to this point, we're sure
1447 # it's in the dirstate.
1446 # it's in the dirstate.
1448 deleted.append(f)
1447 deleted.append(f)
1449
1448
1450 return modified, deleted, fixup
1449 return modified, deleted, fixup
1451
1450
1452 def _poststatusfixup(self, status, fixup):
1451 def _poststatusfixup(self, status, fixup):
1453 """update dirstate for files that are actually clean"""
1452 """update dirstate for files that are actually clean"""
1454 poststatus = self._repo.postdsstatus()
1453 poststatus = self._repo.postdsstatus()
1455 if fixup or poststatus:
1454 if fixup or poststatus:
1456 try:
1455 try:
1457 oldid = self._repo.dirstate.identity()
1456 oldid = self._repo.dirstate.identity()
1458
1457
1459 # updating the dirstate is optional
1458 # updating the dirstate is optional
1460 # so we don't wait on the lock
1459 # so we don't wait on the lock
1461 # wlock can invalidate the dirstate, so cache normal _after_
1460 # wlock can invalidate the dirstate, so cache normal _after_
1462 # taking the lock
1461 # taking the lock
1463 with self._repo.wlock(False):
1462 with self._repo.wlock(False):
1464 if self._repo.dirstate.identity() == oldid:
1463 if self._repo.dirstate.identity() == oldid:
1465 if fixup:
1464 if fixup:
1466 normal = self._repo.dirstate.normal
1465 normal = self._repo.dirstate.normal
1467 for f in fixup:
1466 for f in fixup:
1468 normal(f)
1467 normal(f)
1469 # write changes out explicitly, because nesting
1468 # write changes out explicitly, because nesting
1470 # wlock at runtime may prevent 'wlock.release()'
1469 # wlock at runtime may prevent 'wlock.release()'
1471 # after this block from doing so for subsequent
1470 # after this block from doing so for subsequent
1472 # changing files
1471 # changing files
1473 tr = self._repo.currenttransaction()
1472 tr = self._repo.currenttransaction()
1474 self._repo.dirstate.write(tr)
1473 self._repo.dirstate.write(tr)
1475
1474
1476 if poststatus:
1475 if poststatus:
1477 for ps in poststatus:
1476 for ps in poststatus:
1478 ps(self, status)
1477 ps(self, status)
1479 else:
1478 else:
1480 # in this case, writing changes out breaks
1479 # in this case, writing changes out breaks
1481 # consistency, because .hg/dirstate was
1480 # consistency, because .hg/dirstate was
1482 # already changed simultaneously after last
1481 # already changed simultaneously after last
1483 # caching (see also issue5584 for detail)
1482 # caching (see also issue5584 for detail)
1484 self._repo.ui.debug('skip updating dirstate: '
1483 self._repo.ui.debug('skip updating dirstate: '
1485 'identity mismatch\n')
1484 'identity mismatch\n')
1486 except error.LockError:
1485 except error.LockError:
1487 pass
1486 pass
1488 finally:
1487 finally:
1489 # Even if the wlock couldn't be grabbed, clear out the list.
1488 # Even if the wlock couldn't be grabbed, clear out the list.
1490 self._repo.clearpostdsstatus()
1489 self._repo.clearpostdsstatus()
1491
1490
1492 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1491 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1493 '''Gets the status from the dirstate -- internal use only.'''
1492 '''Gets the status from the dirstate -- internal use only.'''
1494 subrepos = []
1493 subrepos = []
1495 if '.hgsub' in self:
1494 if '.hgsub' in self:
1496 subrepos = sorted(self.substate)
1495 subrepos = sorted(self.substate)
1497 cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
1496 cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
1498 clean=clean, unknown=unknown)
1497 clean=clean, unknown=unknown)
1499
1498
1500 # check for any possibly clean files
1499 # check for any possibly clean files
1501 fixup = []
1500 fixup = []
1502 if cmp:
1501 if cmp:
1503 modified2, deleted2, fixup = self._checklookup(cmp)
1502 modified2, deleted2, fixup = self._checklookup(cmp)
1504 s.modified.extend(modified2)
1503 s.modified.extend(modified2)
1505 s.deleted.extend(deleted2)
1504 s.deleted.extend(deleted2)
1506
1505
1507 if fixup and clean:
1506 if fixup and clean:
1508 s.clean.extend(fixup)
1507 s.clean.extend(fixup)
1509
1508
1510 self._poststatusfixup(s, fixup)
1509 self._poststatusfixup(s, fixup)
1511
1510
1512 if match.always():
1511 if match.always():
1513 # cache for performance
1512 # cache for performance
1514 if s.unknown or s.ignored or s.clean:
1513 if s.unknown or s.ignored or s.clean:
1515 # "_status" is cached with list*=False in the normal route
1514 # "_status" is cached with list*=False in the normal route
1516 self._status = scmutil.status(s.modified, s.added, s.removed,
1515 self._status = scmutil.status(s.modified, s.added, s.removed,
1517 s.deleted, [], [], [])
1516 s.deleted, [], [], [])
1518 else:
1517 else:
1519 self._status = s
1518 self._status = s
1520
1519
1521 return s
1520 return s
1522
1521
1523 @propertycache
1522 @propertycache
1524 def _manifest(self):
1523 def _manifest(self):
1525 """generate a manifest corresponding to the values in self._status
1524 """generate a manifest corresponding to the values in self._status
1526
1525
1527 This reuse the file nodeid from parent, but we use special node
1526 This reuse the file nodeid from parent, but we use special node
1528 identifiers for added and modified files. This is used by manifests
1527 identifiers for added and modified files. This is used by manifests
1529 merge to see that files are different and by update logic to avoid
1528 merge to see that files are different and by update logic to avoid
1530 deleting newly added files.
1529 deleting newly added files.
1531 """
1530 """
1532 return self._buildstatusmanifest(self._status)
1531 return self._buildstatusmanifest(self._status)
1533
1532
1534 def _buildstatusmanifest(self, status):
1533 def _buildstatusmanifest(self, status):
1535 """Builds a manifest that includes the given status results."""
1534 """Builds a manifest that includes the given status results."""
1536 parents = self.parents()
1535 parents = self.parents()
1537
1536
1538 man = parents[0].manifest().copy()
1537 man = parents[0].manifest().copy()
1539
1538
1540 ff = self._flagfunc
1539 ff = self._flagfunc
1541 for i, l in ((addednodeid, status.added),
1540 for i, l in ((addednodeid, status.added),
1542 (modifiednodeid, status.modified)):
1541 (modifiednodeid, status.modified)):
1543 for f in l:
1542 for f in l:
1544 man[f] = i
1543 man[f] = i
1545 try:
1544 try:
1546 man.setflag(f, ff(f))
1545 man.setflag(f, ff(f))
1547 except OSError:
1546 except OSError:
1548 pass
1547 pass
1549
1548
1550 for f in status.deleted + status.removed:
1549 for f in status.deleted + status.removed:
1551 if f in man:
1550 if f in man:
1552 del man[f]
1551 del man[f]
1553
1552
1554 return man
1553 return man
1555
1554
1556 def _buildstatus(self, other, s, match, listignored, listclean,
1555 def _buildstatus(self, other, s, match, listignored, listclean,
1557 listunknown):
1556 listunknown):
1558 """build a status with respect to another context
1557 """build a status with respect to another context
1559
1558
1560 This includes logic for maintaining the fast path of status when
1559 This includes logic for maintaining the fast path of status when
1561 comparing the working directory against its parent, which is to skip
1560 comparing the working directory against its parent, which is to skip
1562 building a new manifest if self (working directory) is not comparing
1561 building a new manifest if self (working directory) is not comparing
1563 against its parent (repo['.']).
1562 against its parent (repo['.']).
1564 """
1563 """
1565 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1564 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1566 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1565 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1567 # might have accidentally ended up with the entire contents of the file
1566 # might have accidentally ended up with the entire contents of the file
1568 # they are supposed to be linking to.
1567 # they are supposed to be linking to.
1569 s.modified[:] = self._filtersuspectsymlink(s.modified)
1568 s.modified[:] = self._filtersuspectsymlink(s.modified)
1570 if other != self._repo['.']:
1569 if other != self._repo['.']:
1571 s = super(workingctx, self)._buildstatus(other, s, match,
1570 s = super(workingctx, self)._buildstatus(other, s, match,
1572 listignored, listclean,
1571 listignored, listclean,
1573 listunknown)
1572 listunknown)
1574 return s
1573 return s
1575
1574
1576 def _matchstatus(self, other, match):
1575 def _matchstatus(self, other, match):
1577 """override the match method with a filter for directory patterns
1576 """override the match method with a filter for directory patterns
1578
1577
1579 We use inheritance to customize the match.bad method only in cases of
1578 We use inheritance to customize the match.bad method only in cases of
1580 workingctx since it belongs only to the working directory when
1579 workingctx since it belongs only to the working directory when
1581 comparing against the parent changeset.
1580 comparing against the parent changeset.
1582
1581
1583 If we aren't comparing against the working directory's parent, then we
1582 If we aren't comparing against the working directory's parent, then we
1584 just use the default match object sent to us.
1583 just use the default match object sent to us.
1585 """
1584 """
1586 if other != self._repo['.']:
1585 if other != self._repo['.']:
1587 def bad(f, msg):
1586 def bad(f, msg):
1588 # 'f' may be a directory pattern from 'match.files()',
1587 # 'f' may be a directory pattern from 'match.files()',
1589 # so 'f not in ctx1' is not enough
1588 # so 'f not in ctx1' is not enough
1590 if f not in other and not other.hasdir(f):
1589 if f not in other and not other.hasdir(f):
1591 self._repo.ui.warn('%s: %s\n' %
1590 self._repo.ui.warn('%s: %s\n' %
1592 (self._repo.dirstate.pathto(f), msg))
1591 (self._repo.dirstate.pathto(f), msg))
1593 match.bad = bad
1592 match.bad = bad
1594 return match
1593 return match
1595
1594
1596 def markcommitted(self, node):
1595 def markcommitted(self, node):
1597 super(workingctx, self).markcommitted(node)
1596 super(workingctx, self).markcommitted(node)
1598
1597
1599 sparse.aftercommit(self._repo, node)
1598 sparse.aftercommit(self._repo, node)
1600
1599
1601 class committablefilectx(basefilectx):
1600 class committablefilectx(basefilectx):
1602 """A committablefilectx provides common functionality for a file context
1601 """A committablefilectx provides common functionality for a file context
1603 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1602 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1604 def __init__(self, repo, path, filelog=None, ctx=None):
1603 def __init__(self, repo, path, filelog=None, ctx=None):
1605 self._repo = repo
1604 self._repo = repo
1606 self._path = path
1605 self._path = path
1607 self._changeid = None
1606 self._changeid = None
1608 self._filerev = self._filenode = None
1607 self._filerev = self._filenode = None
1609
1608
1610 if filelog is not None:
1609 if filelog is not None:
1611 self._filelog = filelog
1610 self._filelog = filelog
1612 if ctx:
1611 if ctx:
1613 self._changectx = ctx
1612 self._changectx = ctx
1614
1613
1615 def __nonzero__(self):
1614 def __nonzero__(self):
1616 return True
1615 return True
1617
1616
1618 __bool__ = __nonzero__
1617 __bool__ = __nonzero__
1619
1618
1620 def linkrev(self):
1619 def linkrev(self):
1621 # linked to self._changectx no matter if file is modified or not
1620 # linked to self._changectx no matter if file is modified or not
1622 return self.rev()
1621 return self.rev()
1623
1622
1624 def parents(self):
1623 def parents(self):
1625 '''return parent filectxs, following copies if necessary'''
1624 '''return parent filectxs, following copies if necessary'''
1626 def filenode(ctx, path):
1625 def filenode(ctx, path):
1627 return ctx._manifest.get(path, nullid)
1626 return ctx._manifest.get(path, nullid)
1628
1627
1629 path = self._path
1628 path = self._path
1630 fl = self._filelog
1629 fl = self._filelog
1631 pcl = self._changectx._parents
1630 pcl = self._changectx._parents
1632 renamed = self.renamed()
1631 renamed = self.renamed()
1633
1632
1634 if renamed:
1633 if renamed:
1635 pl = [renamed + (None,)]
1634 pl = [renamed + (None,)]
1636 else:
1635 else:
1637 pl = [(path, filenode(pcl[0], path), fl)]
1636 pl = [(path, filenode(pcl[0], path), fl)]
1638
1637
1639 for pc in pcl[1:]:
1638 for pc in pcl[1:]:
1640 pl.append((path, filenode(pc, path), fl))
1639 pl.append((path, filenode(pc, path), fl))
1641
1640
1642 return [self._parentfilectx(p, fileid=n, filelog=l)
1641 return [self._parentfilectx(p, fileid=n, filelog=l)
1643 for p, n, l in pl if n != nullid]
1642 for p, n, l in pl if n != nullid]
1644
1643
1645 def children(self):
1644 def children(self):
1646 return []
1645 return []
1647
1646
1648 class workingfilectx(committablefilectx):
1647 class workingfilectx(committablefilectx):
1649 """A workingfilectx object makes access to data related to a particular
1648 """A workingfilectx object makes access to data related to a particular
1650 file in the working directory convenient."""
1649 file in the working directory convenient."""
1651 def __init__(self, repo, path, filelog=None, workingctx=None):
1650 def __init__(self, repo, path, filelog=None, workingctx=None):
1652 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1651 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1653
1652
1654 @propertycache
1653 @propertycache
1655 def _changectx(self):
1654 def _changectx(self):
1656 return workingctx(self._repo)
1655 return workingctx(self._repo)
1657
1656
1658 def data(self):
1657 def data(self):
1659 return self._repo.wread(self._path)
1658 return self._repo.wread(self._path)
1660 def renamed(self):
1659 def renamed(self):
1661 rp = self._repo.dirstate.copied(self._path)
1660 rp = self._repo.dirstate.copied(self._path)
1662 if not rp:
1661 if not rp:
1663 return None
1662 return None
1664 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1663 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1665
1664
1666 def size(self):
1665 def size(self):
1667 return self._repo.wvfs.lstat(self._path).st_size
1666 return self._repo.wvfs.lstat(self._path).st_size
1668 def date(self):
1667 def date(self):
1669 t, tz = self._changectx.date()
1668 t, tz = self._changectx.date()
1670 try:
1669 try:
1671 return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
1670 return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
1672 except OSError as err:
1671 except OSError as err:
1673 if err.errno != errno.ENOENT:
1672 if err.errno != errno.ENOENT:
1674 raise
1673 raise
1675 return (t, tz)
1674 return (t, tz)
1676
1675
1677 def exists(self):
1676 def exists(self):
1678 return self._repo.wvfs.exists(self._path)
1677 return self._repo.wvfs.exists(self._path)
1679
1678
1680 def lexists(self):
1679 def lexists(self):
1681 return self._repo.wvfs.lexists(self._path)
1680 return self._repo.wvfs.lexists(self._path)
1682
1681
1683 def audit(self):
1682 def audit(self):
1684 return self._repo.wvfs.audit(self._path)
1683 return self._repo.wvfs.audit(self._path)
1685
1684
1686 def cmp(self, fctx):
1685 def cmp(self, fctx):
1687 """compare with other file context
1686 """compare with other file context
1688
1687
1689 returns True if different than fctx.
1688 returns True if different than fctx.
1690 """
1689 """
1691 # fctx should be a filectx (not a workingfilectx)
1690 # fctx should be a filectx (not a workingfilectx)
1692 # invert comparison to reuse the same code path
1691 # invert comparison to reuse the same code path
1693 return fctx.cmp(self)
1692 return fctx.cmp(self)
1694
1693
1695 def remove(self, ignoremissing=False):
1694 def remove(self, ignoremissing=False):
1696 """wraps unlink for a repo's working directory"""
1695 """wraps unlink for a repo's working directory"""
1697 rmdir = self._repo.ui.configbool('experimental', 'removeemptydirs')
1696 rmdir = self._repo.ui.configbool('experimental', 'removeemptydirs')
1698 self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing,
1697 self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing,
1699 rmdir=rmdir)
1698 rmdir=rmdir)
1700
1699
1701 def write(self, data, flags, backgroundclose=False, **kwargs):
1700 def write(self, data, flags, backgroundclose=False, **kwargs):
1702 """wraps repo.wwrite"""
1701 """wraps repo.wwrite"""
1703 self._repo.wwrite(self._path, data, flags,
1702 self._repo.wwrite(self._path, data, flags,
1704 backgroundclose=backgroundclose,
1703 backgroundclose=backgroundclose,
1705 **kwargs)
1704 **kwargs)
1706
1705
1707 def markcopied(self, src):
1706 def markcopied(self, src):
1708 """marks this file a copy of `src`"""
1707 """marks this file a copy of `src`"""
1709 if self._repo.dirstate[self._path] in "nma":
1708 if self._repo.dirstate[self._path] in "nma":
1710 self._repo.dirstate.copy(src, self._path)
1709 self._repo.dirstate.copy(src, self._path)
1711
1710
1712 def clearunknown(self):
1711 def clearunknown(self):
1713 """Removes conflicting items in the working directory so that
1712 """Removes conflicting items in the working directory so that
1714 ``write()`` can be called successfully.
1713 ``write()`` can be called successfully.
1715 """
1714 """
1716 wvfs = self._repo.wvfs
1715 wvfs = self._repo.wvfs
1717 f = self._path
1716 f = self._path
1718 wvfs.audit(f)
1717 wvfs.audit(f)
1719 if self._repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
1718 if self._repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
1720 # remove files under the directory as they should already be
1719 # remove files under the directory as they should already be
1721 # warned and backed up
1720 # warned and backed up
1722 if wvfs.isdir(f) and not wvfs.islink(f):
1721 if wvfs.isdir(f) and not wvfs.islink(f):
1723 wvfs.rmtree(f, forcibly=True)
1722 wvfs.rmtree(f, forcibly=True)
1724 for p in reversed(list(util.finddirs(f))):
1723 for p in reversed(list(util.finddirs(f))):
1725 if wvfs.isfileorlink(p):
1724 if wvfs.isfileorlink(p):
1726 wvfs.unlink(p)
1725 wvfs.unlink(p)
1727 break
1726 break
1728 else:
1727 else:
1729 # don't remove files if path conflicts are not processed
1728 # don't remove files if path conflicts are not processed
1730 if wvfs.isdir(f) and not wvfs.islink(f):
1729 if wvfs.isdir(f) and not wvfs.islink(f):
1731 wvfs.removedirs(f)
1730 wvfs.removedirs(f)
1732
1731
1733 def setflags(self, l, x):
1732 def setflags(self, l, x):
1734 self._repo.wvfs.setflags(self._path, l, x)
1733 self._repo.wvfs.setflags(self._path, l, x)
1735
1734
1736 class overlayworkingctx(committablectx):
1735 class overlayworkingctx(committablectx):
1737 """Wraps another mutable context with a write-back cache that can be
1736 """Wraps another mutable context with a write-back cache that can be
1738 converted into a commit context.
1737 converted into a commit context.
1739
1738
1740 self._cache[path] maps to a dict with keys: {
1739 self._cache[path] maps to a dict with keys: {
1741 'exists': bool?
1740 'exists': bool?
1742 'date': date?
1741 'date': date?
1743 'data': str?
1742 'data': str?
1744 'flags': str?
1743 'flags': str?
1745 'copied': str? (path or None)
1744 'copied': str? (path or None)
1746 }
1745 }
1747 If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
1746 If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
1748 is `False`, the file was deleted.
1747 is `False`, the file was deleted.
1749 """
1748 """
1750
1749
1751 def __init__(self, repo):
1750 def __init__(self, repo):
1752 super(overlayworkingctx, self).__init__(repo)
1751 super(overlayworkingctx, self).__init__(repo)
1753 self.clean()
1752 self.clean()
1754
1753
1755 def setbase(self, wrappedctx):
1754 def setbase(self, wrappedctx):
1756 self._wrappedctx = wrappedctx
1755 self._wrappedctx = wrappedctx
1757 self._parents = [wrappedctx]
1756 self._parents = [wrappedctx]
1758 # Drop old manifest cache as it is now out of date.
1757 # Drop old manifest cache as it is now out of date.
1759 # This is necessary when, e.g., rebasing several nodes with one
1758 # This is necessary when, e.g., rebasing several nodes with one
1760 # ``overlayworkingctx`` (e.g. with --collapse).
1759 # ``overlayworkingctx`` (e.g. with --collapse).
1761 util.clearcachedproperty(self, '_manifest')
1760 util.clearcachedproperty(self, '_manifest')
1762
1761
1763 def data(self, path):
1762 def data(self, path):
1764 if self.isdirty(path):
1763 if self.isdirty(path):
1765 if self._cache[path]['exists']:
1764 if self._cache[path]['exists']:
1766 if self._cache[path]['data']:
1765 if self._cache[path]['data']:
1767 return self._cache[path]['data']
1766 return self._cache[path]['data']
1768 else:
1767 else:
1769 # Must fallback here, too, because we only set flags.
1768 # Must fallback here, too, because we only set flags.
1770 return self._wrappedctx[path].data()
1769 return self._wrappedctx[path].data()
1771 else:
1770 else:
1772 raise error.ProgrammingError("No such file or directory: %s" %
1771 raise error.ProgrammingError("No such file or directory: %s" %
1773 path)
1772 path)
1774 else:
1773 else:
1775 return self._wrappedctx[path].data()
1774 return self._wrappedctx[path].data()
1776
1775
1777 @propertycache
1776 @propertycache
1778 def _manifest(self):
1777 def _manifest(self):
1779 parents = self.parents()
1778 parents = self.parents()
1780 man = parents[0].manifest().copy()
1779 man = parents[0].manifest().copy()
1781
1780
1782 flag = self._flagfunc
1781 flag = self._flagfunc
1783 for path in self.added():
1782 for path in self.added():
1784 man[path] = addednodeid
1783 man[path] = addednodeid
1785 man.setflag(path, flag(path))
1784 man.setflag(path, flag(path))
1786 for path in self.modified():
1785 for path in self.modified():
1787 man[path] = modifiednodeid
1786 man[path] = modifiednodeid
1788 man.setflag(path, flag(path))
1787 man.setflag(path, flag(path))
1789 for path in self.removed():
1788 for path in self.removed():
1790 del man[path]
1789 del man[path]
1791 return man
1790 return man
1792
1791
1793 @propertycache
1792 @propertycache
1794 def _flagfunc(self):
1793 def _flagfunc(self):
1795 def f(path):
1794 def f(path):
1796 return self._cache[path]['flags']
1795 return self._cache[path]['flags']
1797 return f
1796 return f
1798
1797
1799 def files(self):
1798 def files(self):
1800 return sorted(self.added() + self.modified() + self.removed())
1799 return sorted(self.added() + self.modified() + self.removed())
1801
1800
1802 def modified(self):
1801 def modified(self):
1803 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1802 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1804 self._existsinparent(f)]
1803 self._existsinparent(f)]
1805
1804
1806 def added(self):
1805 def added(self):
1807 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1806 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1808 not self._existsinparent(f)]
1807 not self._existsinparent(f)]
1809
1808
1810 def removed(self):
1809 def removed(self):
1811 return [f for f in self._cache.keys() if
1810 return [f for f in self._cache.keys() if
1812 not self._cache[f]['exists'] and self._existsinparent(f)]
1811 not self._cache[f]['exists'] and self._existsinparent(f)]
1813
1812
1814 def isinmemory(self):
1813 def isinmemory(self):
1815 return True
1814 return True
1816
1815
1817 def filedate(self, path):
1816 def filedate(self, path):
1818 if self.isdirty(path):
1817 if self.isdirty(path):
1819 return self._cache[path]['date']
1818 return self._cache[path]['date']
1820 else:
1819 else:
1821 return self._wrappedctx[path].date()
1820 return self._wrappedctx[path].date()
1822
1821
1823 def markcopied(self, path, origin):
1822 def markcopied(self, path, origin):
1824 if self.isdirty(path):
1823 if self.isdirty(path):
1825 self._cache[path]['copied'] = origin
1824 self._cache[path]['copied'] = origin
1826 else:
1825 else:
1827 raise error.ProgrammingError('markcopied() called on clean context')
1826 raise error.ProgrammingError('markcopied() called on clean context')
1828
1827
1829 def copydata(self, path):
1828 def copydata(self, path):
1830 if self.isdirty(path):
1829 if self.isdirty(path):
1831 return self._cache[path]['copied']
1830 return self._cache[path]['copied']
1832 else:
1831 else:
1833 raise error.ProgrammingError('copydata() called on clean context')
1832 raise error.ProgrammingError('copydata() called on clean context')
1834
1833
1835 def flags(self, path):
1834 def flags(self, path):
1836 if self.isdirty(path):
1835 if self.isdirty(path):
1837 if self._cache[path]['exists']:
1836 if self._cache[path]['exists']:
1838 return self._cache[path]['flags']
1837 return self._cache[path]['flags']
1839 else:
1838 else:
1840 raise error.ProgrammingError("No such file or directory: %s" %
1839 raise error.ProgrammingError("No such file or directory: %s" %
1841 self._path)
1840 self._path)
1842 else:
1841 else:
1843 return self._wrappedctx[path].flags()
1842 return self._wrappedctx[path].flags()
1844
1843
1845 def __contains__(self, key):
1844 def __contains__(self, key):
1846 if key in self._cache:
1845 if key in self._cache:
1847 return self._cache[key]['exists']
1846 return self._cache[key]['exists']
1848 return key in self.p1()
1847 return key in self.p1()
1849
1848
1850 def _existsinparent(self, path):
1849 def _existsinparent(self, path):
1851 try:
1850 try:
1852 # ``commitctx` raises a ``ManifestLookupError`` if a path does not
1851 # ``commitctx` raises a ``ManifestLookupError`` if a path does not
1853 # exist, unlike ``workingctx``, which returns a ``workingfilectx``
1852 # exist, unlike ``workingctx``, which returns a ``workingfilectx``
1854 # with an ``exists()`` function.
1853 # with an ``exists()`` function.
1855 self._wrappedctx[path]
1854 self._wrappedctx[path]
1856 return True
1855 return True
1857 except error.ManifestLookupError:
1856 except error.ManifestLookupError:
1858 return False
1857 return False
1859
1858
1860 def _auditconflicts(self, path):
1859 def _auditconflicts(self, path):
1861 """Replicates conflict checks done by wvfs.write().
1860 """Replicates conflict checks done by wvfs.write().
1862
1861
1863 Since we never write to the filesystem and never call `applyupdates` in
1862 Since we never write to the filesystem and never call `applyupdates` in
1864 IMM, we'll never check that a path is actually writable -- e.g., because
1863 IMM, we'll never check that a path is actually writable -- e.g., because
1865 it adds `a/foo`, but `a` is actually a file in the other commit.
1864 it adds `a/foo`, but `a` is actually a file in the other commit.
1866 """
1865 """
1867 def fail(path, component):
1866 def fail(path, component):
1868 # p1() is the base and we're receiving "writes" for p2()'s
1867 # p1() is the base and we're receiving "writes" for p2()'s
1869 # files.
1868 # files.
1870 if 'l' in self.p1()[component].flags():
1869 if 'l' in self.p1()[component].flags():
1871 raise error.Abort("error: %s conflicts with symlink %s "
1870 raise error.Abort("error: %s conflicts with symlink %s "
1872 "in %d." % (path, component,
1871 "in %d." % (path, component,
1873 self.p1().rev()))
1872 self.p1().rev()))
1874 else:
1873 else:
1875 raise error.Abort("error: '%s' conflicts with file '%s' in "
1874 raise error.Abort("error: '%s' conflicts with file '%s' in "
1876 "%d." % (path, component,
1875 "%d." % (path, component,
1877 self.p1().rev()))
1876 self.p1().rev()))
1878
1877
1879 # Test that each new directory to be created to write this path from p2
1878 # Test that each new directory to be created to write this path from p2
1880 # is not a file in p1.
1879 # is not a file in p1.
1881 components = path.split('/')
1880 components = path.split('/')
1882 for i in pycompat.xrange(len(components)):
1881 for i in pycompat.xrange(len(components)):
1883 component = "/".join(components[0:i])
1882 component = "/".join(components[0:i])
1884 if component in self:
1883 if component in self:
1885 fail(path, component)
1884 fail(path, component)
1886
1885
1887 # Test the other direction -- that this path from p2 isn't a directory
1886 # Test the other direction -- that this path from p2 isn't a directory
1888 # in p1 (test that p1 doesn't have any paths matching `path/*`).
1887 # in p1 (test that p1 doesn't have any paths matching `path/*`).
1889 match = self.match(pats=[path + '/'], default=b'path')
1888 match = self.match(pats=[path + '/'], default=b'path')
1890 matches = self.p1().manifest().matches(match)
1889 matches = self.p1().manifest().matches(match)
1891 mfiles = matches.keys()
1890 mfiles = matches.keys()
1892 if len(mfiles) > 0:
1891 if len(mfiles) > 0:
1893 if len(mfiles) == 1 and mfiles[0] == path:
1892 if len(mfiles) == 1 and mfiles[0] == path:
1894 return
1893 return
1895 # omit the files which are deleted in current IMM wctx
1894 # omit the files which are deleted in current IMM wctx
1896 mfiles = [m for m in mfiles if m in self]
1895 mfiles = [m for m in mfiles if m in self]
1897 if not mfiles:
1896 if not mfiles:
1898 return
1897 return
1899 raise error.Abort("error: file '%s' cannot be written because "
1898 raise error.Abort("error: file '%s' cannot be written because "
1900 " '%s/' is a folder in %s (containing %d "
1899 " '%s/' is a folder in %s (containing %d "
1901 "entries: %s)"
1900 "entries: %s)"
1902 % (path, path, self.p1(), len(mfiles),
1901 % (path, path, self.p1(), len(mfiles),
1903 ', '.join(mfiles)))
1902 ', '.join(mfiles)))
1904
1903
1905 def write(self, path, data, flags='', **kwargs):
1904 def write(self, path, data, flags='', **kwargs):
1906 if data is None:
1905 if data is None:
1907 raise error.ProgrammingError("data must be non-None")
1906 raise error.ProgrammingError("data must be non-None")
1908 self._auditconflicts(path)
1907 self._auditconflicts(path)
1909 self._markdirty(path, exists=True, data=data, date=dateutil.makedate(),
1908 self._markdirty(path, exists=True, data=data, date=dateutil.makedate(),
1910 flags=flags)
1909 flags=flags)
1911
1910
1912 def setflags(self, path, l, x):
1911 def setflags(self, path, l, x):
1913 flag = ''
1912 flag = ''
1914 if l:
1913 if l:
1915 flag = 'l'
1914 flag = 'l'
1916 elif x:
1915 elif x:
1917 flag = 'x'
1916 flag = 'x'
1918 self._markdirty(path, exists=True, date=dateutil.makedate(),
1917 self._markdirty(path, exists=True, date=dateutil.makedate(),
1919 flags=flag)
1918 flags=flag)
1920
1919
1921 def remove(self, path):
1920 def remove(self, path):
1922 self._markdirty(path, exists=False)
1921 self._markdirty(path, exists=False)
1923
1922
1924 def exists(self, path):
1923 def exists(self, path):
1925 """exists behaves like `lexists`, but needs to follow symlinks and
1924 """exists behaves like `lexists`, but needs to follow symlinks and
1926 return False if they are broken.
1925 return False if they are broken.
1927 """
1926 """
1928 if self.isdirty(path):
1927 if self.isdirty(path):
1929 # If this path exists and is a symlink, "follow" it by calling
1928 # If this path exists and is a symlink, "follow" it by calling
1930 # exists on the destination path.
1929 # exists on the destination path.
1931 if (self._cache[path]['exists'] and
1930 if (self._cache[path]['exists'] and
1932 'l' in self._cache[path]['flags']):
1931 'l' in self._cache[path]['flags']):
1933 return self.exists(self._cache[path]['data'].strip())
1932 return self.exists(self._cache[path]['data'].strip())
1934 else:
1933 else:
1935 return self._cache[path]['exists']
1934 return self._cache[path]['exists']
1936
1935
1937 return self._existsinparent(path)
1936 return self._existsinparent(path)
1938
1937
1939 def lexists(self, path):
1938 def lexists(self, path):
1940 """lexists returns True if the path exists"""
1939 """lexists returns True if the path exists"""
1941 if self.isdirty(path):
1940 if self.isdirty(path):
1942 return self._cache[path]['exists']
1941 return self._cache[path]['exists']
1943
1942
1944 return self._existsinparent(path)
1943 return self._existsinparent(path)
1945
1944
1946 def size(self, path):
1945 def size(self, path):
1947 if self.isdirty(path):
1946 if self.isdirty(path):
1948 if self._cache[path]['exists']:
1947 if self._cache[path]['exists']:
1949 return len(self._cache[path]['data'])
1948 return len(self._cache[path]['data'])
1950 else:
1949 else:
1951 raise error.ProgrammingError("No such file or directory: %s" %
1950 raise error.ProgrammingError("No such file or directory: %s" %
1952 self._path)
1951 self._path)
1953 return self._wrappedctx[path].size()
1952 return self._wrappedctx[path].size()
1954
1953
1955 def tomemctx(self, text, branch=None, extra=None, date=None, parents=None,
1954 def tomemctx(self, text, branch=None, extra=None, date=None, parents=None,
1956 user=None, editor=None):
1955 user=None, editor=None):
1957 """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
1956 """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
1958 committed.
1957 committed.
1959
1958
1960 ``text`` is the commit message.
1959 ``text`` is the commit message.
1961 ``parents`` (optional) are rev numbers.
1960 ``parents`` (optional) are rev numbers.
1962 """
1961 """
1963 # Default parents to the wrapped contexts' if not passed.
1962 # Default parents to the wrapped contexts' if not passed.
1964 if parents is None:
1963 if parents is None:
1965 parents = self._wrappedctx.parents()
1964 parents = self._wrappedctx.parents()
1966 if len(parents) == 1:
1965 if len(parents) == 1:
1967 parents = (parents[0], None)
1966 parents = (parents[0], None)
1968
1967
1969 # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
1968 # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
1970 if parents[1] is None:
1969 if parents[1] is None:
1971 parents = (self._repo[parents[0]], None)
1970 parents = (self._repo[parents[0]], None)
1972 else:
1971 else:
1973 parents = (self._repo[parents[0]], self._repo[parents[1]])
1972 parents = (self._repo[parents[0]], self._repo[parents[1]])
1974
1973
1975 files = self._cache.keys()
1974 files = self._cache.keys()
1976 def getfile(repo, memctx, path):
1975 def getfile(repo, memctx, path):
1977 if self._cache[path]['exists']:
1976 if self._cache[path]['exists']:
1978 return memfilectx(repo, memctx, path,
1977 return memfilectx(repo, memctx, path,
1979 self._cache[path]['data'],
1978 self._cache[path]['data'],
1980 'l' in self._cache[path]['flags'],
1979 'l' in self._cache[path]['flags'],
1981 'x' in self._cache[path]['flags'],
1980 'x' in self._cache[path]['flags'],
1982 self._cache[path]['copied'])
1981 self._cache[path]['copied'])
1983 else:
1982 else:
1984 # Returning None, but including the path in `files`, is
1983 # Returning None, but including the path in `files`, is
1985 # necessary for memctx to register a deletion.
1984 # necessary for memctx to register a deletion.
1986 return None
1985 return None
1987 return memctx(self._repo, parents, text, files, getfile, date=date,
1986 return memctx(self._repo, parents, text, files, getfile, date=date,
1988 extra=extra, user=user, branch=branch, editor=editor)
1987 extra=extra, user=user, branch=branch, editor=editor)
1989
1988
1990 def isdirty(self, path):
1989 def isdirty(self, path):
1991 return path in self._cache
1990 return path in self._cache
1992
1991
1993 def isempty(self):
1992 def isempty(self):
1994 # We need to discard any keys that are actually clean before the empty
1993 # We need to discard any keys that are actually clean before the empty
1995 # commit check.
1994 # commit check.
1996 self._compact()
1995 self._compact()
1997 return len(self._cache) == 0
1996 return len(self._cache) == 0
1998
1997
1999 def clean(self):
1998 def clean(self):
2000 self._cache = {}
1999 self._cache = {}
2001
2000
2002 def _compact(self):
2001 def _compact(self):
2003 """Removes keys from the cache that are actually clean, by comparing
2002 """Removes keys from the cache that are actually clean, by comparing
2004 them with the underlying context.
2003 them with the underlying context.
2005
2004
2006 This can occur during the merge process, e.g. by passing --tool :local
2005 This can occur during the merge process, e.g. by passing --tool :local
2007 to resolve a conflict.
2006 to resolve a conflict.
2008 """
2007 """
2009 keys = []
2008 keys = []
2010 # This won't be perfect, but can help performance significantly when
2009 # This won't be perfect, but can help performance significantly when
2011 # using things like remotefilelog.
2010 # using things like remotefilelog.
2012 scmutil.prefetchfiles(
2011 scmutil.prefetchfiles(
2013 self.repo(), [self.p1().rev()],
2012 self.repo(), [self.p1().rev()],
2014 scmutil.matchfiles(self.repo(), self._cache.keys()))
2013 scmutil.matchfiles(self.repo(), self._cache.keys()))
2015
2014
2016 for path in self._cache.keys():
2015 for path in self._cache.keys():
2017 cache = self._cache[path]
2016 cache = self._cache[path]
2018 try:
2017 try:
2019 underlying = self._wrappedctx[path]
2018 underlying = self._wrappedctx[path]
2020 if (underlying.data() == cache['data'] and
2019 if (underlying.data() == cache['data'] and
2021 underlying.flags() == cache['flags']):
2020 underlying.flags() == cache['flags']):
2022 keys.append(path)
2021 keys.append(path)
2023 except error.ManifestLookupError:
2022 except error.ManifestLookupError:
2024 # Path not in the underlying manifest (created).
2023 # Path not in the underlying manifest (created).
2025 continue
2024 continue
2026
2025
2027 for path in keys:
2026 for path in keys:
2028 del self._cache[path]
2027 del self._cache[path]
2029 return keys
2028 return keys
2030
2029
2031 def _markdirty(self, path, exists, data=None, date=None, flags=''):
2030 def _markdirty(self, path, exists, data=None, date=None, flags=''):
2032 # data not provided, let's see if we already have some; if not, let's
2031 # data not provided, let's see if we already have some; if not, let's
2033 # grab it from our underlying context, so that we always have data if
2032 # grab it from our underlying context, so that we always have data if
2034 # the file is marked as existing.
2033 # the file is marked as existing.
2035 if exists and data is None:
2034 if exists and data is None:
2036 oldentry = self._cache.get(path) or {}
2035 oldentry = self._cache.get(path) or {}
2037 data = oldentry.get('data') or self._wrappedctx[path].data()
2036 data = oldentry.get('data') or self._wrappedctx[path].data()
2038
2037
2039 self._cache[path] = {
2038 self._cache[path] = {
2040 'exists': exists,
2039 'exists': exists,
2041 'data': data,
2040 'data': data,
2042 'date': date,
2041 'date': date,
2043 'flags': flags,
2042 'flags': flags,
2044 'copied': None,
2043 'copied': None,
2045 }
2044 }
2046
2045
2047 def filectx(self, path, filelog=None):
2046 def filectx(self, path, filelog=None):
2048 return overlayworkingfilectx(self._repo, path, parent=self,
2047 return overlayworkingfilectx(self._repo, path, parent=self,
2049 filelog=filelog)
2048 filelog=filelog)
2050
2049
2051 class overlayworkingfilectx(committablefilectx):
2050 class overlayworkingfilectx(committablefilectx):
2052 """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
2051 """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
2053 cache, which can be flushed through later by calling ``flush()``."""
2052 cache, which can be flushed through later by calling ``flush()``."""
2054
2053
2055 def __init__(self, repo, path, filelog=None, parent=None):
2054 def __init__(self, repo, path, filelog=None, parent=None):
2056 super(overlayworkingfilectx, self).__init__(repo, path, filelog,
2055 super(overlayworkingfilectx, self).__init__(repo, path, filelog,
2057 parent)
2056 parent)
2058 self._repo = repo
2057 self._repo = repo
2059 self._parent = parent
2058 self._parent = parent
2060 self._path = path
2059 self._path = path
2061
2060
2062 def cmp(self, fctx):
2061 def cmp(self, fctx):
2063 return self.data() != fctx.data()
2062 return self.data() != fctx.data()
2064
2063
2065 def changectx(self):
2064 def changectx(self):
2066 return self._parent
2065 return self._parent
2067
2066
2068 def data(self):
2067 def data(self):
2069 return self._parent.data(self._path)
2068 return self._parent.data(self._path)
2070
2069
2071 def date(self):
2070 def date(self):
2072 return self._parent.filedate(self._path)
2071 return self._parent.filedate(self._path)
2073
2072
2074 def exists(self):
2073 def exists(self):
2075 return self.lexists()
2074 return self.lexists()
2076
2075
2077 def lexists(self):
2076 def lexists(self):
2078 return self._parent.exists(self._path)
2077 return self._parent.exists(self._path)
2079
2078
2080 def renamed(self):
2079 def renamed(self):
2081 path = self._parent.copydata(self._path)
2080 path = self._parent.copydata(self._path)
2082 if not path:
2081 if not path:
2083 return None
2082 return None
2084 return path, self._changectx._parents[0]._manifest.get(path, nullid)
2083 return path, self._changectx._parents[0]._manifest.get(path, nullid)
2085
2084
2086 def size(self):
2085 def size(self):
2087 return self._parent.size(self._path)
2086 return self._parent.size(self._path)
2088
2087
2089 def markcopied(self, origin):
2088 def markcopied(self, origin):
2090 self._parent.markcopied(self._path, origin)
2089 self._parent.markcopied(self._path, origin)
2091
2090
2092 def audit(self):
2091 def audit(self):
2093 pass
2092 pass
2094
2093
2095 def flags(self):
2094 def flags(self):
2096 return self._parent.flags(self._path)
2095 return self._parent.flags(self._path)
2097
2096
2098 def setflags(self, islink, isexec):
2097 def setflags(self, islink, isexec):
2099 return self._parent.setflags(self._path, islink, isexec)
2098 return self._parent.setflags(self._path, islink, isexec)
2100
2099
2101 def write(self, data, flags, backgroundclose=False, **kwargs):
2100 def write(self, data, flags, backgroundclose=False, **kwargs):
2102 return self._parent.write(self._path, data, flags, **kwargs)
2101 return self._parent.write(self._path, data, flags, **kwargs)
2103
2102
2104 def remove(self, ignoremissing=False):
2103 def remove(self, ignoremissing=False):
2105 return self._parent.remove(self._path)
2104 return self._parent.remove(self._path)
2106
2105
2107 def clearunknown(self):
2106 def clearunknown(self):
2108 pass
2107 pass
2109
2108
2110 class workingcommitctx(workingctx):
2109 class workingcommitctx(workingctx):
2111 """A workingcommitctx object makes access to data related to
2110 """A workingcommitctx object makes access to data related to
2112 the revision being committed convenient.
2111 the revision being committed convenient.
2113
2112
2114 This hides changes in the working directory, if they aren't
2113 This hides changes in the working directory, if they aren't
2115 committed in this context.
2114 committed in this context.
2116 """
2115 """
2117 def __init__(self, repo, changes,
2116 def __init__(self, repo, changes,
2118 text="", user=None, date=None, extra=None):
2117 text="", user=None, date=None, extra=None):
2119 super(workingcommitctx, self).__init__(repo, text, user, date, extra,
2118 super(workingcommitctx, self).__init__(repo, text, user, date, extra,
2120 changes)
2119 changes)
2121
2120
2122 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
2121 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
2123 """Return matched files only in ``self._status``
2122 """Return matched files only in ``self._status``
2124
2123
2125 Uncommitted files appear "clean" via this context, even if
2124 Uncommitted files appear "clean" via this context, even if
2126 they aren't actually so in the working directory.
2125 they aren't actually so in the working directory.
2127 """
2126 """
2128 if clean:
2127 if clean:
2129 clean = [f for f in self._manifest if f not in self._changedset]
2128 clean = [f for f in self._manifest if f not in self._changedset]
2130 else:
2129 else:
2131 clean = []
2130 clean = []
2132 return scmutil.status([f for f in self._status.modified if match(f)],
2131 return scmutil.status([f for f in self._status.modified if match(f)],
2133 [f for f in self._status.added if match(f)],
2132 [f for f in self._status.added if match(f)],
2134 [f for f in self._status.removed if match(f)],
2133 [f for f in self._status.removed if match(f)],
2135 [], [], [], clean)
2134 [], [], [], clean)
2136
2135
2137 @propertycache
2136 @propertycache
2138 def _changedset(self):
2137 def _changedset(self):
2139 """Return the set of files changed in this context
2138 """Return the set of files changed in this context
2140 """
2139 """
2141 changed = set(self._status.modified)
2140 changed = set(self._status.modified)
2142 changed.update(self._status.added)
2141 changed.update(self._status.added)
2143 changed.update(self._status.removed)
2142 changed.update(self._status.removed)
2144 return changed
2143 return changed
2145
2144
2146 def makecachingfilectxfn(func):
2145 def makecachingfilectxfn(func):
2147 """Create a filectxfn that caches based on the path.
2146 """Create a filectxfn that caches based on the path.
2148
2147
2149 We can't use util.cachefunc because it uses all arguments as the cache
2148 We can't use util.cachefunc because it uses all arguments as the cache
2150 key and this creates a cycle since the arguments include the repo and
2149 key and this creates a cycle since the arguments include the repo and
2151 memctx.
2150 memctx.
2152 """
2151 """
2153 cache = {}
2152 cache = {}
2154
2153
2155 def getfilectx(repo, memctx, path):
2154 def getfilectx(repo, memctx, path):
2156 if path not in cache:
2155 if path not in cache:
2157 cache[path] = func(repo, memctx, path)
2156 cache[path] = func(repo, memctx, path)
2158 return cache[path]
2157 return cache[path]
2159
2158
2160 return getfilectx
2159 return getfilectx
2161
2160
2162 def memfilefromctx(ctx):
2161 def memfilefromctx(ctx):
2163 """Given a context return a memfilectx for ctx[path]
2162 """Given a context return a memfilectx for ctx[path]
2164
2163
2165 This is a convenience method for building a memctx based on another
2164 This is a convenience method for building a memctx based on another
2166 context.
2165 context.
2167 """
2166 """
2168 def getfilectx(repo, memctx, path):
2167 def getfilectx(repo, memctx, path):
2169 fctx = ctx[path]
2168 fctx = ctx[path]
2170 copied = fctx.renamed()
2169 copied = fctx.renamed()
2171 if copied:
2170 if copied:
2172 copied = copied[0]
2171 copied = copied[0]
2173 return memfilectx(repo, memctx, path, fctx.data(),
2172 return memfilectx(repo, memctx, path, fctx.data(),
2174 islink=fctx.islink(), isexec=fctx.isexec(),
2173 islink=fctx.islink(), isexec=fctx.isexec(),
2175 copied=copied)
2174 copied=copied)
2176
2175
2177 return getfilectx
2176 return getfilectx
2178
2177
2179 def memfilefrompatch(patchstore):
2178 def memfilefrompatch(patchstore):
2180 """Given a patch (e.g. patchstore object) return a memfilectx
2179 """Given a patch (e.g. patchstore object) return a memfilectx
2181
2180
2182 This is a convenience method for building a memctx based on a patchstore.
2181 This is a convenience method for building a memctx based on a patchstore.
2183 """
2182 """
2184 def getfilectx(repo, memctx, path):
2183 def getfilectx(repo, memctx, path):
2185 data, mode, copied = patchstore.getfile(path)
2184 data, mode, copied = patchstore.getfile(path)
2186 if data is None:
2185 if data is None:
2187 return None
2186 return None
2188 islink, isexec = mode
2187 islink, isexec = mode
2189 return memfilectx(repo, memctx, path, data, islink=islink,
2188 return memfilectx(repo, memctx, path, data, islink=islink,
2190 isexec=isexec, copied=copied)
2189 isexec=isexec, copied=copied)
2191
2190
2192 return getfilectx
2191 return getfilectx
2193
2192
2194 class memctx(committablectx):
2193 class memctx(committablectx):
2195 """Use memctx to perform in-memory commits via localrepo.commitctx().
2194 """Use memctx to perform in-memory commits via localrepo.commitctx().
2196
2195
2197 Revision information is supplied at initialization time while
2196 Revision information is supplied at initialization time while
2198 related files data and is made available through a callback
2197 related files data and is made available through a callback
2199 mechanism. 'repo' is the current localrepo, 'parents' is a
2198 mechanism. 'repo' is the current localrepo, 'parents' is a
2200 sequence of two parent revisions identifiers (pass None for every
2199 sequence of two parent revisions identifiers (pass None for every
2201 missing parent), 'text' is the commit message and 'files' lists
2200 missing parent), 'text' is the commit message and 'files' lists
2202 names of files touched by the revision (normalized and relative to
2201 names of files touched by the revision (normalized and relative to
2203 repository root).
2202 repository root).
2204
2203
2205 filectxfn(repo, memctx, path) is a callable receiving the
2204 filectxfn(repo, memctx, path) is a callable receiving the
2206 repository, the current memctx object and the normalized path of
2205 repository, the current memctx object and the normalized path of
2207 requested file, relative to repository root. It is fired by the
2206 requested file, relative to repository root. It is fired by the
2208 commit function for every file in 'files', but calls order is
2207 commit function for every file in 'files', but calls order is
2209 undefined. If the file is available in the revision being
2208 undefined. If the file is available in the revision being
2210 committed (updated or added), filectxfn returns a memfilectx
2209 committed (updated or added), filectxfn returns a memfilectx
2211 object. If the file was removed, filectxfn return None for recent
2210 object. If the file was removed, filectxfn return None for recent
2212 Mercurial. Moved files are represented by marking the source file
2211 Mercurial. Moved files are represented by marking the source file
2213 removed and the new file added with copy information (see
2212 removed and the new file added with copy information (see
2214 memfilectx).
2213 memfilectx).
2215
2214
2216 user receives the committer name and defaults to current
2215 user receives the committer name and defaults to current
2217 repository username, date is the commit date in any format
2216 repository username, date is the commit date in any format
2218 supported by dateutil.parsedate() and defaults to current date, extra
2217 supported by dateutil.parsedate() and defaults to current date, extra
2219 is a dictionary of metadata or is left empty.
2218 is a dictionary of metadata or is left empty.
2220 """
2219 """
2221
2220
2222 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2221 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2223 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2222 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2224 # this field to determine what to do in filectxfn.
2223 # this field to determine what to do in filectxfn.
2225 _returnnoneformissingfiles = True
2224 _returnnoneformissingfiles = True
2226
2225
2227 def __init__(self, repo, parents, text, files, filectxfn, user=None,
2226 def __init__(self, repo, parents, text, files, filectxfn, user=None,
2228 date=None, extra=None, branch=None, editor=False):
2227 date=None, extra=None, branch=None, editor=False):
2229 super(memctx, self).__init__(repo, text, user, date, extra)
2228 super(memctx, self).__init__(repo, text, user, date, extra)
2230 self._rev = None
2229 self._rev = None
2231 self._node = None
2230 self._node = None
2232 parents = [(p or nullid) for p in parents]
2231 parents = [(p or nullid) for p in parents]
2233 p1, p2 = parents
2232 p1, p2 = parents
2234 self._parents = [self._repo[p] for p in (p1, p2)]
2233 self._parents = [self._repo[p] for p in (p1, p2)]
2235 files = sorted(set(files))
2234 files = sorted(set(files))
2236 self._files = files
2235 self._files = files
2237 if branch is not None:
2236 if branch is not None:
2238 self._extra['branch'] = encoding.fromlocal(branch)
2237 self._extra['branch'] = encoding.fromlocal(branch)
2239 self.substate = {}
2238 self.substate = {}
2240
2239
2241 if isinstance(filectxfn, patch.filestore):
2240 if isinstance(filectxfn, patch.filestore):
2242 filectxfn = memfilefrompatch(filectxfn)
2241 filectxfn = memfilefrompatch(filectxfn)
2243 elif not callable(filectxfn):
2242 elif not callable(filectxfn):
2244 # if store is not callable, wrap it in a function
2243 # if store is not callable, wrap it in a function
2245 filectxfn = memfilefromctx(filectxfn)
2244 filectxfn = memfilefromctx(filectxfn)
2246
2245
2247 # memoizing increases performance for e.g. vcs convert scenarios.
2246 # memoizing increases performance for e.g. vcs convert scenarios.
2248 self._filectxfn = makecachingfilectxfn(filectxfn)
2247 self._filectxfn = makecachingfilectxfn(filectxfn)
2249
2248
2250 if editor:
2249 if editor:
2251 self._text = editor(self._repo, self, [])
2250 self._text = editor(self._repo, self, [])
2252 self._repo.savecommitmessage(self._text)
2251 self._repo.savecommitmessage(self._text)
2253
2252
2254 def filectx(self, path, filelog=None):
2253 def filectx(self, path, filelog=None):
2255 """get a file context from the working directory
2254 """get a file context from the working directory
2256
2255
2257 Returns None if file doesn't exist and should be removed."""
2256 Returns None if file doesn't exist and should be removed."""
2258 return self._filectxfn(self._repo, self, path)
2257 return self._filectxfn(self._repo, self, path)
2259
2258
2260 def commit(self):
2259 def commit(self):
2261 """commit context to the repo"""
2260 """commit context to the repo"""
2262 return self._repo.commitctx(self)
2261 return self._repo.commitctx(self)
2263
2262
2264 @propertycache
2263 @propertycache
2265 def _manifest(self):
2264 def _manifest(self):
2266 """generate a manifest based on the return values of filectxfn"""
2265 """generate a manifest based on the return values of filectxfn"""
2267
2266
2268 # keep this simple for now; just worry about p1
2267 # keep this simple for now; just worry about p1
2269 pctx = self._parents[0]
2268 pctx = self._parents[0]
2270 man = pctx.manifest().copy()
2269 man = pctx.manifest().copy()
2271
2270
2272 for f in self._status.modified:
2271 for f in self._status.modified:
2273 man[f] = modifiednodeid
2272 man[f] = modifiednodeid
2274
2273
2275 for f in self._status.added:
2274 for f in self._status.added:
2276 man[f] = addednodeid
2275 man[f] = addednodeid
2277
2276
2278 for f in self._status.removed:
2277 for f in self._status.removed:
2279 if f in man:
2278 if f in man:
2280 del man[f]
2279 del man[f]
2281
2280
2282 return man
2281 return man
2283
2282
2284 @propertycache
2283 @propertycache
2285 def _status(self):
2284 def _status(self):
2286 """Calculate exact status from ``files`` specified at construction
2285 """Calculate exact status from ``files`` specified at construction
2287 """
2286 """
2288 man1 = self.p1().manifest()
2287 man1 = self.p1().manifest()
2289 p2 = self._parents[1]
2288 p2 = self._parents[1]
2290 # "1 < len(self._parents)" can't be used for checking
2289 # "1 < len(self._parents)" can't be used for checking
2291 # existence of the 2nd parent, because "memctx._parents" is
2290 # existence of the 2nd parent, because "memctx._parents" is
2292 # explicitly initialized by the list, of which length is 2.
2291 # explicitly initialized by the list, of which length is 2.
2293 if p2.node() != nullid:
2292 if p2.node() != nullid:
2294 man2 = p2.manifest()
2293 man2 = p2.manifest()
2295 managing = lambda f: f in man1 or f in man2
2294 managing = lambda f: f in man1 or f in man2
2296 else:
2295 else:
2297 managing = lambda f: f in man1
2296 managing = lambda f: f in man1
2298
2297
2299 modified, added, removed = [], [], []
2298 modified, added, removed = [], [], []
2300 for f in self._files:
2299 for f in self._files:
2301 if not managing(f):
2300 if not managing(f):
2302 added.append(f)
2301 added.append(f)
2303 elif self[f]:
2302 elif self[f]:
2304 modified.append(f)
2303 modified.append(f)
2305 else:
2304 else:
2306 removed.append(f)
2305 removed.append(f)
2307
2306
2308 return scmutil.status(modified, added, removed, [], [], [], [])
2307 return scmutil.status(modified, added, removed, [], [], [], [])
2309
2308
2310 class memfilectx(committablefilectx):
2309 class memfilectx(committablefilectx):
2311 """memfilectx represents an in-memory file to commit.
2310 """memfilectx represents an in-memory file to commit.
2312
2311
2313 See memctx and committablefilectx for more details.
2312 See memctx and committablefilectx for more details.
2314 """
2313 """
2315 def __init__(self, repo, changectx, path, data, islink=False,
2314 def __init__(self, repo, changectx, path, data, islink=False,
2316 isexec=False, copied=None):
2315 isexec=False, copied=None):
2317 """
2316 """
2318 path is the normalized file path relative to repository root.
2317 path is the normalized file path relative to repository root.
2319 data is the file content as a string.
2318 data is the file content as a string.
2320 islink is True if the file is a symbolic link.
2319 islink is True if the file is a symbolic link.
2321 isexec is True if the file is executable.
2320 isexec is True if the file is executable.
2322 copied is the source file path if current file was copied in the
2321 copied is the source file path if current file was copied in the
2323 revision being committed, or None."""
2322 revision being committed, or None."""
2324 super(memfilectx, self).__init__(repo, path, None, changectx)
2323 super(memfilectx, self).__init__(repo, path, None, changectx)
2325 self._data = data
2324 self._data = data
2326 if islink:
2325 if islink:
2327 self._flags = 'l'
2326 self._flags = 'l'
2328 elif isexec:
2327 elif isexec:
2329 self._flags = 'x'
2328 self._flags = 'x'
2330 else:
2329 else:
2331 self._flags = ''
2330 self._flags = ''
2332 self._copied = None
2331 self._copied = None
2333 if copied:
2332 if copied:
2334 self._copied = (copied, nullid)
2333 self._copied = (copied, nullid)
2335
2334
2336 def cmp(self, fctx):
2335 def cmp(self, fctx):
2337 return self.data() != fctx.data()
2336 return self.data() != fctx.data()
2338
2337
2339 def data(self):
2338 def data(self):
2340 return self._data
2339 return self._data
2341
2340
2342 def remove(self, ignoremissing=False):
2341 def remove(self, ignoremissing=False):
2343 """wraps unlink for a repo's working directory"""
2342 """wraps unlink for a repo's working directory"""
2344 # need to figure out what to do here
2343 # need to figure out what to do here
2345 del self._changectx[self._path]
2344 del self._changectx[self._path]
2346
2345
2347 def write(self, data, flags, **kwargs):
2346 def write(self, data, flags, **kwargs):
2348 """wraps repo.wwrite"""
2347 """wraps repo.wwrite"""
2349 self._data = data
2348 self._data = data
2350
2349
2351
2350
2352 class metadataonlyctx(committablectx):
2351 class metadataonlyctx(committablectx):
2353 """Like memctx but it's reusing the manifest of different commit.
2352 """Like memctx but it's reusing the manifest of different commit.
2354 Intended to be used by lightweight operations that are creating
2353 Intended to be used by lightweight operations that are creating
2355 metadata-only changes.
2354 metadata-only changes.
2356
2355
2357 Revision information is supplied at initialization time. 'repo' is the
2356 Revision information is supplied at initialization time. 'repo' is the
2358 current localrepo, 'ctx' is original revision which manifest we're reuisng
2357 current localrepo, 'ctx' is original revision which manifest we're reuisng
2359 'parents' is a sequence of two parent revisions identifiers (pass None for
2358 'parents' is a sequence of two parent revisions identifiers (pass None for
2360 every missing parent), 'text' is the commit.
2359 every missing parent), 'text' is the commit.
2361
2360
2362 user receives the committer name and defaults to current repository
2361 user receives the committer name and defaults to current repository
2363 username, date is the commit date in any format supported by
2362 username, date is the commit date in any format supported by
2364 dateutil.parsedate() and defaults to current date, extra is a dictionary of
2363 dateutil.parsedate() and defaults to current date, extra is a dictionary of
2365 metadata or is left empty.
2364 metadata or is left empty.
2366 """
2365 """
2367 def __init__(self, repo, originalctx, parents=None, text=None, user=None,
2366 def __init__(self, repo, originalctx, parents=None, text=None, user=None,
2368 date=None, extra=None, editor=False):
2367 date=None, extra=None, editor=False):
2369 if text is None:
2368 if text is None:
2370 text = originalctx.description()
2369 text = originalctx.description()
2371 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2370 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2372 self._rev = None
2371 self._rev = None
2373 self._node = None
2372 self._node = None
2374 self._originalctx = originalctx
2373 self._originalctx = originalctx
2375 self._manifestnode = originalctx.manifestnode()
2374 self._manifestnode = originalctx.manifestnode()
2376 if parents is None:
2375 if parents is None:
2377 parents = originalctx.parents()
2376 parents = originalctx.parents()
2378 else:
2377 else:
2379 parents = [repo[p] for p in parents if p is not None]
2378 parents = [repo[p] for p in parents if p is not None]
2380 parents = parents[:]
2379 parents = parents[:]
2381 while len(parents) < 2:
2380 while len(parents) < 2:
2382 parents.append(repo[nullid])
2381 parents.append(repo[nullid])
2383 p1, p2 = self._parents = parents
2382 p1, p2 = self._parents = parents
2384
2383
2385 # sanity check to ensure that the reused manifest parents are
2384 # sanity check to ensure that the reused manifest parents are
2386 # manifests of our commit parents
2385 # manifests of our commit parents
2387 mp1, mp2 = self.manifestctx().parents
2386 mp1, mp2 = self.manifestctx().parents
2388 if p1 != nullid and p1.manifestnode() != mp1:
2387 if p1 != nullid and p1.manifestnode() != mp1:
2389 raise RuntimeError(r"can't reuse the manifest: its p1 "
2388 raise RuntimeError(r"can't reuse the manifest: its p1 "
2390 r"doesn't match the new ctx p1")
2389 r"doesn't match the new ctx p1")
2391 if p2 != nullid and p2.manifestnode() != mp2:
2390 if p2 != nullid and p2.manifestnode() != mp2:
2392 raise RuntimeError(r"can't reuse the manifest: "
2391 raise RuntimeError(r"can't reuse the manifest: "
2393 r"its p2 doesn't match the new ctx p2")
2392 r"its p2 doesn't match the new ctx p2")
2394
2393
2395 self._files = originalctx.files()
2394 self._files = originalctx.files()
2396 self.substate = {}
2395 self.substate = {}
2397
2396
2398 if editor:
2397 if editor:
2399 self._text = editor(self._repo, self, [])
2398 self._text = editor(self._repo, self, [])
2400 self._repo.savecommitmessage(self._text)
2399 self._repo.savecommitmessage(self._text)
2401
2400
2402 def manifestnode(self):
2401 def manifestnode(self):
2403 return self._manifestnode
2402 return self._manifestnode
2404
2403
2405 @property
2404 @property
2406 def _manifestctx(self):
2405 def _manifestctx(self):
2407 return self._repo.manifestlog[self._manifestnode]
2406 return self._repo.manifestlog[self._manifestnode]
2408
2407
2409 def filectx(self, path, filelog=None):
2408 def filectx(self, path, filelog=None):
2410 return self._originalctx.filectx(path, filelog=filelog)
2409 return self._originalctx.filectx(path, filelog=filelog)
2411
2410
2412 def commit(self):
2411 def commit(self):
2413 """commit context to the repo"""
2412 """commit context to the repo"""
2414 return self._repo.commitctx(self)
2413 return self._repo.commitctx(self)
2415
2414
2416 @property
2415 @property
2417 def _manifest(self):
2416 def _manifest(self):
2418 return self._originalctx.manifest()
2417 return self._originalctx.manifest()
2419
2418
2420 @propertycache
2419 @propertycache
2421 def _status(self):
2420 def _status(self):
2422 """Calculate exact status from ``files`` specified in the ``origctx``
2421 """Calculate exact status from ``files`` specified in the ``origctx``
2423 and parents manifests.
2422 and parents manifests.
2424 """
2423 """
2425 man1 = self.p1().manifest()
2424 man1 = self.p1().manifest()
2426 p2 = self._parents[1]
2425 p2 = self._parents[1]
2427 # "1 < len(self._parents)" can't be used for checking
2426 # "1 < len(self._parents)" can't be used for checking
2428 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2427 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2429 # explicitly initialized by the list, of which length is 2.
2428 # explicitly initialized by the list, of which length is 2.
2430 if p2.node() != nullid:
2429 if p2.node() != nullid:
2431 man2 = p2.manifest()
2430 man2 = p2.manifest()
2432 managing = lambda f: f in man1 or f in man2
2431 managing = lambda f: f in man1 or f in man2
2433 else:
2432 else:
2434 managing = lambda f: f in man1
2433 managing = lambda f: f in man1
2435
2434
2436 modified, added, removed = [], [], []
2435 modified, added, removed = [], [], []
2437 for f in self._files:
2436 for f in self._files:
2438 if not managing(f):
2437 if not managing(f):
2439 added.append(f)
2438 added.append(f)
2440 elif f in self:
2439 elif f in self:
2441 modified.append(f)
2440 modified.append(f)
2442 else:
2441 else:
2443 removed.append(f)
2442 removed.append(f)
2444
2443
2445 return scmutil.status(modified, added, removed, [], [], [], [])
2444 return scmutil.status(modified, added, removed, [], [], [], [])
2446
2445
2447 class arbitraryfilectx(object):
2446 class arbitraryfilectx(object):
2448 """Allows you to use filectx-like functions on a file in an arbitrary
2447 """Allows you to use filectx-like functions on a file in an arbitrary
2449 location on disk, possibly not in the working directory.
2448 location on disk, possibly not in the working directory.
2450 """
2449 """
2451 def __init__(self, path, repo=None):
2450 def __init__(self, path, repo=None):
2452 # Repo is optional because contrib/simplemerge uses this class.
2451 # Repo is optional because contrib/simplemerge uses this class.
2453 self._repo = repo
2452 self._repo = repo
2454 self._path = path
2453 self._path = path
2455
2454
2456 def cmp(self, fctx):
2455 def cmp(self, fctx):
2457 # filecmp follows symlinks whereas `cmp` should not, so skip the fast
2456 # filecmp follows symlinks whereas `cmp` should not, so skip the fast
2458 # path if either side is a symlink.
2457 # path if either side is a symlink.
2459 symlinks = ('l' in self.flags() or 'l' in fctx.flags())
2458 symlinks = ('l' in self.flags() or 'l' in fctx.flags())
2460 if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
2459 if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
2461 # Add a fast-path for merge if both sides are disk-backed.
2460 # Add a fast-path for merge if both sides are disk-backed.
2462 # Note that filecmp uses the opposite return values (True if same)
2461 # Note that filecmp uses the opposite return values (True if same)
2463 # from our cmp functions (True if different).
2462 # from our cmp functions (True if different).
2464 return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
2463 return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
2465 return self.data() != fctx.data()
2464 return self.data() != fctx.data()
2466
2465
2467 def path(self):
2466 def path(self):
2468 return self._path
2467 return self._path
2469
2468
2470 def flags(self):
2469 def flags(self):
2471 return ''
2470 return ''
2472
2471
2473 def data(self):
2472 def data(self):
2474 return util.readfile(self._path)
2473 return util.readfile(self._path)
2475
2474
2476 def decodeddata(self):
2475 def decodeddata(self):
2477 with open(self._path, "rb") as f:
2476 with open(self._path, "rb") as f:
2478 return f.read()
2477 return f.read()
2479
2478
2480 def remove(self):
2479 def remove(self):
2481 util.unlink(self._path)
2480 util.unlink(self._path)
2482
2481
2483 def write(self, data, flags, **kwargs):
2482 def write(self, data, flags, **kwargs):
2484 assert not flags
2483 assert not flags
2485 with open(self._path, "wb") as f:
2484 with open(self._path, "wb") as f:
2486 f.write(data)
2485 f.write(data)
@@ -1,920 +1,931 b''
1 # logcmdutil.py - utility for log-like commands
1 # logcmdutil.py - utility for log-like commands
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import itertools
10 import itertools
11 import os
11 import os
12 import posixpath
12
13
13 from .i18n import _
14 from .i18n import _
14 from .node import (
15 from .node import (
15 nullid,
16 nullid,
16 wdirid,
17 wdirid,
17 wdirrev,
18 wdirrev,
18 )
19 )
19
20
20 from . import (
21 from . import (
21 dagop,
22 dagop,
22 error,
23 error,
23 formatter,
24 formatter,
24 graphmod,
25 graphmod,
25 match as matchmod,
26 match as matchmod,
26 mdiff,
27 mdiff,
27 patch,
28 patch,
28 pathutil,
29 pathutil,
29 pycompat,
30 pycompat,
30 revset,
31 revset,
31 revsetlang,
32 revsetlang,
32 scmutil,
33 scmutil,
33 smartset,
34 smartset,
34 templatekw,
35 templatekw,
35 templater,
36 templater,
36 util,
37 util,
37 )
38 )
38 from .utils import (
39 from .utils import (
39 dateutil,
40 dateutil,
40 stringutil,
41 stringutil,
41 )
42 )
42
43
43 def getlimit(opts):
44 def getlimit(opts):
44 """get the log limit according to option -l/--limit"""
45 """get the log limit according to option -l/--limit"""
45 limit = opts.get('limit')
46 limit = opts.get('limit')
46 if limit:
47 if limit:
47 try:
48 try:
48 limit = int(limit)
49 limit = int(limit)
49 except ValueError:
50 except ValueError:
50 raise error.Abort(_('limit must be a positive integer'))
51 raise error.Abort(_('limit must be a positive integer'))
51 if limit <= 0:
52 if limit <= 0:
52 raise error.Abort(_('limit must be positive'))
53 raise error.Abort(_('limit must be positive'))
53 else:
54 else:
54 limit = None
55 limit = None
55 return limit
56 return limit
56
57
57 def diffordiffstat(ui, repo, diffopts, node1, node2, match,
58 def diffordiffstat(ui, repo, diffopts, node1, node2, match,
58 changes=None, stat=False, fp=None, graphwidth=0,
59 changes=None, stat=False, fp=None, graphwidth=0,
59 prefix='', root='', listsubrepos=False, hunksfilterfn=None):
60 prefix='', root='', listsubrepos=False, hunksfilterfn=None):
60 '''show diff or diffstat.'''
61 '''show diff or diffstat.'''
61 ctx1 = repo[node1]
62 ctx1 = repo[node1]
62 ctx2 = repo[node2]
63 ctx2 = repo[node2]
63 if root:
64 if root:
64 relroot = pathutil.canonpath(repo.root, repo.getcwd(), root)
65 relroot = pathutil.canonpath(repo.root, repo.getcwd(), root)
65 else:
66 else:
66 relroot = ''
67 relroot = ''
67 copysourcematch = None
68 copysourcematch = None
69 def pathfn(f):
70 return posixpath.join(prefix, f)
68 if relroot != '':
71 if relroot != '':
69 # XXX relative roots currently don't work if the root is within a
72 # XXX relative roots currently don't work if the root is within a
70 # subrepo
73 # subrepo
71 uirelroot = match.uipath(relroot)
74 uirelroot = match.uipath(relroot)
72 relroot += '/'
75 relroot += '/'
73 for matchroot in match.files():
76 for matchroot in match.files():
74 if not matchroot.startswith(relroot):
77 if not matchroot.startswith(relroot):
75 ui.warn(_('warning: %s not inside relative root %s\n') % (
78 ui.warn(_('warning: %s not inside relative root %s\n') % (
76 match.uipath(matchroot), uirelroot))
79 match.uipath(matchroot), uirelroot))
77
80
78 relrootmatch = scmutil.match(ctx2, pats=[relroot], default='path')
81 relrootmatch = scmutil.match(ctx2, pats=[relroot], default='path')
79 match = matchmod.intersectmatchers(match, relrootmatch)
82 match = matchmod.intersectmatchers(match, relrootmatch)
80 copysourcematch = relrootmatch
83 copysourcematch = relrootmatch
81
84
85 checkroot = (repo.ui.configbool('devel', 'all-warnings') or
86 repo.ui.configbool('devel', 'check-relroot'))
87 def pathfn(f):
88 if checkroot and not f.startswith(relroot):
89 raise AssertionError(
90 "file %s doesn't start with relroot %s" % (f, relroot))
91 return posixpath.join(prefix, f[len(relroot):])
92
82 if stat:
93 if stat:
83 diffopts = diffopts.copy(context=0, noprefix=False)
94 diffopts = diffopts.copy(context=0, noprefix=False)
84 width = 80
95 width = 80
85 if not ui.plain():
96 if not ui.plain():
86 width = ui.termwidth() - graphwidth
97 width = ui.termwidth() - graphwidth
87
98
88 chunks = ctx2.diff(ctx1, match, changes, opts=diffopts, prefix=prefix,
99 chunks = ctx2.diff(ctx1, match, changes, opts=diffopts, pathfn=pathfn,
89 relroot=relroot, copysourcematch=copysourcematch,
100 copysourcematch=copysourcematch,
90 hunksfilterfn=hunksfilterfn)
101 hunksfilterfn=hunksfilterfn)
91
102
92 if fp is not None or ui.canwritewithoutlabels():
103 if fp is not None or ui.canwritewithoutlabels():
93 out = fp or ui
104 out = fp or ui
94 if stat:
105 if stat:
95 chunks = [patch.diffstat(util.iterlines(chunks), width=width)]
106 chunks = [patch.diffstat(util.iterlines(chunks), width=width)]
96 for chunk in util.filechunkiter(util.chunkbuffer(chunks)):
107 for chunk in util.filechunkiter(util.chunkbuffer(chunks)):
97 out.write(chunk)
108 out.write(chunk)
98 else:
109 else:
99 if stat:
110 if stat:
100 chunks = patch.diffstatui(util.iterlines(chunks), width=width)
111 chunks = patch.diffstatui(util.iterlines(chunks), width=width)
101 else:
112 else:
102 chunks = patch.difflabel(lambda chunks, **kwargs: chunks, chunks,
113 chunks = patch.difflabel(lambda chunks, **kwargs: chunks, chunks,
103 opts=diffopts)
114 opts=diffopts)
104 if ui.canbatchlabeledwrites():
115 if ui.canbatchlabeledwrites():
105 def gen():
116 def gen():
106 for chunk, label in chunks:
117 for chunk, label in chunks:
107 yield ui.label(chunk, label=label)
118 yield ui.label(chunk, label=label)
108 for chunk in util.filechunkiter(util.chunkbuffer(gen())):
119 for chunk in util.filechunkiter(util.chunkbuffer(gen())):
109 ui.write(chunk)
120 ui.write(chunk)
110 else:
121 else:
111 for chunk, label in chunks:
122 for chunk, label in chunks:
112 ui.write(chunk, label=label)
123 ui.write(chunk, label=label)
113
124
114 if listsubrepos:
125 if listsubrepos:
115 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
126 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
116 tempnode2 = node2
127 tempnode2 = node2
117 try:
128 try:
118 if node2 is not None:
129 if node2 is not None:
119 tempnode2 = ctx2.substate[subpath][1]
130 tempnode2 = ctx2.substate[subpath][1]
120 except KeyError:
131 except KeyError:
121 # A subrepo that existed in node1 was deleted between node1 and
132 # A subrepo that existed in node1 was deleted between node1 and
122 # node2 (inclusive). Thus, ctx2's substate won't contain that
133 # node2 (inclusive). Thus, ctx2's substate won't contain that
123 # subpath. The best we can do is to ignore it.
134 # subpath. The best we can do is to ignore it.
124 tempnode2 = None
135 tempnode2 = None
125 submatch = matchmod.subdirmatcher(subpath, match)
136 submatch = matchmod.subdirmatcher(subpath, match)
126 subprefix = repo.wvfs.reljoin(prefix, subpath)
137 subprefix = repo.wvfs.reljoin(prefix, subpath)
127 sub.diff(ui, diffopts, tempnode2, submatch, changes=changes,
138 sub.diff(ui, diffopts, tempnode2, submatch, changes=changes,
128 stat=stat, fp=fp, prefix=subprefix)
139 stat=stat, fp=fp, prefix=subprefix)
129
140
130 class changesetdiffer(object):
141 class changesetdiffer(object):
131 """Generate diff of changeset with pre-configured filtering functions"""
142 """Generate diff of changeset with pre-configured filtering functions"""
132
143
133 def _makefilematcher(self, ctx):
144 def _makefilematcher(self, ctx):
134 return scmutil.matchall(ctx.repo())
145 return scmutil.matchall(ctx.repo())
135
146
136 def _makehunksfilter(self, ctx):
147 def _makehunksfilter(self, ctx):
137 return None
148 return None
138
149
139 def showdiff(self, ui, ctx, diffopts, graphwidth=0, stat=False):
150 def showdiff(self, ui, ctx, diffopts, graphwidth=0, stat=False):
140 repo = ctx.repo()
151 repo = ctx.repo()
141 node = ctx.node()
152 node = ctx.node()
142 prev = ctx.p1().node()
153 prev = ctx.p1().node()
143 diffordiffstat(ui, repo, diffopts, prev, node,
154 diffordiffstat(ui, repo, diffopts, prev, node,
144 match=self._makefilematcher(ctx), stat=stat,
155 match=self._makefilematcher(ctx), stat=stat,
145 graphwidth=graphwidth,
156 graphwidth=graphwidth,
146 hunksfilterfn=self._makehunksfilter(ctx))
157 hunksfilterfn=self._makehunksfilter(ctx))
147
158
148 def changesetlabels(ctx):
159 def changesetlabels(ctx):
149 labels = ['log.changeset', 'changeset.%s' % ctx.phasestr()]
160 labels = ['log.changeset', 'changeset.%s' % ctx.phasestr()]
150 if ctx.obsolete():
161 if ctx.obsolete():
151 labels.append('changeset.obsolete')
162 labels.append('changeset.obsolete')
152 if ctx.isunstable():
163 if ctx.isunstable():
153 labels.append('changeset.unstable')
164 labels.append('changeset.unstable')
154 for instability in ctx.instabilities():
165 for instability in ctx.instabilities():
155 labels.append('instability.%s' % instability)
166 labels.append('instability.%s' % instability)
156 return ' '.join(labels)
167 return ' '.join(labels)
157
168
158 class changesetprinter(object):
169 class changesetprinter(object):
159 '''show changeset information when templating not requested.'''
170 '''show changeset information when templating not requested.'''
160
171
161 def __init__(self, ui, repo, differ=None, diffopts=None, buffered=False):
172 def __init__(self, ui, repo, differ=None, diffopts=None, buffered=False):
162 self.ui = ui
173 self.ui = ui
163 self.repo = repo
174 self.repo = repo
164 self.buffered = buffered
175 self.buffered = buffered
165 self._differ = differ or changesetdiffer()
176 self._differ = differ or changesetdiffer()
166 self._diffopts = patch.diffallopts(ui, diffopts)
177 self._diffopts = patch.diffallopts(ui, diffopts)
167 self._includestat = diffopts and diffopts.get('stat')
178 self._includestat = diffopts and diffopts.get('stat')
168 self._includediff = diffopts and diffopts.get('patch')
179 self._includediff = diffopts and diffopts.get('patch')
169 self.header = {}
180 self.header = {}
170 self.hunk = {}
181 self.hunk = {}
171 self.lastheader = None
182 self.lastheader = None
172 self.footer = None
183 self.footer = None
173 self._columns = templatekw.getlogcolumns()
184 self._columns = templatekw.getlogcolumns()
174
185
175 def flush(self, ctx):
186 def flush(self, ctx):
176 rev = ctx.rev()
187 rev = ctx.rev()
177 if rev in self.header:
188 if rev in self.header:
178 h = self.header[rev]
189 h = self.header[rev]
179 if h != self.lastheader:
190 if h != self.lastheader:
180 self.lastheader = h
191 self.lastheader = h
181 self.ui.write(h)
192 self.ui.write(h)
182 del self.header[rev]
193 del self.header[rev]
183 if rev in self.hunk:
194 if rev in self.hunk:
184 self.ui.write(self.hunk[rev])
195 self.ui.write(self.hunk[rev])
185 del self.hunk[rev]
196 del self.hunk[rev]
186
197
187 def close(self):
198 def close(self):
188 if self.footer:
199 if self.footer:
189 self.ui.write(self.footer)
200 self.ui.write(self.footer)
190
201
191 def show(self, ctx, copies=None, **props):
202 def show(self, ctx, copies=None, **props):
192 props = pycompat.byteskwargs(props)
203 props = pycompat.byteskwargs(props)
193 if self.buffered:
204 if self.buffered:
194 self.ui.pushbuffer(labeled=True)
205 self.ui.pushbuffer(labeled=True)
195 self._show(ctx, copies, props)
206 self._show(ctx, copies, props)
196 self.hunk[ctx.rev()] = self.ui.popbuffer()
207 self.hunk[ctx.rev()] = self.ui.popbuffer()
197 else:
208 else:
198 self._show(ctx, copies, props)
209 self._show(ctx, copies, props)
199
210
200 def _show(self, ctx, copies, props):
211 def _show(self, ctx, copies, props):
201 '''show a single changeset or file revision'''
212 '''show a single changeset or file revision'''
202 changenode = ctx.node()
213 changenode = ctx.node()
203 graphwidth = props.get('graphwidth', 0)
214 graphwidth = props.get('graphwidth', 0)
204
215
205 if self.ui.quiet:
216 if self.ui.quiet:
206 self.ui.write("%s\n" % scmutil.formatchangeid(ctx),
217 self.ui.write("%s\n" % scmutil.formatchangeid(ctx),
207 label='log.node')
218 label='log.node')
208 return
219 return
209
220
210 columns = self._columns
221 columns = self._columns
211 self.ui.write(columns['changeset'] % scmutil.formatchangeid(ctx),
222 self.ui.write(columns['changeset'] % scmutil.formatchangeid(ctx),
212 label=changesetlabels(ctx))
223 label=changesetlabels(ctx))
213
224
214 # branches are shown first before any other names due to backwards
225 # branches are shown first before any other names due to backwards
215 # compatibility
226 # compatibility
216 branch = ctx.branch()
227 branch = ctx.branch()
217 # don't show the default branch name
228 # don't show the default branch name
218 if branch != 'default':
229 if branch != 'default':
219 self.ui.write(columns['branch'] % branch, label='log.branch')
230 self.ui.write(columns['branch'] % branch, label='log.branch')
220
231
221 for nsname, ns in self.repo.names.iteritems():
232 for nsname, ns in self.repo.names.iteritems():
222 # branches has special logic already handled above, so here we just
233 # branches has special logic already handled above, so here we just
223 # skip it
234 # skip it
224 if nsname == 'branches':
235 if nsname == 'branches':
225 continue
236 continue
226 # we will use the templatename as the color name since those two
237 # we will use the templatename as the color name since those two
227 # should be the same
238 # should be the same
228 for name in ns.names(self.repo, changenode):
239 for name in ns.names(self.repo, changenode):
229 self.ui.write(ns.logfmt % name,
240 self.ui.write(ns.logfmt % name,
230 label='log.%s' % ns.colorname)
241 label='log.%s' % ns.colorname)
231 if self.ui.debugflag:
242 if self.ui.debugflag:
232 self.ui.write(columns['phase'] % ctx.phasestr(), label='log.phase')
243 self.ui.write(columns['phase'] % ctx.phasestr(), label='log.phase')
233 for pctx in scmutil.meaningfulparents(self.repo, ctx):
244 for pctx in scmutil.meaningfulparents(self.repo, ctx):
234 label = 'log.parent changeset.%s' % pctx.phasestr()
245 label = 'log.parent changeset.%s' % pctx.phasestr()
235 self.ui.write(columns['parent'] % scmutil.formatchangeid(pctx),
246 self.ui.write(columns['parent'] % scmutil.formatchangeid(pctx),
236 label=label)
247 label=label)
237
248
238 if self.ui.debugflag:
249 if self.ui.debugflag:
239 mnode = ctx.manifestnode()
250 mnode = ctx.manifestnode()
240 if mnode is None:
251 if mnode is None:
241 mnode = wdirid
252 mnode = wdirid
242 mrev = wdirrev
253 mrev = wdirrev
243 else:
254 else:
244 mrev = self.repo.manifestlog.rev(mnode)
255 mrev = self.repo.manifestlog.rev(mnode)
245 self.ui.write(columns['manifest']
256 self.ui.write(columns['manifest']
246 % scmutil.formatrevnode(self.ui, mrev, mnode),
257 % scmutil.formatrevnode(self.ui, mrev, mnode),
247 label='ui.debug log.manifest')
258 label='ui.debug log.manifest')
248 self.ui.write(columns['user'] % ctx.user(), label='log.user')
259 self.ui.write(columns['user'] % ctx.user(), label='log.user')
249 self.ui.write(columns['date'] % dateutil.datestr(ctx.date()),
260 self.ui.write(columns['date'] % dateutil.datestr(ctx.date()),
250 label='log.date')
261 label='log.date')
251
262
252 if ctx.isunstable():
263 if ctx.isunstable():
253 instabilities = ctx.instabilities()
264 instabilities = ctx.instabilities()
254 self.ui.write(columns['instability'] % ', '.join(instabilities),
265 self.ui.write(columns['instability'] % ', '.join(instabilities),
255 label='log.instability')
266 label='log.instability')
256
267
257 elif ctx.obsolete():
268 elif ctx.obsolete():
258 self._showobsfate(ctx)
269 self._showobsfate(ctx)
259
270
260 self._exthook(ctx)
271 self._exthook(ctx)
261
272
262 if self.ui.debugflag:
273 if self.ui.debugflag:
263 files = ctx.p1().status(ctx)[:3]
274 files = ctx.p1().status(ctx)[:3]
264 for key, value in zip(['files', 'files+', 'files-'], files):
275 for key, value in zip(['files', 'files+', 'files-'], files):
265 if value:
276 if value:
266 self.ui.write(columns[key] % " ".join(value),
277 self.ui.write(columns[key] % " ".join(value),
267 label='ui.debug log.files')
278 label='ui.debug log.files')
268 elif ctx.files() and self.ui.verbose:
279 elif ctx.files() and self.ui.verbose:
269 self.ui.write(columns['files'] % " ".join(ctx.files()),
280 self.ui.write(columns['files'] % " ".join(ctx.files()),
270 label='ui.note log.files')
281 label='ui.note log.files')
271 if copies and self.ui.verbose:
282 if copies and self.ui.verbose:
272 copies = ['%s (%s)' % c for c in copies]
283 copies = ['%s (%s)' % c for c in copies]
273 self.ui.write(columns['copies'] % ' '.join(copies),
284 self.ui.write(columns['copies'] % ' '.join(copies),
274 label='ui.note log.copies')
285 label='ui.note log.copies')
275
286
276 extra = ctx.extra()
287 extra = ctx.extra()
277 if extra and self.ui.debugflag:
288 if extra and self.ui.debugflag:
278 for key, value in sorted(extra.items()):
289 for key, value in sorted(extra.items()):
279 self.ui.write(columns['extra']
290 self.ui.write(columns['extra']
280 % (key, stringutil.escapestr(value)),
291 % (key, stringutil.escapestr(value)),
281 label='ui.debug log.extra')
292 label='ui.debug log.extra')
282
293
283 description = ctx.description().strip()
294 description = ctx.description().strip()
284 if description:
295 if description:
285 if self.ui.verbose:
296 if self.ui.verbose:
286 self.ui.write(_("description:\n"),
297 self.ui.write(_("description:\n"),
287 label='ui.note log.description')
298 label='ui.note log.description')
288 self.ui.write(description,
299 self.ui.write(description,
289 label='ui.note log.description')
300 label='ui.note log.description')
290 self.ui.write("\n\n")
301 self.ui.write("\n\n")
291 else:
302 else:
292 self.ui.write(columns['summary'] % description.splitlines()[0],
303 self.ui.write(columns['summary'] % description.splitlines()[0],
293 label='log.summary')
304 label='log.summary')
294 self.ui.write("\n")
305 self.ui.write("\n")
295
306
296 self._showpatch(ctx, graphwidth)
307 self._showpatch(ctx, graphwidth)
297
308
298 def _showobsfate(self, ctx):
309 def _showobsfate(self, ctx):
299 # TODO: do not depend on templater
310 # TODO: do not depend on templater
300 tres = formatter.templateresources(self.repo.ui, self.repo)
311 tres = formatter.templateresources(self.repo.ui, self.repo)
301 t = formatter.maketemplater(self.repo.ui, '{join(obsfate, "\n")}',
312 t = formatter.maketemplater(self.repo.ui, '{join(obsfate, "\n")}',
302 defaults=templatekw.keywords,
313 defaults=templatekw.keywords,
303 resources=tres)
314 resources=tres)
304 obsfate = t.renderdefault({'ctx': ctx}).splitlines()
315 obsfate = t.renderdefault({'ctx': ctx}).splitlines()
305
316
306 if obsfate:
317 if obsfate:
307 for obsfateline in obsfate:
318 for obsfateline in obsfate:
308 self.ui.write(self._columns['obsolete'] % obsfateline,
319 self.ui.write(self._columns['obsolete'] % obsfateline,
309 label='log.obsfate')
320 label='log.obsfate')
310
321
311 def _exthook(self, ctx):
322 def _exthook(self, ctx):
312 '''empty method used by extension as a hook point
323 '''empty method used by extension as a hook point
313 '''
324 '''
314
325
315 def _showpatch(self, ctx, graphwidth=0):
326 def _showpatch(self, ctx, graphwidth=0):
316 if self._includestat:
327 if self._includestat:
317 self._differ.showdiff(self.ui, ctx, self._diffopts,
328 self._differ.showdiff(self.ui, ctx, self._diffopts,
318 graphwidth, stat=True)
329 graphwidth, stat=True)
319 if self._includestat and self._includediff:
330 if self._includestat and self._includediff:
320 self.ui.write("\n")
331 self.ui.write("\n")
321 if self._includediff:
332 if self._includediff:
322 self._differ.showdiff(self.ui, ctx, self._diffopts,
333 self._differ.showdiff(self.ui, ctx, self._diffopts,
323 graphwidth, stat=False)
334 graphwidth, stat=False)
324 if self._includestat or self._includediff:
335 if self._includestat or self._includediff:
325 self.ui.write("\n")
336 self.ui.write("\n")
326
337
327 class changesetformatter(changesetprinter):
338 class changesetformatter(changesetprinter):
328 """Format changeset information by generic formatter"""
339 """Format changeset information by generic formatter"""
329
340
330 def __init__(self, ui, repo, fm, differ=None, diffopts=None,
341 def __init__(self, ui, repo, fm, differ=None, diffopts=None,
331 buffered=False):
342 buffered=False):
332 changesetprinter.__init__(self, ui, repo, differ, diffopts, buffered)
343 changesetprinter.__init__(self, ui, repo, differ, diffopts, buffered)
333 self._diffopts = patch.difffeatureopts(ui, diffopts, git=True)
344 self._diffopts = patch.difffeatureopts(ui, diffopts, git=True)
334 self._fm = fm
345 self._fm = fm
335
346
336 def close(self):
347 def close(self):
337 self._fm.end()
348 self._fm.end()
338
349
339 def _show(self, ctx, copies, props):
350 def _show(self, ctx, copies, props):
340 '''show a single changeset or file revision'''
351 '''show a single changeset or file revision'''
341 fm = self._fm
352 fm = self._fm
342 fm.startitem()
353 fm.startitem()
343 fm.context(ctx=ctx)
354 fm.context(ctx=ctx)
344 fm.data(rev=scmutil.intrev(ctx),
355 fm.data(rev=scmutil.intrev(ctx),
345 node=fm.hexfunc(scmutil.binnode(ctx)))
356 node=fm.hexfunc(scmutil.binnode(ctx)))
346
357
347 if self.ui.quiet:
358 if self.ui.quiet:
348 return
359 return
349
360
350 fm.data(branch=ctx.branch(),
361 fm.data(branch=ctx.branch(),
351 phase=ctx.phasestr(),
362 phase=ctx.phasestr(),
352 user=ctx.user(),
363 user=ctx.user(),
353 date=fm.formatdate(ctx.date()),
364 date=fm.formatdate(ctx.date()),
354 desc=ctx.description(),
365 desc=ctx.description(),
355 bookmarks=fm.formatlist(ctx.bookmarks(), name='bookmark'),
366 bookmarks=fm.formatlist(ctx.bookmarks(), name='bookmark'),
356 tags=fm.formatlist(ctx.tags(), name='tag'),
367 tags=fm.formatlist(ctx.tags(), name='tag'),
357 parents=fm.formatlist([fm.hexfunc(c.node())
368 parents=fm.formatlist([fm.hexfunc(c.node())
358 for c in ctx.parents()], name='node'))
369 for c in ctx.parents()], name='node'))
359
370
360 if self.ui.debugflag:
371 if self.ui.debugflag:
361 fm.data(manifest=fm.hexfunc(ctx.manifestnode() or wdirid),
372 fm.data(manifest=fm.hexfunc(ctx.manifestnode() or wdirid),
362 extra=fm.formatdict(ctx.extra()))
373 extra=fm.formatdict(ctx.extra()))
363
374
364 files = ctx.p1().status(ctx)
375 files = ctx.p1().status(ctx)
365 fm.data(modified=fm.formatlist(files[0], name='file'),
376 fm.data(modified=fm.formatlist(files[0], name='file'),
366 added=fm.formatlist(files[1], name='file'),
377 added=fm.formatlist(files[1], name='file'),
367 removed=fm.formatlist(files[2], name='file'))
378 removed=fm.formatlist(files[2], name='file'))
368
379
369 elif self.ui.verbose:
380 elif self.ui.verbose:
370 fm.data(files=fm.formatlist(ctx.files(), name='file'))
381 fm.data(files=fm.formatlist(ctx.files(), name='file'))
371 if copies:
382 if copies:
372 fm.data(copies=fm.formatdict(copies,
383 fm.data(copies=fm.formatdict(copies,
373 key='name', value='source'))
384 key='name', value='source'))
374
385
375 if self._includestat:
386 if self._includestat:
376 self.ui.pushbuffer()
387 self.ui.pushbuffer()
377 self._differ.showdiff(self.ui, ctx, self._diffopts, stat=True)
388 self._differ.showdiff(self.ui, ctx, self._diffopts, stat=True)
378 fm.data(diffstat=self.ui.popbuffer())
389 fm.data(diffstat=self.ui.popbuffer())
379 if self._includediff:
390 if self._includediff:
380 self.ui.pushbuffer()
391 self.ui.pushbuffer()
381 self._differ.showdiff(self.ui, ctx, self._diffopts, stat=False)
392 self._differ.showdiff(self.ui, ctx, self._diffopts, stat=False)
382 fm.data(diff=self.ui.popbuffer())
393 fm.data(diff=self.ui.popbuffer())
383
394
384 class changesettemplater(changesetprinter):
395 class changesettemplater(changesetprinter):
385 '''format changeset information.
396 '''format changeset information.
386
397
387 Note: there are a variety of convenience functions to build a
398 Note: there are a variety of convenience functions to build a
388 changesettemplater for common cases. See functions such as:
399 changesettemplater for common cases. See functions such as:
389 maketemplater, changesetdisplayer, buildcommittemplate, or other
400 maketemplater, changesetdisplayer, buildcommittemplate, or other
390 functions that use changesest_templater.
401 functions that use changesest_templater.
391 '''
402 '''
392
403
393 # Arguments before "buffered" used to be positional. Consider not
404 # Arguments before "buffered" used to be positional. Consider not
394 # adding/removing arguments before "buffered" to not break callers.
405 # adding/removing arguments before "buffered" to not break callers.
395 def __init__(self, ui, repo, tmplspec, differ=None, diffopts=None,
406 def __init__(self, ui, repo, tmplspec, differ=None, diffopts=None,
396 buffered=False):
407 buffered=False):
397 changesetprinter.__init__(self, ui, repo, differ, diffopts, buffered)
408 changesetprinter.__init__(self, ui, repo, differ, diffopts, buffered)
398 # tres is shared with _graphnodeformatter()
409 # tres is shared with _graphnodeformatter()
399 self._tresources = tres = formatter.templateresources(ui, repo)
410 self._tresources = tres = formatter.templateresources(ui, repo)
400 self.t = formatter.loadtemplater(ui, tmplspec,
411 self.t = formatter.loadtemplater(ui, tmplspec,
401 defaults=templatekw.keywords,
412 defaults=templatekw.keywords,
402 resources=tres,
413 resources=tres,
403 cache=templatekw.defaulttempl)
414 cache=templatekw.defaulttempl)
404 self._counter = itertools.count()
415 self._counter = itertools.count()
405
416
406 self._tref = tmplspec.ref
417 self._tref = tmplspec.ref
407 self._parts = {'header': '', 'footer': '',
418 self._parts = {'header': '', 'footer': '',
408 tmplspec.ref: tmplspec.ref,
419 tmplspec.ref: tmplspec.ref,
409 'docheader': '', 'docfooter': '',
420 'docheader': '', 'docfooter': '',
410 'separator': ''}
421 'separator': ''}
411 if tmplspec.mapfile:
422 if tmplspec.mapfile:
412 # find correct templates for current mode, for backward
423 # find correct templates for current mode, for backward
413 # compatibility with 'log -v/-q/--debug' using a mapfile
424 # compatibility with 'log -v/-q/--debug' using a mapfile
414 tmplmodes = [
425 tmplmodes = [
415 (True, ''),
426 (True, ''),
416 (self.ui.verbose, '_verbose'),
427 (self.ui.verbose, '_verbose'),
417 (self.ui.quiet, '_quiet'),
428 (self.ui.quiet, '_quiet'),
418 (self.ui.debugflag, '_debug'),
429 (self.ui.debugflag, '_debug'),
419 ]
430 ]
420 for mode, postfix in tmplmodes:
431 for mode, postfix in tmplmodes:
421 for t in self._parts:
432 for t in self._parts:
422 cur = t + postfix
433 cur = t + postfix
423 if mode and cur in self.t:
434 if mode and cur in self.t:
424 self._parts[t] = cur
435 self._parts[t] = cur
425 else:
436 else:
426 partnames = [p for p in self._parts.keys() if p != tmplspec.ref]
437 partnames = [p for p in self._parts.keys() if p != tmplspec.ref]
427 m = formatter.templatepartsmap(tmplspec, self.t, partnames)
438 m = formatter.templatepartsmap(tmplspec, self.t, partnames)
428 self._parts.update(m)
439 self._parts.update(m)
429
440
430 if self._parts['docheader']:
441 if self._parts['docheader']:
431 self.ui.write(self.t.render(self._parts['docheader'], {}))
442 self.ui.write(self.t.render(self._parts['docheader'], {}))
432
443
433 def close(self):
444 def close(self):
434 if self._parts['docfooter']:
445 if self._parts['docfooter']:
435 if not self.footer:
446 if not self.footer:
436 self.footer = ""
447 self.footer = ""
437 self.footer += self.t.render(self._parts['docfooter'], {})
448 self.footer += self.t.render(self._parts['docfooter'], {})
438 return super(changesettemplater, self).close()
449 return super(changesettemplater, self).close()
439
450
440 def _show(self, ctx, copies, props):
451 def _show(self, ctx, copies, props):
441 '''show a single changeset or file revision'''
452 '''show a single changeset or file revision'''
442 props = props.copy()
453 props = props.copy()
443 props['ctx'] = ctx
454 props['ctx'] = ctx
444 props['index'] = index = next(self._counter)
455 props['index'] = index = next(self._counter)
445 props['revcache'] = {'copies': copies}
456 props['revcache'] = {'copies': copies}
446 graphwidth = props.get('graphwidth', 0)
457 graphwidth = props.get('graphwidth', 0)
447
458
448 # write separator, which wouldn't work well with the header part below
459 # write separator, which wouldn't work well with the header part below
449 # since there's inherently a conflict between header (across items) and
460 # since there's inherently a conflict between header (across items) and
450 # separator (per item)
461 # separator (per item)
451 if self._parts['separator'] and index > 0:
462 if self._parts['separator'] and index > 0:
452 self.ui.write(self.t.render(self._parts['separator'], {}))
463 self.ui.write(self.t.render(self._parts['separator'], {}))
453
464
454 # write header
465 # write header
455 if self._parts['header']:
466 if self._parts['header']:
456 h = self.t.render(self._parts['header'], props)
467 h = self.t.render(self._parts['header'], props)
457 if self.buffered:
468 if self.buffered:
458 self.header[ctx.rev()] = h
469 self.header[ctx.rev()] = h
459 else:
470 else:
460 if self.lastheader != h:
471 if self.lastheader != h:
461 self.lastheader = h
472 self.lastheader = h
462 self.ui.write(h)
473 self.ui.write(h)
463
474
464 # write changeset metadata, then patch if requested
475 # write changeset metadata, then patch if requested
465 key = self._parts[self._tref]
476 key = self._parts[self._tref]
466 self.ui.write(self.t.render(key, props))
477 self.ui.write(self.t.render(key, props))
467 self._showpatch(ctx, graphwidth)
478 self._showpatch(ctx, graphwidth)
468
479
469 if self._parts['footer']:
480 if self._parts['footer']:
470 if not self.footer:
481 if not self.footer:
471 self.footer = self.t.render(self._parts['footer'], props)
482 self.footer = self.t.render(self._parts['footer'], props)
472
483
473 def templatespec(tmpl, mapfile):
484 def templatespec(tmpl, mapfile):
474 if pycompat.ispy3:
485 if pycompat.ispy3:
475 assert not isinstance(tmpl, str), 'tmpl must not be a str'
486 assert not isinstance(tmpl, str), 'tmpl must not be a str'
476 if mapfile:
487 if mapfile:
477 return formatter.templatespec('changeset', tmpl, mapfile)
488 return formatter.templatespec('changeset', tmpl, mapfile)
478 else:
489 else:
479 return formatter.templatespec('', tmpl, None)
490 return formatter.templatespec('', tmpl, None)
480
491
481 def _lookuptemplate(ui, tmpl, style):
492 def _lookuptemplate(ui, tmpl, style):
482 """Find the template matching the given template spec or style
493 """Find the template matching the given template spec or style
483
494
484 See formatter.lookuptemplate() for details.
495 See formatter.lookuptemplate() for details.
485 """
496 """
486
497
487 # ui settings
498 # ui settings
488 if not tmpl and not style: # template are stronger than style
499 if not tmpl and not style: # template are stronger than style
489 tmpl = ui.config('ui', 'logtemplate')
500 tmpl = ui.config('ui', 'logtemplate')
490 if tmpl:
501 if tmpl:
491 return templatespec(templater.unquotestring(tmpl), None)
502 return templatespec(templater.unquotestring(tmpl), None)
492 else:
503 else:
493 style = util.expandpath(ui.config('ui', 'style'))
504 style = util.expandpath(ui.config('ui', 'style'))
494
505
495 if not tmpl and style:
506 if not tmpl and style:
496 mapfile = style
507 mapfile = style
497 if not os.path.split(mapfile)[0]:
508 if not os.path.split(mapfile)[0]:
498 mapname = (templater.templatepath('map-cmdline.' + mapfile)
509 mapname = (templater.templatepath('map-cmdline.' + mapfile)
499 or templater.templatepath(mapfile))
510 or templater.templatepath(mapfile))
500 if mapname:
511 if mapname:
501 mapfile = mapname
512 mapfile = mapname
502 return templatespec(None, mapfile)
513 return templatespec(None, mapfile)
503
514
504 if not tmpl:
515 if not tmpl:
505 return templatespec(None, None)
516 return templatespec(None, None)
506
517
507 return formatter.lookuptemplate(ui, 'changeset', tmpl)
518 return formatter.lookuptemplate(ui, 'changeset', tmpl)
508
519
509 def maketemplater(ui, repo, tmpl, buffered=False):
520 def maketemplater(ui, repo, tmpl, buffered=False):
510 """Create a changesettemplater from a literal template 'tmpl'
521 """Create a changesettemplater from a literal template 'tmpl'
511 byte-string."""
522 byte-string."""
512 spec = templatespec(tmpl, None)
523 spec = templatespec(tmpl, None)
513 return changesettemplater(ui, repo, spec, buffered=buffered)
524 return changesettemplater(ui, repo, spec, buffered=buffered)
514
525
515 def changesetdisplayer(ui, repo, opts, differ=None, buffered=False):
526 def changesetdisplayer(ui, repo, opts, differ=None, buffered=False):
516 """show one changeset using template or regular display.
527 """show one changeset using template or regular display.
517
528
518 Display format will be the first non-empty hit of:
529 Display format will be the first non-empty hit of:
519 1. option 'template'
530 1. option 'template'
520 2. option 'style'
531 2. option 'style'
521 3. [ui] setting 'logtemplate'
532 3. [ui] setting 'logtemplate'
522 4. [ui] setting 'style'
533 4. [ui] setting 'style'
523 If all of these values are either the unset or the empty string,
534 If all of these values are either the unset or the empty string,
524 regular display via changesetprinter() is done.
535 regular display via changesetprinter() is done.
525 """
536 """
526 postargs = (differ, opts, buffered)
537 postargs = (differ, opts, buffered)
527 if opts.get('template') == 'json':
538 if opts.get('template') == 'json':
528 fm = ui.formatter('log', opts)
539 fm = ui.formatter('log', opts)
529 return changesetformatter(ui, repo, fm, *postargs)
540 return changesetformatter(ui, repo, fm, *postargs)
530
541
531 spec = _lookuptemplate(ui, opts.get('template'), opts.get('style'))
542 spec = _lookuptemplate(ui, opts.get('template'), opts.get('style'))
532
543
533 if not spec.ref and not spec.tmpl and not spec.mapfile:
544 if not spec.ref and not spec.tmpl and not spec.mapfile:
534 return changesetprinter(ui, repo, *postargs)
545 return changesetprinter(ui, repo, *postargs)
535
546
536 return changesettemplater(ui, repo, spec, *postargs)
547 return changesettemplater(ui, repo, spec, *postargs)
537
548
538 def _makematcher(repo, revs, pats, opts):
549 def _makematcher(repo, revs, pats, opts):
539 """Build matcher and expanded patterns from log options
550 """Build matcher and expanded patterns from log options
540
551
541 If --follow, revs are the revisions to follow from.
552 If --follow, revs are the revisions to follow from.
542
553
543 Returns (match, pats, slowpath) where
554 Returns (match, pats, slowpath) where
544 - match: a matcher built from the given pats and -I/-X opts
555 - match: a matcher built from the given pats and -I/-X opts
545 - pats: patterns used (globs are expanded on Windows)
556 - pats: patterns used (globs are expanded on Windows)
546 - slowpath: True if patterns aren't as simple as scanning filelogs
557 - slowpath: True if patterns aren't as simple as scanning filelogs
547 """
558 """
548 # pats/include/exclude are passed to match.match() directly in
559 # pats/include/exclude are passed to match.match() directly in
549 # _matchfiles() revset but walkchangerevs() builds its matcher with
560 # _matchfiles() revset but walkchangerevs() builds its matcher with
550 # scmutil.match(). The difference is input pats are globbed on
561 # scmutil.match(). The difference is input pats are globbed on
551 # platforms without shell expansion (windows).
562 # platforms without shell expansion (windows).
552 wctx = repo[None]
563 wctx = repo[None]
553 match, pats = scmutil.matchandpats(wctx, pats, opts)
564 match, pats = scmutil.matchandpats(wctx, pats, opts)
554 slowpath = match.anypats() or (not match.always() and opts.get('removed'))
565 slowpath = match.anypats() or (not match.always() and opts.get('removed'))
555 if not slowpath:
566 if not slowpath:
556 follow = opts.get('follow') or opts.get('follow_first')
567 follow = opts.get('follow') or opts.get('follow_first')
557 startctxs = []
568 startctxs = []
558 if follow and opts.get('rev'):
569 if follow and opts.get('rev'):
559 startctxs = [repo[r] for r in revs]
570 startctxs = [repo[r] for r in revs]
560 for f in match.files():
571 for f in match.files():
561 if follow and startctxs:
572 if follow and startctxs:
562 # No idea if the path was a directory at that revision, so
573 # No idea if the path was a directory at that revision, so
563 # take the slow path.
574 # take the slow path.
564 if any(f not in c for c in startctxs):
575 if any(f not in c for c in startctxs):
565 slowpath = True
576 slowpath = True
566 continue
577 continue
567 elif follow and f not in wctx:
578 elif follow and f not in wctx:
568 # If the file exists, it may be a directory, so let it
579 # If the file exists, it may be a directory, so let it
569 # take the slow path.
580 # take the slow path.
570 if os.path.exists(repo.wjoin(f)):
581 if os.path.exists(repo.wjoin(f)):
571 slowpath = True
582 slowpath = True
572 continue
583 continue
573 else:
584 else:
574 raise error.Abort(_('cannot follow file not in parent '
585 raise error.Abort(_('cannot follow file not in parent '
575 'revision: "%s"') % f)
586 'revision: "%s"') % f)
576 filelog = repo.file(f)
587 filelog = repo.file(f)
577 if not filelog:
588 if not filelog:
578 # A zero count may be a directory or deleted file, so
589 # A zero count may be a directory or deleted file, so
579 # try to find matching entries on the slow path.
590 # try to find matching entries on the slow path.
580 if follow:
591 if follow:
581 raise error.Abort(
592 raise error.Abort(
582 _('cannot follow nonexistent file: "%s"') % f)
593 _('cannot follow nonexistent file: "%s"') % f)
583 slowpath = True
594 slowpath = True
584
595
585 # We decided to fall back to the slowpath because at least one
596 # We decided to fall back to the slowpath because at least one
586 # of the paths was not a file. Check to see if at least one of them
597 # of the paths was not a file. Check to see if at least one of them
587 # existed in history - in that case, we'll continue down the
598 # existed in history - in that case, we'll continue down the
588 # slowpath; otherwise, we can turn off the slowpath
599 # slowpath; otherwise, we can turn off the slowpath
589 if slowpath:
600 if slowpath:
590 for path in match.files():
601 for path in match.files():
591 if path == '.' or path in repo.store:
602 if path == '.' or path in repo.store:
592 break
603 break
593 else:
604 else:
594 slowpath = False
605 slowpath = False
595
606
596 return match, pats, slowpath
607 return match, pats, slowpath
597
608
598 def _fileancestors(repo, revs, match, followfirst):
609 def _fileancestors(repo, revs, match, followfirst):
599 fctxs = []
610 fctxs = []
600 for r in revs:
611 for r in revs:
601 ctx = repo[r]
612 ctx = repo[r]
602 fctxs.extend(ctx[f].introfilectx() for f in ctx.walk(match))
613 fctxs.extend(ctx[f].introfilectx() for f in ctx.walk(match))
603
614
604 # When displaying a revision with --patch --follow FILE, we have
615 # When displaying a revision with --patch --follow FILE, we have
605 # to know which file of the revision must be diffed. With
616 # to know which file of the revision must be diffed. With
606 # --follow, we want the names of the ancestors of FILE in the
617 # --follow, we want the names of the ancestors of FILE in the
607 # revision, stored in "fcache". "fcache" is populated as a side effect
618 # revision, stored in "fcache". "fcache" is populated as a side effect
608 # of the graph traversal.
619 # of the graph traversal.
609 fcache = {}
620 fcache = {}
610 def filematcher(ctx):
621 def filematcher(ctx):
611 return scmutil.matchfiles(repo, fcache.get(ctx.rev(), []))
622 return scmutil.matchfiles(repo, fcache.get(ctx.rev(), []))
612
623
613 def revgen():
624 def revgen():
614 for rev, cs in dagop.filectxancestors(fctxs, followfirst=followfirst):
625 for rev, cs in dagop.filectxancestors(fctxs, followfirst=followfirst):
615 fcache[rev] = [c.path() for c in cs]
626 fcache[rev] = [c.path() for c in cs]
616 yield rev
627 yield rev
617 return smartset.generatorset(revgen(), iterasc=False), filematcher
628 return smartset.generatorset(revgen(), iterasc=False), filematcher
618
629
619 def _makenofollowfilematcher(repo, pats, opts):
630 def _makenofollowfilematcher(repo, pats, opts):
620 '''hook for extensions to override the filematcher for non-follow cases'''
631 '''hook for extensions to override the filematcher for non-follow cases'''
621 return None
632 return None
622
633
623 _opt2logrevset = {
634 _opt2logrevset = {
624 'no_merges': ('not merge()', None),
635 'no_merges': ('not merge()', None),
625 'only_merges': ('merge()', None),
636 'only_merges': ('merge()', None),
626 '_matchfiles': (None, '_matchfiles(%ps)'),
637 '_matchfiles': (None, '_matchfiles(%ps)'),
627 'date': ('date(%s)', None),
638 'date': ('date(%s)', None),
628 'branch': ('branch(%s)', '%lr'),
639 'branch': ('branch(%s)', '%lr'),
629 '_patslog': ('filelog(%s)', '%lr'),
640 '_patslog': ('filelog(%s)', '%lr'),
630 'keyword': ('keyword(%s)', '%lr'),
641 'keyword': ('keyword(%s)', '%lr'),
631 'prune': ('ancestors(%s)', 'not %lr'),
642 'prune': ('ancestors(%s)', 'not %lr'),
632 'user': ('user(%s)', '%lr'),
643 'user': ('user(%s)', '%lr'),
633 }
644 }
634
645
635 def _makerevset(repo, match, pats, slowpath, opts):
646 def _makerevset(repo, match, pats, slowpath, opts):
636 """Return a revset string built from log options and file patterns"""
647 """Return a revset string built from log options and file patterns"""
637 opts = dict(opts)
648 opts = dict(opts)
638 # follow or not follow?
649 # follow or not follow?
639 follow = opts.get('follow') or opts.get('follow_first')
650 follow = opts.get('follow') or opts.get('follow_first')
640
651
641 # branch and only_branch are really aliases and must be handled at
652 # branch and only_branch are really aliases and must be handled at
642 # the same time
653 # the same time
643 opts['branch'] = opts.get('branch', []) + opts.get('only_branch', [])
654 opts['branch'] = opts.get('branch', []) + opts.get('only_branch', [])
644 opts['branch'] = [repo.lookupbranch(b) for b in opts['branch']]
655 opts['branch'] = [repo.lookupbranch(b) for b in opts['branch']]
645
656
646 if slowpath:
657 if slowpath:
647 # See walkchangerevs() slow path.
658 # See walkchangerevs() slow path.
648 #
659 #
649 # pats/include/exclude cannot be represented as separate
660 # pats/include/exclude cannot be represented as separate
650 # revset expressions as their filtering logic applies at file
661 # revset expressions as their filtering logic applies at file
651 # level. For instance "-I a -X b" matches a revision touching
662 # level. For instance "-I a -X b" matches a revision touching
652 # "a" and "b" while "file(a) and not file(b)" does
663 # "a" and "b" while "file(a) and not file(b)" does
653 # not. Besides, filesets are evaluated against the working
664 # not. Besides, filesets are evaluated against the working
654 # directory.
665 # directory.
655 matchargs = ['r:', 'd:relpath']
666 matchargs = ['r:', 'd:relpath']
656 for p in pats:
667 for p in pats:
657 matchargs.append('p:' + p)
668 matchargs.append('p:' + p)
658 for p in opts.get('include', []):
669 for p in opts.get('include', []):
659 matchargs.append('i:' + p)
670 matchargs.append('i:' + p)
660 for p in opts.get('exclude', []):
671 for p in opts.get('exclude', []):
661 matchargs.append('x:' + p)
672 matchargs.append('x:' + p)
662 opts['_matchfiles'] = matchargs
673 opts['_matchfiles'] = matchargs
663 elif not follow:
674 elif not follow:
664 opts['_patslog'] = list(pats)
675 opts['_patslog'] = list(pats)
665
676
666 expr = []
677 expr = []
667 for op, val in sorted(opts.iteritems()):
678 for op, val in sorted(opts.iteritems()):
668 if not val:
679 if not val:
669 continue
680 continue
670 if op not in _opt2logrevset:
681 if op not in _opt2logrevset:
671 continue
682 continue
672 revop, listop = _opt2logrevset[op]
683 revop, listop = _opt2logrevset[op]
673 if revop and '%' not in revop:
684 if revop and '%' not in revop:
674 expr.append(revop)
685 expr.append(revop)
675 elif not listop:
686 elif not listop:
676 expr.append(revsetlang.formatspec(revop, val))
687 expr.append(revsetlang.formatspec(revop, val))
677 else:
688 else:
678 if revop:
689 if revop:
679 val = [revsetlang.formatspec(revop, v) for v in val]
690 val = [revsetlang.formatspec(revop, v) for v in val]
680 expr.append(revsetlang.formatspec(listop, val))
691 expr.append(revsetlang.formatspec(listop, val))
681
692
682 if expr:
693 if expr:
683 expr = '(' + ' and '.join(expr) + ')'
694 expr = '(' + ' and '.join(expr) + ')'
684 else:
695 else:
685 expr = None
696 expr = None
686 return expr
697 return expr
687
698
688 def _initialrevs(repo, opts):
699 def _initialrevs(repo, opts):
689 """Return the initial set of revisions to be filtered or followed"""
700 """Return the initial set of revisions to be filtered or followed"""
690 follow = opts.get('follow') or opts.get('follow_first')
701 follow = opts.get('follow') or opts.get('follow_first')
691 if opts.get('rev'):
702 if opts.get('rev'):
692 revs = scmutil.revrange(repo, opts['rev'])
703 revs = scmutil.revrange(repo, opts['rev'])
693 elif follow and repo.dirstate.p1() == nullid:
704 elif follow and repo.dirstate.p1() == nullid:
694 revs = smartset.baseset()
705 revs = smartset.baseset()
695 elif follow:
706 elif follow:
696 revs = repo.revs('.')
707 revs = repo.revs('.')
697 else:
708 else:
698 revs = smartset.spanset(repo)
709 revs = smartset.spanset(repo)
699 revs.reverse()
710 revs.reverse()
700 return revs
711 return revs
701
712
702 def getrevs(repo, pats, opts):
713 def getrevs(repo, pats, opts):
703 """Return (revs, differ) where revs is a smartset
714 """Return (revs, differ) where revs is a smartset
704
715
705 differ is a changesetdiffer with pre-configured file matcher.
716 differ is a changesetdiffer with pre-configured file matcher.
706 """
717 """
707 follow = opts.get('follow') or opts.get('follow_first')
718 follow = opts.get('follow') or opts.get('follow_first')
708 followfirst = opts.get('follow_first')
719 followfirst = opts.get('follow_first')
709 limit = getlimit(opts)
720 limit = getlimit(opts)
710 revs = _initialrevs(repo, opts)
721 revs = _initialrevs(repo, opts)
711 if not revs:
722 if not revs:
712 return smartset.baseset(), None
723 return smartset.baseset(), None
713 match, pats, slowpath = _makematcher(repo, revs, pats, opts)
724 match, pats, slowpath = _makematcher(repo, revs, pats, opts)
714 filematcher = None
725 filematcher = None
715 if follow:
726 if follow:
716 if slowpath or match.always():
727 if slowpath or match.always():
717 revs = dagop.revancestors(repo, revs, followfirst=followfirst)
728 revs = dagop.revancestors(repo, revs, followfirst=followfirst)
718 else:
729 else:
719 revs, filematcher = _fileancestors(repo, revs, match, followfirst)
730 revs, filematcher = _fileancestors(repo, revs, match, followfirst)
720 revs.reverse()
731 revs.reverse()
721 if filematcher is None:
732 if filematcher is None:
722 filematcher = _makenofollowfilematcher(repo, pats, opts)
733 filematcher = _makenofollowfilematcher(repo, pats, opts)
723 if filematcher is None:
734 if filematcher is None:
724 def filematcher(ctx):
735 def filematcher(ctx):
725 return match
736 return match
726
737
727 expr = _makerevset(repo, match, pats, slowpath, opts)
738 expr = _makerevset(repo, match, pats, slowpath, opts)
728 if opts.get('graph') and opts.get('rev'):
739 if opts.get('graph') and opts.get('rev'):
729 # User-specified revs might be unsorted, but don't sort before
740 # User-specified revs might be unsorted, but don't sort before
730 # _makerevset because it might depend on the order of revs
741 # _makerevset because it might depend on the order of revs
731 if not (revs.isdescending() or revs.istopo()):
742 if not (revs.isdescending() or revs.istopo()):
732 revs.sort(reverse=True)
743 revs.sort(reverse=True)
733 if expr:
744 if expr:
734 matcher = revset.match(None, expr)
745 matcher = revset.match(None, expr)
735 revs = matcher(repo, revs)
746 revs = matcher(repo, revs)
736 if limit is not None:
747 if limit is not None:
737 revs = revs.slice(0, limit)
748 revs = revs.slice(0, limit)
738
749
739 differ = changesetdiffer()
750 differ = changesetdiffer()
740 differ._makefilematcher = filematcher
751 differ._makefilematcher = filematcher
741 return revs, differ
752 return revs, differ
742
753
743 def _parselinerangeopt(repo, opts):
754 def _parselinerangeopt(repo, opts):
744 """Parse --line-range log option and return a list of tuples (filename,
755 """Parse --line-range log option and return a list of tuples (filename,
745 (fromline, toline)).
756 (fromline, toline)).
746 """
757 """
747 linerangebyfname = []
758 linerangebyfname = []
748 for pat in opts.get('line_range', []):
759 for pat in opts.get('line_range', []):
749 try:
760 try:
750 pat, linerange = pat.rsplit(',', 1)
761 pat, linerange = pat.rsplit(',', 1)
751 except ValueError:
762 except ValueError:
752 raise error.Abort(_('malformatted line-range pattern %s') % pat)
763 raise error.Abort(_('malformatted line-range pattern %s') % pat)
753 try:
764 try:
754 fromline, toline = map(int, linerange.split(':'))
765 fromline, toline = map(int, linerange.split(':'))
755 except ValueError:
766 except ValueError:
756 raise error.Abort(_("invalid line range for %s") % pat)
767 raise error.Abort(_("invalid line range for %s") % pat)
757 msg = _("line range pattern '%s' must match exactly one file") % pat
768 msg = _("line range pattern '%s' must match exactly one file") % pat
758 fname = scmutil.parsefollowlinespattern(repo, None, pat, msg)
769 fname = scmutil.parsefollowlinespattern(repo, None, pat, msg)
759 linerangebyfname.append(
770 linerangebyfname.append(
760 (fname, util.processlinerange(fromline, toline)))
771 (fname, util.processlinerange(fromline, toline)))
761 return linerangebyfname
772 return linerangebyfname
762
773
763 def getlinerangerevs(repo, userrevs, opts):
774 def getlinerangerevs(repo, userrevs, opts):
764 """Return (revs, differ).
775 """Return (revs, differ).
765
776
766 "revs" are revisions obtained by processing "line-range" log options and
777 "revs" are revisions obtained by processing "line-range" log options and
767 walking block ancestors of each specified file/line-range.
778 walking block ancestors of each specified file/line-range.
768
779
769 "differ" is a changesetdiffer with pre-configured file matcher and hunks
780 "differ" is a changesetdiffer with pre-configured file matcher and hunks
770 filter.
781 filter.
771 """
782 """
772 wctx = repo[None]
783 wctx = repo[None]
773
784
774 # Two-levels map of "rev -> file ctx -> [line range]".
785 # Two-levels map of "rev -> file ctx -> [line range]".
775 linerangesbyrev = {}
786 linerangesbyrev = {}
776 for fname, (fromline, toline) in _parselinerangeopt(repo, opts):
787 for fname, (fromline, toline) in _parselinerangeopt(repo, opts):
777 if fname not in wctx:
788 if fname not in wctx:
778 raise error.Abort(_('cannot follow file not in parent '
789 raise error.Abort(_('cannot follow file not in parent '
779 'revision: "%s"') % fname)
790 'revision: "%s"') % fname)
780 fctx = wctx.filectx(fname)
791 fctx = wctx.filectx(fname)
781 for fctx, linerange in dagop.blockancestors(fctx, fromline, toline):
792 for fctx, linerange in dagop.blockancestors(fctx, fromline, toline):
782 rev = fctx.introrev()
793 rev = fctx.introrev()
783 if rev not in userrevs:
794 if rev not in userrevs:
784 continue
795 continue
785 linerangesbyrev.setdefault(
796 linerangesbyrev.setdefault(
786 rev, {}).setdefault(
797 rev, {}).setdefault(
787 fctx.path(), []).append(linerange)
798 fctx.path(), []).append(linerange)
788
799
789 def nofilterhunksfn(fctx, hunks):
800 def nofilterhunksfn(fctx, hunks):
790 return hunks
801 return hunks
791
802
792 def hunksfilter(ctx):
803 def hunksfilter(ctx):
793 fctxlineranges = linerangesbyrev.get(ctx.rev())
804 fctxlineranges = linerangesbyrev.get(ctx.rev())
794 if fctxlineranges is None:
805 if fctxlineranges is None:
795 return nofilterhunksfn
806 return nofilterhunksfn
796
807
797 def filterfn(fctx, hunks):
808 def filterfn(fctx, hunks):
798 lineranges = fctxlineranges.get(fctx.path())
809 lineranges = fctxlineranges.get(fctx.path())
799 if lineranges is not None:
810 if lineranges is not None:
800 for hr, lines in hunks:
811 for hr, lines in hunks:
801 if hr is None: # binary
812 if hr is None: # binary
802 yield hr, lines
813 yield hr, lines
803 continue
814 continue
804 if any(mdiff.hunkinrange(hr[2:], lr)
815 if any(mdiff.hunkinrange(hr[2:], lr)
805 for lr in lineranges):
816 for lr in lineranges):
806 yield hr, lines
817 yield hr, lines
807 else:
818 else:
808 for hunk in hunks:
819 for hunk in hunks:
809 yield hunk
820 yield hunk
810
821
811 return filterfn
822 return filterfn
812
823
813 def filematcher(ctx):
824 def filematcher(ctx):
814 files = list(linerangesbyrev.get(ctx.rev(), []))
825 files = list(linerangesbyrev.get(ctx.rev(), []))
815 return scmutil.matchfiles(repo, files)
826 return scmutil.matchfiles(repo, files)
816
827
817 revs = sorted(linerangesbyrev, reverse=True)
828 revs = sorted(linerangesbyrev, reverse=True)
818
829
819 differ = changesetdiffer()
830 differ = changesetdiffer()
820 differ._makefilematcher = filematcher
831 differ._makefilematcher = filematcher
821 differ._makehunksfilter = hunksfilter
832 differ._makehunksfilter = hunksfilter
822 return revs, differ
833 return revs, differ
823
834
824 def _graphnodeformatter(ui, displayer):
835 def _graphnodeformatter(ui, displayer):
825 spec = ui.config('ui', 'graphnodetemplate')
836 spec = ui.config('ui', 'graphnodetemplate')
826 if not spec:
837 if not spec:
827 return templatekw.getgraphnode # fast path for "{graphnode}"
838 return templatekw.getgraphnode # fast path for "{graphnode}"
828
839
829 spec = templater.unquotestring(spec)
840 spec = templater.unquotestring(spec)
830 if isinstance(displayer, changesettemplater):
841 if isinstance(displayer, changesettemplater):
831 # reuse cache of slow templates
842 # reuse cache of slow templates
832 tres = displayer._tresources
843 tres = displayer._tresources
833 else:
844 else:
834 tres = formatter.templateresources(ui)
845 tres = formatter.templateresources(ui)
835 templ = formatter.maketemplater(ui, spec, defaults=templatekw.keywords,
846 templ = formatter.maketemplater(ui, spec, defaults=templatekw.keywords,
836 resources=tres)
847 resources=tres)
837 def formatnode(repo, ctx):
848 def formatnode(repo, ctx):
838 props = {'ctx': ctx, 'repo': repo}
849 props = {'ctx': ctx, 'repo': repo}
839 return templ.renderdefault(props)
850 return templ.renderdefault(props)
840 return formatnode
851 return formatnode
841
852
842 def displaygraph(ui, repo, dag, displayer, edgefn, getrenamed=None, props=None):
853 def displaygraph(ui, repo, dag, displayer, edgefn, getrenamed=None, props=None):
843 props = props or {}
854 props = props or {}
844 formatnode = _graphnodeformatter(ui, displayer)
855 formatnode = _graphnodeformatter(ui, displayer)
845 state = graphmod.asciistate()
856 state = graphmod.asciistate()
846 styles = state['styles']
857 styles = state['styles']
847
858
848 # only set graph styling if HGPLAIN is not set.
859 # only set graph styling if HGPLAIN is not set.
849 if ui.plain('graph'):
860 if ui.plain('graph'):
850 # set all edge styles to |, the default pre-3.8 behaviour
861 # set all edge styles to |, the default pre-3.8 behaviour
851 styles.update(dict.fromkeys(styles, '|'))
862 styles.update(dict.fromkeys(styles, '|'))
852 else:
863 else:
853 edgetypes = {
864 edgetypes = {
854 'parent': graphmod.PARENT,
865 'parent': graphmod.PARENT,
855 'grandparent': graphmod.GRANDPARENT,
866 'grandparent': graphmod.GRANDPARENT,
856 'missing': graphmod.MISSINGPARENT
867 'missing': graphmod.MISSINGPARENT
857 }
868 }
858 for name, key in edgetypes.items():
869 for name, key in edgetypes.items():
859 # experimental config: experimental.graphstyle.*
870 # experimental config: experimental.graphstyle.*
860 styles[key] = ui.config('experimental', 'graphstyle.%s' % name,
871 styles[key] = ui.config('experimental', 'graphstyle.%s' % name,
861 styles[key])
872 styles[key])
862 if not styles[key]:
873 if not styles[key]:
863 styles[key] = None
874 styles[key] = None
864
875
865 # experimental config: experimental.graphshorten
876 # experimental config: experimental.graphshorten
866 state['graphshorten'] = ui.configbool('experimental', 'graphshorten')
877 state['graphshorten'] = ui.configbool('experimental', 'graphshorten')
867
878
868 for rev, type, ctx, parents in dag:
879 for rev, type, ctx, parents in dag:
869 char = formatnode(repo, ctx)
880 char = formatnode(repo, ctx)
870 copies = None
881 copies = None
871 if getrenamed and ctx.rev():
882 if getrenamed and ctx.rev():
872 copies = []
883 copies = []
873 for fn in ctx.files():
884 for fn in ctx.files():
874 rename = getrenamed(fn, ctx.rev())
885 rename = getrenamed(fn, ctx.rev())
875 if rename:
886 if rename:
876 copies.append((fn, rename))
887 copies.append((fn, rename))
877 edges = edgefn(type, char, state, rev, parents)
888 edges = edgefn(type, char, state, rev, parents)
878 firstedge = next(edges)
889 firstedge = next(edges)
879 width = firstedge[2]
890 width = firstedge[2]
880 displayer.show(ctx, copies=copies,
891 displayer.show(ctx, copies=copies,
881 graphwidth=width, **pycompat.strkwargs(props))
892 graphwidth=width, **pycompat.strkwargs(props))
882 lines = displayer.hunk.pop(rev).split('\n')
893 lines = displayer.hunk.pop(rev).split('\n')
883 if not lines[-1]:
894 if not lines[-1]:
884 del lines[-1]
895 del lines[-1]
885 displayer.flush(ctx)
896 displayer.flush(ctx)
886 for type, char, width, coldata in itertools.chain([firstedge], edges):
897 for type, char, width, coldata in itertools.chain([firstedge], edges):
887 graphmod.ascii(ui, state, type, char, lines, coldata)
898 graphmod.ascii(ui, state, type, char, lines, coldata)
888 lines = []
899 lines = []
889 displayer.close()
900 displayer.close()
890
901
891 def displaygraphrevs(ui, repo, revs, displayer, getrenamed):
902 def displaygraphrevs(ui, repo, revs, displayer, getrenamed):
892 revdag = graphmod.dagwalker(repo, revs)
903 revdag = graphmod.dagwalker(repo, revs)
893 displaygraph(ui, repo, revdag, displayer, graphmod.asciiedges, getrenamed)
904 displaygraph(ui, repo, revdag, displayer, graphmod.asciiedges, getrenamed)
894
905
895 def displayrevs(ui, repo, revs, displayer, getrenamed):
906 def displayrevs(ui, repo, revs, displayer, getrenamed):
896 for rev in revs:
907 for rev in revs:
897 ctx = repo[rev]
908 ctx = repo[rev]
898 copies = None
909 copies = None
899 if getrenamed is not None and rev:
910 if getrenamed is not None and rev:
900 copies = []
911 copies = []
901 for fn in ctx.files():
912 for fn in ctx.files():
902 rename = getrenamed(fn, rev)
913 rename = getrenamed(fn, rev)
903 if rename:
914 if rename:
904 copies.append((fn, rename))
915 copies.append((fn, rename))
905 displayer.show(ctx, copies=copies)
916 displayer.show(ctx, copies=copies)
906 displayer.flush(ctx)
917 displayer.flush(ctx)
907 displayer.close()
918 displayer.close()
908
919
909 def checkunsupportedgraphflags(pats, opts):
920 def checkunsupportedgraphflags(pats, opts):
910 for op in ["newest_first"]:
921 for op in ["newest_first"]:
911 if op in opts and opts[op]:
922 if op in opts and opts[op]:
912 raise error.Abort(_("-G/--graph option is incompatible with --%s")
923 raise error.Abort(_("-G/--graph option is incompatible with --%s")
913 % op.replace("_", "-"))
924 % op.replace("_", "-"))
914
925
915 def graphrevs(repo, nodes, opts):
926 def graphrevs(repo, nodes, opts):
916 limit = getlimit(opts)
927 limit = getlimit(opts)
917 nodes.reverse()
928 nodes.reverse()
918 if limit is not None:
929 if limit is not None:
919 nodes = nodes[:limit]
930 nodes = nodes[:limit]
920 return graphmod.nodes(repo, nodes)
931 return graphmod.nodes(repo, nodes)
@@ -1,2861 +1,2850 b''
1 # patch.py - patch file parsing routines
1 # patch.py - patch file parsing routines
2 #
2 #
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import, print_function
9 from __future__ import absolute_import, print_function
10
10
11 import collections
11 import collections
12 import contextlib
12 import contextlib
13 import copy
13 import copy
14 import email
14 import email
15 import errno
15 import errno
16 import hashlib
16 import hashlib
17 import os
17 import os
18 import posixpath
19 import re
18 import re
20 import shutil
19 import shutil
21 import zlib
20 import zlib
22
21
23 from .i18n import _
22 from .i18n import _
24 from .node import (
23 from .node import (
25 hex,
24 hex,
26 short,
25 short,
27 )
26 )
28 from . import (
27 from . import (
29 copies,
28 copies,
30 diffhelper,
29 diffhelper,
31 diffutil,
30 diffutil,
32 encoding,
31 encoding,
33 error,
32 error,
34 mail,
33 mail,
35 mdiff,
34 mdiff,
36 pathutil,
35 pathutil,
37 pycompat,
36 pycompat,
38 scmutil,
37 scmutil,
39 similar,
38 similar,
40 util,
39 util,
41 vfs as vfsmod,
40 vfs as vfsmod,
42 )
41 )
43 from .utils import (
42 from .utils import (
44 dateutil,
43 dateutil,
45 procutil,
44 procutil,
46 stringutil,
45 stringutil,
47 )
46 )
48
47
49 stringio = util.stringio
48 stringio = util.stringio
50
49
51 gitre = re.compile(br'diff --git a/(.*) b/(.*)')
50 gitre = re.compile(br'diff --git a/(.*) b/(.*)')
52 tabsplitter = re.compile(br'(\t+|[^\t]+)')
51 tabsplitter = re.compile(br'(\t+|[^\t]+)')
53 wordsplitter = re.compile(br'(\t+| +|[a-zA-Z0-9_\x80-\xff]+|'
52 wordsplitter = re.compile(br'(\t+| +|[a-zA-Z0-9_\x80-\xff]+|'
54 b'[^ \ta-zA-Z0-9_\x80-\xff])')
53 b'[^ \ta-zA-Z0-9_\x80-\xff])')
55
54
56 PatchError = error.PatchError
55 PatchError = error.PatchError
57
56
58 # public functions
57 # public functions
59
58
60 def split(stream):
59 def split(stream):
61 '''return an iterator of individual patches from a stream'''
60 '''return an iterator of individual patches from a stream'''
62 def isheader(line, inheader):
61 def isheader(line, inheader):
63 if inheader and line.startswith((' ', '\t')):
62 if inheader and line.startswith((' ', '\t')):
64 # continuation
63 # continuation
65 return True
64 return True
66 if line.startswith((' ', '-', '+')):
65 if line.startswith((' ', '-', '+')):
67 # diff line - don't check for header pattern in there
66 # diff line - don't check for header pattern in there
68 return False
67 return False
69 l = line.split(': ', 1)
68 l = line.split(': ', 1)
70 return len(l) == 2 and ' ' not in l[0]
69 return len(l) == 2 and ' ' not in l[0]
71
70
72 def chunk(lines):
71 def chunk(lines):
73 return stringio(''.join(lines))
72 return stringio(''.join(lines))
74
73
75 def hgsplit(stream, cur):
74 def hgsplit(stream, cur):
76 inheader = True
75 inheader = True
77
76
78 for line in stream:
77 for line in stream:
79 if not line.strip():
78 if not line.strip():
80 inheader = False
79 inheader = False
81 if not inheader and line.startswith('# HG changeset patch'):
80 if not inheader and line.startswith('# HG changeset patch'):
82 yield chunk(cur)
81 yield chunk(cur)
83 cur = []
82 cur = []
84 inheader = True
83 inheader = True
85
84
86 cur.append(line)
85 cur.append(line)
87
86
88 if cur:
87 if cur:
89 yield chunk(cur)
88 yield chunk(cur)
90
89
91 def mboxsplit(stream, cur):
90 def mboxsplit(stream, cur):
92 for line in stream:
91 for line in stream:
93 if line.startswith('From '):
92 if line.startswith('From '):
94 for c in split(chunk(cur[1:])):
93 for c in split(chunk(cur[1:])):
95 yield c
94 yield c
96 cur = []
95 cur = []
97
96
98 cur.append(line)
97 cur.append(line)
99
98
100 if cur:
99 if cur:
101 for c in split(chunk(cur[1:])):
100 for c in split(chunk(cur[1:])):
102 yield c
101 yield c
103
102
104 def mimesplit(stream, cur):
103 def mimesplit(stream, cur):
105 def msgfp(m):
104 def msgfp(m):
106 fp = stringio()
105 fp = stringio()
107 g = email.Generator.Generator(fp, mangle_from_=False)
106 g = email.Generator.Generator(fp, mangle_from_=False)
108 g.flatten(m)
107 g.flatten(m)
109 fp.seek(0)
108 fp.seek(0)
110 return fp
109 return fp
111
110
112 for line in stream:
111 for line in stream:
113 cur.append(line)
112 cur.append(line)
114 c = chunk(cur)
113 c = chunk(cur)
115
114
116 m = mail.parse(c)
115 m = mail.parse(c)
117 if not m.is_multipart():
116 if not m.is_multipart():
118 yield msgfp(m)
117 yield msgfp(m)
119 else:
118 else:
120 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
119 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
121 for part in m.walk():
120 for part in m.walk():
122 ct = part.get_content_type()
121 ct = part.get_content_type()
123 if ct not in ok_types:
122 if ct not in ok_types:
124 continue
123 continue
125 yield msgfp(part)
124 yield msgfp(part)
126
125
127 def headersplit(stream, cur):
126 def headersplit(stream, cur):
128 inheader = False
127 inheader = False
129
128
130 for line in stream:
129 for line in stream:
131 if not inheader and isheader(line, inheader):
130 if not inheader and isheader(line, inheader):
132 yield chunk(cur)
131 yield chunk(cur)
133 cur = []
132 cur = []
134 inheader = True
133 inheader = True
135 if inheader and not isheader(line, inheader):
134 if inheader and not isheader(line, inheader):
136 inheader = False
135 inheader = False
137
136
138 cur.append(line)
137 cur.append(line)
139
138
140 if cur:
139 if cur:
141 yield chunk(cur)
140 yield chunk(cur)
142
141
143 def remainder(cur):
142 def remainder(cur):
144 yield chunk(cur)
143 yield chunk(cur)
145
144
146 class fiter(object):
145 class fiter(object):
147 def __init__(self, fp):
146 def __init__(self, fp):
148 self.fp = fp
147 self.fp = fp
149
148
150 def __iter__(self):
149 def __iter__(self):
151 return self
150 return self
152
151
153 def next(self):
152 def next(self):
154 l = self.fp.readline()
153 l = self.fp.readline()
155 if not l:
154 if not l:
156 raise StopIteration
155 raise StopIteration
157 return l
156 return l
158
157
159 __next__ = next
158 __next__ = next
160
159
161 inheader = False
160 inheader = False
162 cur = []
161 cur = []
163
162
164 mimeheaders = ['content-type']
163 mimeheaders = ['content-type']
165
164
166 if not util.safehasattr(stream, 'next'):
165 if not util.safehasattr(stream, 'next'):
167 # http responses, for example, have readline but not next
166 # http responses, for example, have readline but not next
168 stream = fiter(stream)
167 stream = fiter(stream)
169
168
170 for line in stream:
169 for line in stream:
171 cur.append(line)
170 cur.append(line)
172 if line.startswith('# HG changeset patch'):
171 if line.startswith('# HG changeset patch'):
173 return hgsplit(stream, cur)
172 return hgsplit(stream, cur)
174 elif line.startswith('From '):
173 elif line.startswith('From '):
175 return mboxsplit(stream, cur)
174 return mboxsplit(stream, cur)
176 elif isheader(line, inheader):
175 elif isheader(line, inheader):
177 inheader = True
176 inheader = True
178 if line.split(':', 1)[0].lower() in mimeheaders:
177 if line.split(':', 1)[0].lower() in mimeheaders:
179 # let email parser handle this
178 # let email parser handle this
180 return mimesplit(stream, cur)
179 return mimesplit(stream, cur)
181 elif line.startswith('--- ') and inheader:
180 elif line.startswith('--- ') and inheader:
182 # No evil headers seen by diff start, split by hand
181 # No evil headers seen by diff start, split by hand
183 return headersplit(stream, cur)
182 return headersplit(stream, cur)
184 # Not enough info, keep reading
183 # Not enough info, keep reading
185
184
186 # if we are here, we have a very plain patch
185 # if we are here, we have a very plain patch
187 return remainder(cur)
186 return remainder(cur)
188
187
189 ## Some facility for extensible patch parsing:
188 ## Some facility for extensible patch parsing:
190 # list of pairs ("header to match", "data key")
189 # list of pairs ("header to match", "data key")
191 patchheadermap = [('Date', 'date'),
190 patchheadermap = [('Date', 'date'),
192 ('Branch', 'branch'),
191 ('Branch', 'branch'),
193 ('Node ID', 'nodeid'),
192 ('Node ID', 'nodeid'),
194 ]
193 ]
195
194
196 @contextlib.contextmanager
195 @contextlib.contextmanager
197 def extract(ui, fileobj):
196 def extract(ui, fileobj):
198 '''extract patch from data read from fileobj.
197 '''extract patch from data read from fileobj.
199
198
200 patch can be a normal patch or contained in an email message.
199 patch can be a normal patch or contained in an email message.
201
200
202 return a dictionary. Standard keys are:
201 return a dictionary. Standard keys are:
203 - filename,
202 - filename,
204 - message,
203 - message,
205 - user,
204 - user,
206 - date,
205 - date,
207 - branch,
206 - branch,
208 - node,
207 - node,
209 - p1,
208 - p1,
210 - p2.
209 - p2.
211 Any item can be missing from the dictionary. If filename is missing,
210 Any item can be missing from the dictionary. If filename is missing,
212 fileobj did not contain a patch. Caller must unlink filename when done.'''
211 fileobj did not contain a patch. Caller must unlink filename when done.'''
213
212
214 fd, tmpname = pycompat.mkstemp(prefix='hg-patch-')
213 fd, tmpname = pycompat.mkstemp(prefix='hg-patch-')
215 tmpfp = os.fdopen(fd, r'wb')
214 tmpfp = os.fdopen(fd, r'wb')
216 try:
215 try:
217 yield _extract(ui, fileobj, tmpname, tmpfp)
216 yield _extract(ui, fileobj, tmpname, tmpfp)
218 finally:
217 finally:
219 tmpfp.close()
218 tmpfp.close()
220 os.unlink(tmpname)
219 os.unlink(tmpname)
221
220
222 def _extract(ui, fileobj, tmpname, tmpfp):
221 def _extract(ui, fileobj, tmpname, tmpfp):
223
222
224 # attempt to detect the start of a patch
223 # attempt to detect the start of a patch
225 # (this heuristic is borrowed from quilt)
224 # (this heuristic is borrowed from quilt)
226 diffre = re.compile(br'^(?:Index:[ \t]|diff[ \t]-|RCS file: |'
225 diffre = re.compile(br'^(?:Index:[ \t]|diff[ \t]-|RCS file: |'
227 br'retrieving revision [0-9]+(\.[0-9]+)*$|'
226 br'retrieving revision [0-9]+(\.[0-9]+)*$|'
228 br'---[ \t].*?^\+\+\+[ \t]|'
227 br'---[ \t].*?^\+\+\+[ \t]|'
229 br'\*\*\*[ \t].*?^---[ \t])',
228 br'\*\*\*[ \t].*?^---[ \t])',
230 re.MULTILINE | re.DOTALL)
229 re.MULTILINE | re.DOTALL)
231
230
232 data = {}
231 data = {}
233
232
234 msg = mail.parse(fileobj)
233 msg = mail.parse(fileobj)
235
234
236 subject = msg[r'Subject'] and mail.headdecode(msg[r'Subject'])
235 subject = msg[r'Subject'] and mail.headdecode(msg[r'Subject'])
237 data['user'] = msg[r'From'] and mail.headdecode(msg[r'From'])
236 data['user'] = msg[r'From'] and mail.headdecode(msg[r'From'])
238 if not subject and not data['user']:
237 if not subject and not data['user']:
239 # Not an email, restore parsed headers if any
238 # Not an email, restore parsed headers if any
240 subject = '\n'.join(': '.join(map(encoding.strtolocal, h))
239 subject = '\n'.join(': '.join(map(encoding.strtolocal, h))
241 for h in msg.items()) + '\n'
240 for h in msg.items()) + '\n'
242
241
243 # should try to parse msg['Date']
242 # should try to parse msg['Date']
244 parents = []
243 parents = []
245
244
246 if subject:
245 if subject:
247 if subject.startswith('[PATCH'):
246 if subject.startswith('[PATCH'):
248 pend = subject.find(']')
247 pend = subject.find(']')
249 if pend >= 0:
248 if pend >= 0:
250 subject = subject[pend + 1:].lstrip()
249 subject = subject[pend + 1:].lstrip()
251 subject = re.sub(br'\n[ \t]+', ' ', subject)
250 subject = re.sub(br'\n[ \t]+', ' ', subject)
252 ui.debug('Subject: %s\n' % subject)
251 ui.debug('Subject: %s\n' % subject)
253 if data['user']:
252 if data['user']:
254 ui.debug('From: %s\n' % data['user'])
253 ui.debug('From: %s\n' % data['user'])
255 diffs_seen = 0
254 diffs_seen = 0
256 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
255 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
257 message = ''
256 message = ''
258 for part in msg.walk():
257 for part in msg.walk():
259 content_type = pycompat.bytestr(part.get_content_type())
258 content_type = pycompat.bytestr(part.get_content_type())
260 ui.debug('Content-Type: %s\n' % content_type)
259 ui.debug('Content-Type: %s\n' % content_type)
261 if content_type not in ok_types:
260 if content_type not in ok_types:
262 continue
261 continue
263 payload = part.get_payload(decode=True)
262 payload = part.get_payload(decode=True)
264 m = diffre.search(payload)
263 m = diffre.search(payload)
265 if m:
264 if m:
266 hgpatch = False
265 hgpatch = False
267 hgpatchheader = False
266 hgpatchheader = False
268 ignoretext = False
267 ignoretext = False
269
268
270 ui.debug('found patch at byte %d\n' % m.start(0))
269 ui.debug('found patch at byte %d\n' % m.start(0))
271 diffs_seen += 1
270 diffs_seen += 1
272 cfp = stringio()
271 cfp = stringio()
273 for line in payload[:m.start(0)].splitlines():
272 for line in payload[:m.start(0)].splitlines():
274 if line.startswith('# HG changeset patch') and not hgpatch:
273 if line.startswith('# HG changeset patch') and not hgpatch:
275 ui.debug('patch generated by hg export\n')
274 ui.debug('patch generated by hg export\n')
276 hgpatch = True
275 hgpatch = True
277 hgpatchheader = True
276 hgpatchheader = True
278 # drop earlier commit message content
277 # drop earlier commit message content
279 cfp.seek(0)
278 cfp.seek(0)
280 cfp.truncate()
279 cfp.truncate()
281 subject = None
280 subject = None
282 elif hgpatchheader:
281 elif hgpatchheader:
283 if line.startswith('# User '):
282 if line.startswith('# User '):
284 data['user'] = line[7:]
283 data['user'] = line[7:]
285 ui.debug('From: %s\n' % data['user'])
284 ui.debug('From: %s\n' % data['user'])
286 elif line.startswith("# Parent "):
285 elif line.startswith("# Parent "):
287 parents.append(line[9:].lstrip())
286 parents.append(line[9:].lstrip())
288 elif line.startswith("# "):
287 elif line.startswith("# "):
289 for header, key in patchheadermap:
288 for header, key in patchheadermap:
290 prefix = '# %s ' % header
289 prefix = '# %s ' % header
291 if line.startswith(prefix):
290 if line.startswith(prefix):
292 data[key] = line[len(prefix):]
291 data[key] = line[len(prefix):]
293 else:
292 else:
294 hgpatchheader = False
293 hgpatchheader = False
295 elif line == '---':
294 elif line == '---':
296 ignoretext = True
295 ignoretext = True
297 if not hgpatchheader and not ignoretext:
296 if not hgpatchheader and not ignoretext:
298 cfp.write(line)
297 cfp.write(line)
299 cfp.write('\n')
298 cfp.write('\n')
300 message = cfp.getvalue()
299 message = cfp.getvalue()
301 if tmpfp:
300 if tmpfp:
302 tmpfp.write(payload)
301 tmpfp.write(payload)
303 if not payload.endswith('\n'):
302 if not payload.endswith('\n'):
304 tmpfp.write('\n')
303 tmpfp.write('\n')
305 elif not diffs_seen and message and content_type == 'text/plain':
304 elif not diffs_seen and message and content_type == 'text/plain':
306 message += '\n' + payload
305 message += '\n' + payload
307
306
308 if subject and not message.startswith(subject):
307 if subject and not message.startswith(subject):
309 message = '%s\n%s' % (subject, message)
308 message = '%s\n%s' % (subject, message)
310 data['message'] = message
309 data['message'] = message
311 tmpfp.close()
310 tmpfp.close()
312 if parents:
311 if parents:
313 data['p1'] = parents.pop(0)
312 data['p1'] = parents.pop(0)
314 if parents:
313 if parents:
315 data['p2'] = parents.pop(0)
314 data['p2'] = parents.pop(0)
316
315
317 if diffs_seen:
316 if diffs_seen:
318 data['filename'] = tmpname
317 data['filename'] = tmpname
319
318
320 return data
319 return data
321
320
322 class patchmeta(object):
321 class patchmeta(object):
323 """Patched file metadata
322 """Patched file metadata
324
323
325 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
324 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
326 or COPY. 'path' is patched file path. 'oldpath' is set to the
325 or COPY. 'path' is patched file path. 'oldpath' is set to the
327 origin file when 'op' is either COPY or RENAME, None otherwise. If
326 origin file when 'op' is either COPY or RENAME, None otherwise. If
328 file mode is changed, 'mode' is a tuple (islink, isexec) where
327 file mode is changed, 'mode' is a tuple (islink, isexec) where
329 'islink' is True if the file is a symlink and 'isexec' is True if
328 'islink' is True if the file is a symlink and 'isexec' is True if
330 the file is executable. Otherwise, 'mode' is None.
329 the file is executable. Otherwise, 'mode' is None.
331 """
330 """
332 def __init__(self, path):
331 def __init__(self, path):
333 self.path = path
332 self.path = path
334 self.oldpath = None
333 self.oldpath = None
335 self.mode = None
334 self.mode = None
336 self.op = 'MODIFY'
335 self.op = 'MODIFY'
337 self.binary = False
336 self.binary = False
338
337
339 def setmode(self, mode):
338 def setmode(self, mode):
340 islink = mode & 0o20000
339 islink = mode & 0o20000
341 isexec = mode & 0o100
340 isexec = mode & 0o100
342 self.mode = (islink, isexec)
341 self.mode = (islink, isexec)
343
342
344 def copy(self):
343 def copy(self):
345 other = patchmeta(self.path)
344 other = patchmeta(self.path)
346 other.oldpath = self.oldpath
345 other.oldpath = self.oldpath
347 other.mode = self.mode
346 other.mode = self.mode
348 other.op = self.op
347 other.op = self.op
349 other.binary = self.binary
348 other.binary = self.binary
350 return other
349 return other
351
350
352 def _ispatchinga(self, afile):
351 def _ispatchinga(self, afile):
353 if afile == '/dev/null':
352 if afile == '/dev/null':
354 return self.op == 'ADD'
353 return self.op == 'ADD'
355 return afile == 'a/' + (self.oldpath or self.path)
354 return afile == 'a/' + (self.oldpath or self.path)
356
355
357 def _ispatchingb(self, bfile):
356 def _ispatchingb(self, bfile):
358 if bfile == '/dev/null':
357 if bfile == '/dev/null':
359 return self.op == 'DELETE'
358 return self.op == 'DELETE'
360 return bfile == 'b/' + self.path
359 return bfile == 'b/' + self.path
361
360
362 def ispatching(self, afile, bfile):
361 def ispatching(self, afile, bfile):
363 return self._ispatchinga(afile) and self._ispatchingb(bfile)
362 return self._ispatchinga(afile) and self._ispatchingb(bfile)
364
363
365 def __repr__(self):
364 def __repr__(self):
366 return r"<patchmeta %s %r>" % (self.op, self.path)
365 return r"<patchmeta %s %r>" % (self.op, self.path)
367
366
368 def readgitpatch(lr):
367 def readgitpatch(lr):
369 """extract git-style metadata about patches from <patchname>"""
368 """extract git-style metadata about patches from <patchname>"""
370
369
371 # Filter patch for git information
370 # Filter patch for git information
372 gp = None
371 gp = None
373 gitpatches = []
372 gitpatches = []
374 for line in lr:
373 for line in lr:
375 line = line.rstrip(' \r\n')
374 line = line.rstrip(' \r\n')
376 if line.startswith('diff --git a/'):
375 if line.startswith('diff --git a/'):
377 m = gitre.match(line)
376 m = gitre.match(line)
378 if m:
377 if m:
379 if gp:
378 if gp:
380 gitpatches.append(gp)
379 gitpatches.append(gp)
381 dst = m.group(2)
380 dst = m.group(2)
382 gp = patchmeta(dst)
381 gp = patchmeta(dst)
383 elif gp:
382 elif gp:
384 if line.startswith('--- '):
383 if line.startswith('--- '):
385 gitpatches.append(gp)
384 gitpatches.append(gp)
386 gp = None
385 gp = None
387 continue
386 continue
388 if line.startswith('rename from '):
387 if line.startswith('rename from '):
389 gp.op = 'RENAME'
388 gp.op = 'RENAME'
390 gp.oldpath = line[12:]
389 gp.oldpath = line[12:]
391 elif line.startswith('rename to '):
390 elif line.startswith('rename to '):
392 gp.path = line[10:]
391 gp.path = line[10:]
393 elif line.startswith('copy from '):
392 elif line.startswith('copy from '):
394 gp.op = 'COPY'
393 gp.op = 'COPY'
395 gp.oldpath = line[10:]
394 gp.oldpath = line[10:]
396 elif line.startswith('copy to '):
395 elif line.startswith('copy to '):
397 gp.path = line[8:]
396 gp.path = line[8:]
398 elif line.startswith('deleted file'):
397 elif line.startswith('deleted file'):
399 gp.op = 'DELETE'
398 gp.op = 'DELETE'
400 elif line.startswith('new file mode '):
399 elif line.startswith('new file mode '):
401 gp.op = 'ADD'
400 gp.op = 'ADD'
402 gp.setmode(int(line[-6:], 8))
401 gp.setmode(int(line[-6:], 8))
403 elif line.startswith('new mode '):
402 elif line.startswith('new mode '):
404 gp.setmode(int(line[-6:], 8))
403 gp.setmode(int(line[-6:], 8))
405 elif line.startswith('GIT binary patch'):
404 elif line.startswith('GIT binary patch'):
406 gp.binary = True
405 gp.binary = True
407 if gp:
406 if gp:
408 gitpatches.append(gp)
407 gitpatches.append(gp)
409
408
410 return gitpatches
409 return gitpatches
411
410
412 class linereader(object):
411 class linereader(object):
413 # simple class to allow pushing lines back into the input stream
412 # simple class to allow pushing lines back into the input stream
414 def __init__(self, fp):
413 def __init__(self, fp):
415 self.fp = fp
414 self.fp = fp
416 self.buf = []
415 self.buf = []
417
416
418 def push(self, line):
417 def push(self, line):
419 if line is not None:
418 if line is not None:
420 self.buf.append(line)
419 self.buf.append(line)
421
420
422 def readline(self):
421 def readline(self):
423 if self.buf:
422 if self.buf:
424 l = self.buf[0]
423 l = self.buf[0]
425 del self.buf[0]
424 del self.buf[0]
426 return l
425 return l
427 return self.fp.readline()
426 return self.fp.readline()
428
427
429 def __iter__(self):
428 def __iter__(self):
430 return iter(self.readline, '')
429 return iter(self.readline, '')
431
430
432 class abstractbackend(object):
431 class abstractbackend(object):
433 def __init__(self, ui):
432 def __init__(self, ui):
434 self.ui = ui
433 self.ui = ui
435
434
436 def getfile(self, fname):
435 def getfile(self, fname):
437 """Return target file data and flags as a (data, (islink,
436 """Return target file data and flags as a (data, (islink,
438 isexec)) tuple. Data is None if file is missing/deleted.
437 isexec)) tuple. Data is None if file is missing/deleted.
439 """
438 """
440 raise NotImplementedError
439 raise NotImplementedError
441
440
442 def setfile(self, fname, data, mode, copysource):
441 def setfile(self, fname, data, mode, copysource):
443 """Write data to target file fname and set its mode. mode is a
442 """Write data to target file fname and set its mode. mode is a
444 (islink, isexec) tuple. If data is None, the file content should
443 (islink, isexec) tuple. If data is None, the file content should
445 be left unchanged. If the file is modified after being copied,
444 be left unchanged. If the file is modified after being copied,
446 copysource is set to the original file name.
445 copysource is set to the original file name.
447 """
446 """
448 raise NotImplementedError
447 raise NotImplementedError
449
448
450 def unlink(self, fname):
449 def unlink(self, fname):
451 """Unlink target file."""
450 """Unlink target file."""
452 raise NotImplementedError
451 raise NotImplementedError
453
452
454 def writerej(self, fname, failed, total, lines):
453 def writerej(self, fname, failed, total, lines):
455 """Write rejected lines for fname. total is the number of hunks
454 """Write rejected lines for fname. total is the number of hunks
456 which failed to apply and total the total number of hunks for this
455 which failed to apply and total the total number of hunks for this
457 files.
456 files.
458 """
457 """
459
458
460 def exists(self, fname):
459 def exists(self, fname):
461 raise NotImplementedError
460 raise NotImplementedError
462
461
463 def close(self):
462 def close(self):
464 raise NotImplementedError
463 raise NotImplementedError
465
464
466 class fsbackend(abstractbackend):
465 class fsbackend(abstractbackend):
467 def __init__(self, ui, basedir):
466 def __init__(self, ui, basedir):
468 super(fsbackend, self).__init__(ui)
467 super(fsbackend, self).__init__(ui)
469 self.opener = vfsmod.vfs(basedir)
468 self.opener = vfsmod.vfs(basedir)
470
469
471 def getfile(self, fname):
470 def getfile(self, fname):
472 if self.opener.islink(fname):
471 if self.opener.islink(fname):
473 return (self.opener.readlink(fname), (True, False))
472 return (self.opener.readlink(fname), (True, False))
474
473
475 isexec = False
474 isexec = False
476 try:
475 try:
477 isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
476 isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
478 except OSError as e:
477 except OSError as e:
479 if e.errno != errno.ENOENT:
478 if e.errno != errno.ENOENT:
480 raise
479 raise
481 try:
480 try:
482 return (self.opener.read(fname), (False, isexec))
481 return (self.opener.read(fname), (False, isexec))
483 except IOError as e:
482 except IOError as e:
484 if e.errno != errno.ENOENT:
483 if e.errno != errno.ENOENT:
485 raise
484 raise
486 return None, None
485 return None, None
487
486
488 def setfile(self, fname, data, mode, copysource):
487 def setfile(self, fname, data, mode, copysource):
489 islink, isexec = mode
488 islink, isexec = mode
490 if data is None:
489 if data is None:
491 self.opener.setflags(fname, islink, isexec)
490 self.opener.setflags(fname, islink, isexec)
492 return
491 return
493 if islink:
492 if islink:
494 self.opener.symlink(data, fname)
493 self.opener.symlink(data, fname)
495 else:
494 else:
496 self.opener.write(fname, data)
495 self.opener.write(fname, data)
497 if isexec:
496 if isexec:
498 self.opener.setflags(fname, False, True)
497 self.opener.setflags(fname, False, True)
499
498
500 def unlink(self, fname):
499 def unlink(self, fname):
501 rmdir = self.ui.configbool('experimental', 'removeemptydirs')
500 rmdir = self.ui.configbool('experimental', 'removeemptydirs')
502 self.opener.unlinkpath(fname, ignoremissing=True, rmdir=rmdir)
501 self.opener.unlinkpath(fname, ignoremissing=True, rmdir=rmdir)
503
502
504 def writerej(self, fname, failed, total, lines):
503 def writerej(self, fname, failed, total, lines):
505 fname = fname + ".rej"
504 fname = fname + ".rej"
506 self.ui.warn(
505 self.ui.warn(
507 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
506 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
508 (failed, total, fname))
507 (failed, total, fname))
509 fp = self.opener(fname, 'w')
508 fp = self.opener(fname, 'w')
510 fp.writelines(lines)
509 fp.writelines(lines)
511 fp.close()
510 fp.close()
512
511
513 def exists(self, fname):
512 def exists(self, fname):
514 return self.opener.lexists(fname)
513 return self.opener.lexists(fname)
515
514
516 class workingbackend(fsbackend):
515 class workingbackend(fsbackend):
517 def __init__(self, ui, repo, similarity):
516 def __init__(self, ui, repo, similarity):
518 super(workingbackend, self).__init__(ui, repo.root)
517 super(workingbackend, self).__init__(ui, repo.root)
519 self.repo = repo
518 self.repo = repo
520 self.similarity = similarity
519 self.similarity = similarity
521 self.removed = set()
520 self.removed = set()
522 self.changed = set()
521 self.changed = set()
523 self.copied = []
522 self.copied = []
524
523
525 def _checkknown(self, fname):
524 def _checkknown(self, fname):
526 if self.repo.dirstate[fname] == '?' and self.exists(fname):
525 if self.repo.dirstate[fname] == '?' and self.exists(fname):
527 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
526 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
528
527
529 def setfile(self, fname, data, mode, copysource):
528 def setfile(self, fname, data, mode, copysource):
530 self._checkknown(fname)
529 self._checkknown(fname)
531 super(workingbackend, self).setfile(fname, data, mode, copysource)
530 super(workingbackend, self).setfile(fname, data, mode, copysource)
532 if copysource is not None:
531 if copysource is not None:
533 self.copied.append((copysource, fname))
532 self.copied.append((copysource, fname))
534 self.changed.add(fname)
533 self.changed.add(fname)
535
534
536 def unlink(self, fname):
535 def unlink(self, fname):
537 self._checkknown(fname)
536 self._checkknown(fname)
538 super(workingbackend, self).unlink(fname)
537 super(workingbackend, self).unlink(fname)
539 self.removed.add(fname)
538 self.removed.add(fname)
540 self.changed.add(fname)
539 self.changed.add(fname)
541
540
542 def close(self):
541 def close(self):
543 wctx = self.repo[None]
542 wctx = self.repo[None]
544 changed = set(self.changed)
543 changed = set(self.changed)
545 for src, dst in self.copied:
544 for src, dst in self.copied:
546 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
545 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
547 if self.removed:
546 if self.removed:
548 wctx.forget(sorted(self.removed))
547 wctx.forget(sorted(self.removed))
549 for f in self.removed:
548 for f in self.removed:
550 if f not in self.repo.dirstate:
549 if f not in self.repo.dirstate:
551 # File was deleted and no longer belongs to the
550 # File was deleted and no longer belongs to the
552 # dirstate, it was probably marked added then
551 # dirstate, it was probably marked added then
553 # deleted, and should not be considered by
552 # deleted, and should not be considered by
554 # marktouched().
553 # marktouched().
555 changed.discard(f)
554 changed.discard(f)
556 if changed:
555 if changed:
557 scmutil.marktouched(self.repo, changed, self.similarity)
556 scmutil.marktouched(self.repo, changed, self.similarity)
558 return sorted(self.changed)
557 return sorted(self.changed)
559
558
560 class filestore(object):
559 class filestore(object):
561 def __init__(self, maxsize=None):
560 def __init__(self, maxsize=None):
562 self.opener = None
561 self.opener = None
563 self.files = {}
562 self.files = {}
564 self.created = 0
563 self.created = 0
565 self.maxsize = maxsize
564 self.maxsize = maxsize
566 if self.maxsize is None:
565 if self.maxsize is None:
567 self.maxsize = 4*(2**20)
566 self.maxsize = 4*(2**20)
568 self.size = 0
567 self.size = 0
569 self.data = {}
568 self.data = {}
570
569
571 def setfile(self, fname, data, mode, copied=None):
570 def setfile(self, fname, data, mode, copied=None):
572 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
571 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
573 self.data[fname] = (data, mode, copied)
572 self.data[fname] = (data, mode, copied)
574 self.size += len(data)
573 self.size += len(data)
575 else:
574 else:
576 if self.opener is None:
575 if self.opener is None:
577 root = pycompat.mkdtemp(prefix='hg-patch-')
576 root = pycompat.mkdtemp(prefix='hg-patch-')
578 self.opener = vfsmod.vfs(root)
577 self.opener = vfsmod.vfs(root)
579 # Avoid filename issues with these simple names
578 # Avoid filename issues with these simple names
580 fn = '%d' % self.created
579 fn = '%d' % self.created
581 self.opener.write(fn, data)
580 self.opener.write(fn, data)
582 self.created += 1
581 self.created += 1
583 self.files[fname] = (fn, mode, copied)
582 self.files[fname] = (fn, mode, copied)
584
583
585 def getfile(self, fname):
584 def getfile(self, fname):
586 if fname in self.data:
585 if fname in self.data:
587 return self.data[fname]
586 return self.data[fname]
588 if not self.opener or fname not in self.files:
587 if not self.opener or fname not in self.files:
589 return None, None, None
588 return None, None, None
590 fn, mode, copied = self.files[fname]
589 fn, mode, copied = self.files[fname]
591 return self.opener.read(fn), mode, copied
590 return self.opener.read(fn), mode, copied
592
591
593 def close(self):
592 def close(self):
594 if self.opener:
593 if self.opener:
595 shutil.rmtree(self.opener.base)
594 shutil.rmtree(self.opener.base)
596
595
597 class repobackend(abstractbackend):
596 class repobackend(abstractbackend):
598 def __init__(self, ui, repo, ctx, store):
597 def __init__(self, ui, repo, ctx, store):
599 super(repobackend, self).__init__(ui)
598 super(repobackend, self).__init__(ui)
600 self.repo = repo
599 self.repo = repo
601 self.ctx = ctx
600 self.ctx = ctx
602 self.store = store
601 self.store = store
603 self.changed = set()
602 self.changed = set()
604 self.removed = set()
603 self.removed = set()
605 self.copied = {}
604 self.copied = {}
606
605
607 def _checkknown(self, fname):
606 def _checkknown(self, fname):
608 if fname not in self.ctx:
607 if fname not in self.ctx:
609 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
608 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
610
609
611 def getfile(self, fname):
610 def getfile(self, fname):
612 try:
611 try:
613 fctx = self.ctx[fname]
612 fctx = self.ctx[fname]
614 except error.LookupError:
613 except error.LookupError:
615 return None, None
614 return None, None
616 flags = fctx.flags()
615 flags = fctx.flags()
617 return fctx.data(), ('l' in flags, 'x' in flags)
616 return fctx.data(), ('l' in flags, 'x' in flags)
618
617
619 def setfile(self, fname, data, mode, copysource):
618 def setfile(self, fname, data, mode, copysource):
620 if copysource:
619 if copysource:
621 self._checkknown(copysource)
620 self._checkknown(copysource)
622 if data is None:
621 if data is None:
623 data = self.ctx[fname].data()
622 data = self.ctx[fname].data()
624 self.store.setfile(fname, data, mode, copysource)
623 self.store.setfile(fname, data, mode, copysource)
625 self.changed.add(fname)
624 self.changed.add(fname)
626 if copysource:
625 if copysource:
627 self.copied[fname] = copysource
626 self.copied[fname] = copysource
628
627
629 def unlink(self, fname):
628 def unlink(self, fname):
630 self._checkknown(fname)
629 self._checkknown(fname)
631 self.removed.add(fname)
630 self.removed.add(fname)
632
631
633 def exists(self, fname):
632 def exists(self, fname):
634 return fname in self.ctx
633 return fname in self.ctx
635
634
636 def close(self):
635 def close(self):
637 return self.changed | self.removed
636 return self.changed | self.removed
638
637
639 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
638 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
640 unidesc = re.compile(br'@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
639 unidesc = re.compile(br'@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
641 contextdesc = re.compile(br'(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
640 contextdesc = re.compile(br'(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
642 eolmodes = ['strict', 'crlf', 'lf', 'auto']
641 eolmodes = ['strict', 'crlf', 'lf', 'auto']
643
642
644 class patchfile(object):
643 class patchfile(object):
645 def __init__(self, ui, gp, backend, store, eolmode='strict'):
644 def __init__(self, ui, gp, backend, store, eolmode='strict'):
646 self.fname = gp.path
645 self.fname = gp.path
647 self.eolmode = eolmode
646 self.eolmode = eolmode
648 self.eol = None
647 self.eol = None
649 self.backend = backend
648 self.backend = backend
650 self.ui = ui
649 self.ui = ui
651 self.lines = []
650 self.lines = []
652 self.exists = False
651 self.exists = False
653 self.missing = True
652 self.missing = True
654 self.mode = gp.mode
653 self.mode = gp.mode
655 self.copysource = gp.oldpath
654 self.copysource = gp.oldpath
656 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
655 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
657 self.remove = gp.op == 'DELETE'
656 self.remove = gp.op == 'DELETE'
658 if self.copysource is None:
657 if self.copysource is None:
659 data, mode = backend.getfile(self.fname)
658 data, mode = backend.getfile(self.fname)
660 else:
659 else:
661 data, mode = store.getfile(self.copysource)[:2]
660 data, mode = store.getfile(self.copysource)[:2]
662 if data is not None:
661 if data is not None:
663 self.exists = self.copysource is None or backend.exists(self.fname)
662 self.exists = self.copysource is None or backend.exists(self.fname)
664 self.missing = False
663 self.missing = False
665 if data:
664 if data:
666 self.lines = mdiff.splitnewlines(data)
665 self.lines = mdiff.splitnewlines(data)
667 if self.mode is None:
666 if self.mode is None:
668 self.mode = mode
667 self.mode = mode
669 if self.lines:
668 if self.lines:
670 # Normalize line endings
669 # Normalize line endings
671 if self.lines[0].endswith('\r\n'):
670 if self.lines[0].endswith('\r\n'):
672 self.eol = '\r\n'
671 self.eol = '\r\n'
673 elif self.lines[0].endswith('\n'):
672 elif self.lines[0].endswith('\n'):
674 self.eol = '\n'
673 self.eol = '\n'
675 if eolmode != 'strict':
674 if eolmode != 'strict':
676 nlines = []
675 nlines = []
677 for l in self.lines:
676 for l in self.lines:
678 if l.endswith('\r\n'):
677 if l.endswith('\r\n'):
679 l = l[:-2] + '\n'
678 l = l[:-2] + '\n'
680 nlines.append(l)
679 nlines.append(l)
681 self.lines = nlines
680 self.lines = nlines
682 else:
681 else:
683 if self.create:
682 if self.create:
684 self.missing = False
683 self.missing = False
685 if self.mode is None:
684 if self.mode is None:
686 self.mode = (False, False)
685 self.mode = (False, False)
687 if self.missing:
686 if self.missing:
688 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
687 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
689 self.ui.warn(_("(use '--prefix' to apply patch relative to the "
688 self.ui.warn(_("(use '--prefix' to apply patch relative to the "
690 "current directory)\n"))
689 "current directory)\n"))
691
690
692 self.hash = {}
691 self.hash = {}
693 self.dirty = 0
692 self.dirty = 0
694 self.offset = 0
693 self.offset = 0
695 self.skew = 0
694 self.skew = 0
696 self.rej = []
695 self.rej = []
697 self.fileprinted = False
696 self.fileprinted = False
698 self.printfile(False)
697 self.printfile(False)
699 self.hunks = 0
698 self.hunks = 0
700
699
701 def writelines(self, fname, lines, mode):
700 def writelines(self, fname, lines, mode):
702 if self.eolmode == 'auto':
701 if self.eolmode == 'auto':
703 eol = self.eol
702 eol = self.eol
704 elif self.eolmode == 'crlf':
703 elif self.eolmode == 'crlf':
705 eol = '\r\n'
704 eol = '\r\n'
706 else:
705 else:
707 eol = '\n'
706 eol = '\n'
708
707
709 if self.eolmode != 'strict' and eol and eol != '\n':
708 if self.eolmode != 'strict' and eol and eol != '\n':
710 rawlines = []
709 rawlines = []
711 for l in lines:
710 for l in lines:
712 if l and l.endswith('\n'):
711 if l and l.endswith('\n'):
713 l = l[:-1] + eol
712 l = l[:-1] + eol
714 rawlines.append(l)
713 rawlines.append(l)
715 lines = rawlines
714 lines = rawlines
716
715
717 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
716 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
718
717
719 def printfile(self, warn):
718 def printfile(self, warn):
720 if self.fileprinted:
719 if self.fileprinted:
721 return
720 return
722 if warn or self.ui.verbose:
721 if warn or self.ui.verbose:
723 self.fileprinted = True
722 self.fileprinted = True
724 s = _("patching file %s\n") % self.fname
723 s = _("patching file %s\n") % self.fname
725 if warn:
724 if warn:
726 self.ui.warn(s)
725 self.ui.warn(s)
727 else:
726 else:
728 self.ui.note(s)
727 self.ui.note(s)
729
728
730
729
731 def findlines(self, l, linenum):
730 def findlines(self, l, linenum):
732 # looks through the hash and finds candidate lines. The
731 # looks through the hash and finds candidate lines. The
733 # result is a list of line numbers sorted based on distance
732 # result is a list of line numbers sorted based on distance
734 # from linenum
733 # from linenum
735
734
736 cand = self.hash.get(l, [])
735 cand = self.hash.get(l, [])
737 if len(cand) > 1:
736 if len(cand) > 1:
738 # resort our list of potentials forward then back.
737 # resort our list of potentials forward then back.
739 cand.sort(key=lambda x: abs(x - linenum))
738 cand.sort(key=lambda x: abs(x - linenum))
740 return cand
739 return cand
741
740
742 def write_rej(self):
741 def write_rej(self):
743 # our rejects are a little different from patch(1). This always
742 # our rejects are a little different from patch(1). This always
744 # creates rejects in the same form as the original patch. A file
743 # creates rejects in the same form as the original patch. A file
745 # header is inserted so that you can run the reject through patch again
744 # header is inserted so that you can run the reject through patch again
746 # without having to type the filename.
745 # without having to type the filename.
747 if not self.rej:
746 if not self.rej:
748 return
747 return
749 base = os.path.basename(self.fname)
748 base = os.path.basename(self.fname)
750 lines = ["--- %s\n+++ %s\n" % (base, base)]
749 lines = ["--- %s\n+++ %s\n" % (base, base)]
751 for x in self.rej:
750 for x in self.rej:
752 for l in x.hunk:
751 for l in x.hunk:
753 lines.append(l)
752 lines.append(l)
754 if l[-1:] != '\n':
753 if l[-1:] != '\n':
755 lines.append("\n\\ No newline at end of file\n")
754 lines.append("\n\\ No newline at end of file\n")
756 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
755 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
757
756
758 def apply(self, h):
757 def apply(self, h):
759 if not h.complete():
758 if not h.complete():
760 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
759 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
761 (h.number, h.desc, len(h.a), h.lena, len(h.b),
760 (h.number, h.desc, len(h.a), h.lena, len(h.b),
762 h.lenb))
761 h.lenb))
763
762
764 self.hunks += 1
763 self.hunks += 1
765
764
766 if self.missing:
765 if self.missing:
767 self.rej.append(h)
766 self.rej.append(h)
768 return -1
767 return -1
769
768
770 if self.exists and self.create:
769 if self.exists and self.create:
771 if self.copysource:
770 if self.copysource:
772 self.ui.warn(_("cannot create %s: destination already "
771 self.ui.warn(_("cannot create %s: destination already "
773 "exists\n") % self.fname)
772 "exists\n") % self.fname)
774 else:
773 else:
775 self.ui.warn(_("file %s already exists\n") % self.fname)
774 self.ui.warn(_("file %s already exists\n") % self.fname)
776 self.rej.append(h)
775 self.rej.append(h)
777 return -1
776 return -1
778
777
779 if isinstance(h, binhunk):
778 if isinstance(h, binhunk):
780 if self.remove:
779 if self.remove:
781 self.backend.unlink(self.fname)
780 self.backend.unlink(self.fname)
782 else:
781 else:
783 l = h.new(self.lines)
782 l = h.new(self.lines)
784 self.lines[:] = l
783 self.lines[:] = l
785 self.offset += len(l)
784 self.offset += len(l)
786 self.dirty = True
785 self.dirty = True
787 return 0
786 return 0
788
787
789 horig = h
788 horig = h
790 if (self.eolmode in ('crlf', 'lf')
789 if (self.eolmode in ('crlf', 'lf')
791 or self.eolmode == 'auto' and self.eol):
790 or self.eolmode == 'auto' and self.eol):
792 # If new eols are going to be normalized, then normalize
791 # If new eols are going to be normalized, then normalize
793 # hunk data before patching. Otherwise, preserve input
792 # hunk data before patching. Otherwise, preserve input
794 # line-endings.
793 # line-endings.
795 h = h.getnormalized()
794 h = h.getnormalized()
796
795
797 # fast case first, no offsets, no fuzz
796 # fast case first, no offsets, no fuzz
798 old, oldstart, new, newstart = h.fuzzit(0, False)
797 old, oldstart, new, newstart = h.fuzzit(0, False)
799 oldstart += self.offset
798 oldstart += self.offset
800 orig_start = oldstart
799 orig_start = oldstart
801 # if there's skew we want to emit the "(offset %d lines)" even
800 # if there's skew we want to emit the "(offset %d lines)" even
802 # when the hunk cleanly applies at start + skew, so skip the
801 # when the hunk cleanly applies at start + skew, so skip the
803 # fast case code
802 # fast case code
804 if self.skew == 0 and diffhelper.testhunk(old, self.lines, oldstart):
803 if self.skew == 0 and diffhelper.testhunk(old, self.lines, oldstart):
805 if self.remove:
804 if self.remove:
806 self.backend.unlink(self.fname)
805 self.backend.unlink(self.fname)
807 else:
806 else:
808 self.lines[oldstart:oldstart + len(old)] = new
807 self.lines[oldstart:oldstart + len(old)] = new
809 self.offset += len(new) - len(old)
808 self.offset += len(new) - len(old)
810 self.dirty = True
809 self.dirty = True
811 return 0
810 return 0
812
811
813 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
812 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
814 self.hash = {}
813 self.hash = {}
815 for x, s in enumerate(self.lines):
814 for x, s in enumerate(self.lines):
816 self.hash.setdefault(s, []).append(x)
815 self.hash.setdefault(s, []).append(x)
817
816
818 for fuzzlen in pycompat.xrange(self.ui.configint("patch", "fuzz") + 1):
817 for fuzzlen in pycompat.xrange(self.ui.configint("patch", "fuzz") + 1):
819 for toponly in [True, False]:
818 for toponly in [True, False]:
820 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
819 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
821 oldstart = oldstart + self.offset + self.skew
820 oldstart = oldstart + self.offset + self.skew
822 oldstart = min(oldstart, len(self.lines))
821 oldstart = min(oldstart, len(self.lines))
823 if old:
822 if old:
824 cand = self.findlines(old[0][1:], oldstart)
823 cand = self.findlines(old[0][1:], oldstart)
825 else:
824 else:
826 # Only adding lines with no or fuzzed context, just
825 # Only adding lines with no or fuzzed context, just
827 # take the skew in account
826 # take the skew in account
828 cand = [oldstart]
827 cand = [oldstart]
829
828
830 for l in cand:
829 for l in cand:
831 if not old or diffhelper.testhunk(old, self.lines, l):
830 if not old or diffhelper.testhunk(old, self.lines, l):
832 self.lines[l : l + len(old)] = new
831 self.lines[l : l + len(old)] = new
833 self.offset += len(new) - len(old)
832 self.offset += len(new) - len(old)
834 self.skew = l - orig_start
833 self.skew = l - orig_start
835 self.dirty = True
834 self.dirty = True
836 offset = l - orig_start - fuzzlen
835 offset = l - orig_start - fuzzlen
837 if fuzzlen:
836 if fuzzlen:
838 msg = _("Hunk #%d succeeded at %d "
837 msg = _("Hunk #%d succeeded at %d "
839 "with fuzz %d "
838 "with fuzz %d "
840 "(offset %d lines).\n")
839 "(offset %d lines).\n")
841 self.printfile(True)
840 self.printfile(True)
842 self.ui.warn(msg %
841 self.ui.warn(msg %
843 (h.number, l + 1, fuzzlen, offset))
842 (h.number, l + 1, fuzzlen, offset))
844 else:
843 else:
845 msg = _("Hunk #%d succeeded at %d "
844 msg = _("Hunk #%d succeeded at %d "
846 "(offset %d lines).\n")
845 "(offset %d lines).\n")
847 self.ui.note(msg % (h.number, l + 1, offset))
846 self.ui.note(msg % (h.number, l + 1, offset))
848 return fuzzlen
847 return fuzzlen
849 self.printfile(True)
848 self.printfile(True)
850 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
849 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
851 self.rej.append(horig)
850 self.rej.append(horig)
852 return -1
851 return -1
853
852
854 def close(self):
853 def close(self):
855 if self.dirty:
854 if self.dirty:
856 self.writelines(self.fname, self.lines, self.mode)
855 self.writelines(self.fname, self.lines, self.mode)
857 self.write_rej()
856 self.write_rej()
858 return len(self.rej)
857 return len(self.rej)
859
858
860 class header(object):
859 class header(object):
861 """patch header
860 """patch header
862 """
861 """
863 diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
862 diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
864 diff_re = re.compile('diff -r .* (.*)$')
863 diff_re = re.compile('diff -r .* (.*)$')
865 allhunks_re = re.compile('(?:index|deleted file) ')
864 allhunks_re = re.compile('(?:index|deleted file) ')
866 pretty_re = re.compile('(?:new file|deleted file) ')
865 pretty_re = re.compile('(?:new file|deleted file) ')
867 special_re = re.compile('(?:index|deleted|copy|rename) ')
866 special_re = re.compile('(?:index|deleted|copy|rename) ')
868 newfile_re = re.compile('(?:new file)')
867 newfile_re = re.compile('(?:new file)')
869
868
870 def __init__(self, header):
869 def __init__(self, header):
871 self.header = header
870 self.header = header
872 self.hunks = []
871 self.hunks = []
873
872
874 def binary(self):
873 def binary(self):
875 return any(h.startswith('index ') for h in self.header)
874 return any(h.startswith('index ') for h in self.header)
876
875
877 def pretty(self, fp):
876 def pretty(self, fp):
878 for h in self.header:
877 for h in self.header:
879 if h.startswith('index '):
878 if h.startswith('index '):
880 fp.write(_('this modifies a binary file (all or nothing)\n'))
879 fp.write(_('this modifies a binary file (all or nothing)\n'))
881 break
880 break
882 if self.pretty_re.match(h):
881 if self.pretty_re.match(h):
883 fp.write(h)
882 fp.write(h)
884 if self.binary():
883 if self.binary():
885 fp.write(_('this is a binary file\n'))
884 fp.write(_('this is a binary file\n'))
886 break
885 break
887 if h.startswith('---'):
886 if h.startswith('---'):
888 fp.write(_('%d hunks, %d lines changed\n') %
887 fp.write(_('%d hunks, %d lines changed\n') %
889 (len(self.hunks),
888 (len(self.hunks),
890 sum([max(h.added, h.removed) for h in self.hunks])))
889 sum([max(h.added, h.removed) for h in self.hunks])))
891 break
890 break
892 fp.write(h)
891 fp.write(h)
893
892
894 def write(self, fp):
893 def write(self, fp):
895 fp.write(''.join(self.header))
894 fp.write(''.join(self.header))
896
895
897 def allhunks(self):
896 def allhunks(self):
898 return any(self.allhunks_re.match(h) for h in self.header)
897 return any(self.allhunks_re.match(h) for h in self.header)
899
898
900 def files(self):
899 def files(self):
901 match = self.diffgit_re.match(self.header[0])
900 match = self.diffgit_re.match(self.header[0])
902 if match:
901 if match:
903 fromfile, tofile = match.groups()
902 fromfile, tofile = match.groups()
904 if fromfile == tofile:
903 if fromfile == tofile:
905 return [fromfile]
904 return [fromfile]
906 return [fromfile, tofile]
905 return [fromfile, tofile]
907 else:
906 else:
908 return self.diff_re.match(self.header[0]).groups()
907 return self.diff_re.match(self.header[0]).groups()
909
908
910 def filename(self):
909 def filename(self):
911 return self.files()[-1]
910 return self.files()[-1]
912
911
913 def __repr__(self):
912 def __repr__(self):
914 return '<header %s>' % (' '.join(map(repr, self.files())))
913 return '<header %s>' % (' '.join(map(repr, self.files())))
915
914
916 def isnewfile(self):
915 def isnewfile(self):
917 return any(self.newfile_re.match(h) for h in self.header)
916 return any(self.newfile_re.match(h) for h in self.header)
918
917
919 def special(self):
918 def special(self):
920 # Special files are shown only at the header level and not at the hunk
919 # Special files are shown only at the header level and not at the hunk
921 # level for example a file that has been deleted is a special file.
920 # level for example a file that has been deleted is a special file.
922 # The user cannot change the content of the operation, in the case of
921 # The user cannot change the content of the operation, in the case of
923 # the deleted file he has to take the deletion or not take it, he
922 # the deleted file he has to take the deletion or not take it, he
924 # cannot take some of it.
923 # cannot take some of it.
925 # Newly added files are special if they are empty, they are not special
924 # Newly added files are special if they are empty, they are not special
926 # if they have some content as we want to be able to change it
925 # if they have some content as we want to be able to change it
927 nocontent = len(self.header) == 2
926 nocontent = len(self.header) == 2
928 emptynewfile = self.isnewfile() and nocontent
927 emptynewfile = self.isnewfile() and nocontent
929 return emptynewfile or \
928 return emptynewfile or \
930 any(self.special_re.match(h) for h in self.header)
929 any(self.special_re.match(h) for h in self.header)
931
930
932 class recordhunk(object):
931 class recordhunk(object):
933 """patch hunk
932 """patch hunk
934
933
935 XXX shouldn't we merge this with the other hunk class?
934 XXX shouldn't we merge this with the other hunk class?
936 """
935 """
937
936
938 def __init__(self, header, fromline, toline, proc, before, hunk, after,
937 def __init__(self, header, fromline, toline, proc, before, hunk, after,
939 maxcontext=None):
938 maxcontext=None):
940 def trimcontext(lines, reverse=False):
939 def trimcontext(lines, reverse=False):
941 if maxcontext is not None:
940 if maxcontext is not None:
942 delta = len(lines) - maxcontext
941 delta = len(lines) - maxcontext
943 if delta > 0:
942 if delta > 0:
944 if reverse:
943 if reverse:
945 return delta, lines[delta:]
944 return delta, lines[delta:]
946 else:
945 else:
947 return delta, lines[:maxcontext]
946 return delta, lines[:maxcontext]
948 return 0, lines
947 return 0, lines
949
948
950 self.header = header
949 self.header = header
951 trimedbefore, self.before = trimcontext(before, True)
950 trimedbefore, self.before = trimcontext(before, True)
952 self.fromline = fromline + trimedbefore
951 self.fromline = fromline + trimedbefore
953 self.toline = toline + trimedbefore
952 self.toline = toline + trimedbefore
954 _trimedafter, self.after = trimcontext(after, False)
953 _trimedafter, self.after = trimcontext(after, False)
955 self.proc = proc
954 self.proc = proc
956 self.hunk = hunk
955 self.hunk = hunk
957 self.added, self.removed = self.countchanges(self.hunk)
956 self.added, self.removed = self.countchanges(self.hunk)
958
957
959 def __eq__(self, v):
958 def __eq__(self, v):
960 if not isinstance(v, recordhunk):
959 if not isinstance(v, recordhunk):
961 return False
960 return False
962
961
963 return ((v.hunk == self.hunk) and
962 return ((v.hunk == self.hunk) and
964 (v.proc == self.proc) and
963 (v.proc == self.proc) and
965 (self.fromline == v.fromline) and
964 (self.fromline == v.fromline) and
966 (self.header.files() == v.header.files()))
965 (self.header.files() == v.header.files()))
967
966
968 def __hash__(self):
967 def __hash__(self):
969 return hash((tuple(self.hunk),
968 return hash((tuple(self.hunk),
970 tuple(self.header.files()),
969 tuple(self.header.files()),
971 self.fromline,
970 self.fromline,
972 self.proc))
971 self.proc))
973
972
974 def countchanges(self, hunk):
973 def countchanges(self, hunk):
975 """hunk -> (n+,n-)"""
974 """hunk -> (n+,n-)"""
976 add = len([h for h in hunk if h.startswith('+')])
975 add = len([h for h in hunk if h.startswith('+')])
977 rem = len([h for h in hunk if h.startswith('-')])
976 rem = len([h for h in hunk if h.startswith('-')])
978 return add, rem
977 return add, rem
979
978
980 def reversehunk(self):
979 def reversehunk(self):
981 """return another recordhunk which is the reverse of the hunk
980 """return another recordhunk which is the reverse of the hunk
982
981
983 If this hunk is diff(A, B), the returned hunk is diff(B, A). To do
982 If this hunk is diff(A, B), the returned hunk is diff(B, A). To do
984 that, swap fromline/toline and +/- signs while keep other things
983 that, swap fromline/toline and +/- signs while keep other things
985 unchanged.
984 unchanged.
986 """
985 """
987 m = {'+': '-', '-': '+', '\\': '\\'}
986 m = {'+': '-', '-': '+', '\\': '\\'}
988 hunk = ['%s%s' % (m[l[0:1]], l[1:]) for l in self.hunk]
987 hunk = ['%s%s' % (m[l[0:1]], l[1:]) for l in self.hunk]
989 return recordhunk(self.header, self.toline, self.fromline, self.proc,
988 return recordhunk(self.header, self.toline, self.fromline, self.proc,
990 self.before, hunk, self.after)
989 self.before, hunk, self.after)
991
990
992 def write(self, fp):
991 def write(self, fp):
993 delta = len(self.before) + len(self.after)
992 delta = len(self.before) + len(self.after)
994 if self.after and self.after[-1] == '\\ No newline at end of file\n':
993 if self.after and self.after[-1] == '\\ No newline at end of file\n':
995 delta -= 1
994 delta -= 1
996 fromlen = delta + self.removed
995 fromlen = delta + self.removed
997 tolen = delta + self.added
996 tolen = delta + self.added
998 fp.write('@@ -%d,%d +%d,%d @@%s\n' %
997 fp.write('@@ -%d,%d +%d,%d @@%s\n' %
999 (self.fromline, fromlen, self.toline, tolen,
998 (self.fromline, fromlen, self.toline, tolen,
1000 self.proc and (' ' + self.proc)))
999 self.proc and (' ' + self.proc)))
1001 fp.write(''.join(self.before + self.hunk + self.after))
1000 fp.write(''.join(self.before + self.hunk + self.after))
1002
1001
1003 pretty = write
1002 pretty = write
1004
1003
1005 def filename(self):
1004 def filename(self):
1006 return self.header.filename()
1005 return self.header.filename()
1007
1006
1008 def __repr__(self):
1007 def __repr__(self):
1009 return '<hunk %r@%d>' % (self.filename(), self.fromline)
1008 return '<hunk %r@%d>' % (self.filename(), self.fromline)
1010
1009
1011 def getmessages():
1010 def getmessages():
1012 return {
1011 return {
1013 'multiple': {
1012 'multiple': {
1014 'apply': _("apply change %d/%d to '%s'?"),
1013 'apply': _("apply change %d/%d to '%s'?"),
1015 'discard': _("discard change %d/%d to '%s'?"),
1014 'discard': _("discard change %d/%d to '%s'?"),
1016 'record': _("record change %d/%d to '%s'?"),
1015 'record': _("record change %d/%d to '%s'?"),
1017 },
1016 },
1018 'single': {
1017 'single': {
1019 'apply': _("apply this change to '%s'?"),
1018 'apply': _("apply this change to '%s'?"),
1020 'discard': _("discard this change to '%s'?"),
1019 'discard': _("discard this change to '%s'?"),
1021 'record': _("record this change to '%s'?"),
1020 'record': _("record this change to '%s'?"),
1022 },
1021 },
1023 'help': {
1022 'help': {
1024 'apply': _('[Ynesfdaq?]'
1023 'apply': _('[Ynesfdaq?]'
1025 '$$ &Yes, apply this change'
1024 '$$ &Yes, apply this change'
1026 '$$ &No, skip this change'
1025 '$$ &No, skip this change'
1027 '$$ &Edit this change manually'
1026 '$$ &Edit this change manually'
1028 '$$ &Skip remaining changes to this file'
1027 '$$ &Skip remaining changes to this file'
1029 '$$ Apply remaining changes to this &file'
1028 '$$ Apply remaining changes to this &file'
1030 '$$ &Done, skip remaining changes and files'
1029 '$$ &Done, skip remaining changes and files'
1031 '$$ Apply &all changes to all remaining files'
1030 '$$ Apply &all changes to all remaining files'
1032 '$$ &Quit, applying no changes'
1031 '$$ &Quit, applying no changes'
1033 '$$ &? (display help)'),
1032 '$$ &? (display help)'),
1034 'discard': _('[Ynesfdaq?]'
1033 'discard': _('[Ynesfdaq?]'
1035 '$$ &Yes, discard this change'
1034 '$$ &Yes, discard this change'
1036 '$$ &No, skip this change'
1035 '$$ &No, skip this change'
1037 '$$ &Edit this change manually'
1036 '$$ &Edit this change manually'
1038 '$$ &Skip remaining changes to this file'
1037 '$$ &Skip remaining changes to this file'
1039 '$$ Discard remaining changes to this &file'
1038 '$$ Discard remaining changes to this &file'
1040 '$$ &Done, skip remaining changes and files'
1039 '$$ &Done, skip remaining changes and files'
1041 '$$ Discard &all changes to all remaining files'
1040 '$$ Discard &all changes to all remaining files'
1042 '$$ &Quit, discarding no changes'
1041 '$$ &Quit, discarding no changes'
1043 '$$ &? (display help)'),
1042 '$$ &? (display help)'),
1044 'record': _('[Ynesfdaq?]'
1043 'record': _('[Ynesfdaq?]'
1045 '$$ &Yes, record this change'
1044 '$$ &Yes, record this change'
1046 '$$ &No, skip this change'
1045 '$$ &No, skip this change'
1047 '$$ &Edit this change manually'
1046 '$$ &Edit this change manually'
1048 '$$ &Skip remaining changes to this file'
1047 '$$ &Skip remaining changes to this file'
1049 '$$ Record remaining changes to this &file'
1048 '$$ Record remaining changes to this &file'
1050 '$$ &Done, skip remaining changes and files'
1049 '$$ &Done, skip remaining changes and files'
1051 '$$ Record &all changes to all remaining files'
1050 '$$ Record &all changes to all remaining files'
1052 '$$ &Quit, recording no changes'
1051 '$$ &Quit, recording no changes'
1053 '$$ &? (display help)'),
1052 '$$ &? (display help)'),
1054 }
1053 }
1055 }
1054 }
1056
1055
1057 def filterpatch(ui, headers, operation=None):
1056 def filterpatch(ui, headers, operation=None):
1058 """Interactively filter patch chunks into applied-only chunks"""
1057 """Interactively filter patch chunks into applied-only chunks"""
1059 messages = getmessages()
1058 messages = getmessages()
1060
1059
1061 if operation is None:
1060 if operation is None:
1062 operation = 'record'
1061 operation = 'record'
1063
1062
1064 def prompt(skipfile, skipall, query, chunk):
1063 def prompt(skipfile, skipall, query, chunk):
1065 """prompt query, and process base inputs
1064 """prompt query, and process base inputs
1066
1065
1067 - y/n for the rest of file
1066 - y/n for the rest of file
1068 - y/n for the rest
1067 - y/n for the rest
1069 - ? (help)
1068 - ? (help)
1070 - q (quit)
1069 - q (quit)
1071
1070
1072 Return True/False and possibly updated skipfile and skipall.
1071 Return True/False and possibly updated skipfile and skipall.
1073 """
1072 """
1074 newpatches = None
1073 newpatches = None
1075 if skipall is not None:
1074 if skipall is not None:
1076 return skipall, skipfile, skipall, newpatches
1075 return skipall, skipfile, skipall, newpatches
1077 if skipfile is not None:
1076 if skipfile is not None:
1078 return skipfile, skipfile, skipall, newpatches
1077 return skipfile, skipfile, skipall, newpatches
1079 while True:
1078 while True:
1080 resps = messages['help'][operation]
1079 resps = messages['help'][operation]
1081 r = ui.promptchoice("%s %s" % (query, resps))
1080 r = ui.promptchoice("%s %s" % (query, resps))
1082 ui.write("\n")
1081 ui.write("\n")
1083 if r == 8: # ?
1082 if r == 8: # ?
1084 for c, t in ui.extractchoices(resps)[1]:
1083 for c, t in ui.extractchoices(resps)[1]:
1085 ui.write('%s - %s\n' % (c, encoding.lower(t)))
1084 ui.write('%s - %s\n' % (c, encoding.lower(t)))
1086 continue
1085 continue
1087 elif r == 0: # yes
1086 elif r == 0: # yes
1088 ret = True
1087 ret = True
1089 elif r == 1: # no
1088 elif r == 1: # no
1090 ret = False
1089 ret = False
1091 elif r == 2: # Edit patch
1090 elif r == 2: # Edit patch
1092 if chunk is None:
1091 if chunk is None:
1093 ui.write(_('cannot edit patch for whole file'))
1092 ui.write(_('cannot edit patch for whole file'))
1094 ui.write("\n")
1093 ui.write("\n")
1095 continue
1094 continue
1096 if chunk.header.binary():
1095 if chunk.header.binary():
1097 ui.write(_('cannot edit patch for binary file'))
1096 ui.write(_('cannot edit patch for binary file'))
1098 ui.write("\n")
1097 ui.write("\n")
1099 continue
1098 continue
1100 # Patch comment based on the Git one (based on comment at end of
1099 # Patch comment based on the Git one (based on comment at end of
1101 # https://mercurial-scm.org/wiki/RecordExtension)
1100 # https://mercurial-scm.org/wiki/RecordExtension)
1102 phelp = '---' + _("""
1101 phelp = '---' + _("""
1103 To remove '-' lines, make them ' ' lines (context).
1102 To remove '-' lines, make them ' ' lines (context).
1104 To remove '+' lines, delete them.
1103 To remove '+' lines, delete them.
1105 Lines starting with # will be removed from the patch.
1104 Lines starting with # will be removed from the patch.
1106
1105
1107 If the patch applies cleanly, the edited hunk will immediately be
1106 If the patch applies cleanly, the edited hunk will immediately be
1108 added to the record list. If it does not apply cleanly, a rejects
1107 added to the record list. If it does not apply cleanly, a rejects
1109 file will be generated: you can use that when you try again. If
1108 file will be generated: you can use that when you try again. If
1110 all lines of the hunk are removed, then the edit is aborted and
1109 all lines of the hunk are removed, then the edit is aborted and
1111 the hunk is left unchanged.
1110 the hunk is left unchanged.
1112 """)
1111 """)
1113 (patchfd, patchfn) = pycompat.mkstemp(prefix="hg-editor-",
1112 (patchfd, patchfn) = pycompat.mkstemp(prefix="hg-editor-",
1114 suffix=".diff")
1113 suffix=".diff")
1115 ncpatchfp = None
1114 ncpatchfp = None
1116 try:
1115 try:
1117 # Write the initial patch
1116 # Write the initial patch
1118 f = util.nativeeolwriter(os.fdopen(patchfd, r'wb'))
1117 f = util.nativeeolwriter(os.fdopen(patchfd, r'wb'))
1119 chunk.header.write(f)
1118 chunk.header.write(f)
1120 chunk.write(f)
1119 chunk.write(f)
1121 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
1120 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
1122 f.close()
1121 f.close()
1123 # Start the editor and wait for it to complete
1122 # Start the editor and wait for it to complete
1124 editor = ui.geteditor()
1123 editor = ui.geteditor()
1125 ret = ui.system("%s \"%s\"" % (editor, patchfn),
1124 ret = ui.system("%s \"%s\"" % (editor, patchfn),
1126 environ={'HGUSER': ui.username()},
1125 environ={'HGUSER': ui.username()},
1127 blockedtag='filterpatch')
1126 blockedtag='filterpatch')
1128 if ret != 0:
1127 if ret != 0:
1129 ui.warn(_("editor exited with exit code %d\n") % ret)
1128 ui.warn(_("editor exited with exit code %d\n") % ret)
1130 continue
1129 continue
1131 # Remove comment lines
1130 # Remove comment lines
1132 patchfp = open(patchfn, r'rb')
1131 patchfp = open(patchfn, r'rb')
1133 ncpatchfp = stringio()
1132 ncpatchfp = stringio()
1134 for line in util.iterfile(patchfp):
1133 for line in util.iterfile(patchfp):
1135 line = util.fromnativeeol(line)
1134 line = util.fromnativeeol(line)
1136 if not line.startswith('#'):
1135 if not line.startswith('#'):
1137 ncpatchfp.write(line)
1136 ncpatchfp.write(line)
1138 patchfp.close()
1137 patchfp.close()
1139 ncpatchfp.seek(0)
1138 ncpatchfp.seek(0)
1140 newpatches = parsepatch(ncpatchfp)
1139 newpatches = parsepatch(ncpatchfp)
1141 finally:
1140 finally:
1142 os.unlink(patchfn)
1141 os.unlink(patchfn)
1143 del ncpatchfp
1142 del ncpatchfp
1144 # Signal that the chunk shouldn't be applied as-is, but
1143 # Signal that the chunk shouldn't be applied as-is, but
1145 # provide the new patch to be used instead.
1144 # provide the new patch to be used instead.
1146 ret = False
1145 ret = False
1147 elif r == 3: # Skip
1146 elif r == 3: # Skip
1148 ret = skipfile = False
1147 ret = skipfile = False
1149 elif r == 4: # file (Record remaining)
1148 elif r == 4: # file (Record remaining)
1150 ret = skipfile = True
1149 ret = skipfile = True
1151 elif r == 5: # done, skip remaining
1150 elif r == 5: # done, skip remaining
1152 ret = skipall = False
1151 ret = skipall = False
1153 elif r == 6: # all
1152 elif r == 6: # all
1154 ret = skipall = True
1153 ret = skipall = True
1155 elif r == 7: # quit
1154 elif r == 7: # quit
1156 raise error.Abort(_('user quit'))
1155 raise error.Abort(_('user quit'))
1157 return ret, skipfile, skipall, newpatches
1156 return ret, skipfile, skipall, newpatches
1158
1157
1159 seen = set()
1158 seen = set()
1160 applied = {} # 'filename' -> [] of chunks
1159 applied = {} # 'filename' -> [] of chunks
1161 skipfile, skipall = None, None
1160 skipfile, skipall = None, None
1162 pos, total = 1, sum(len(h.hunks) for h in headers)
1161 pos, total = 1, sum(len(h.hunks) for h in headers)
1163 for h in headers:
1162 for h in headers:
1164 pos += len(h.hunks)
1163 pos += len(h.hunks)
1165 skipfile = None
1164 skipfile = None
1166 fixoffset = 0
1165 fixoffset = 0
1167 hdr = ''.join(h.header)
1166 hdr = ''.join(h.header)
1168 if hdr in seen:
1167 if hdr in seen:
1169 continue
1168 continue
1170 seen.add(hdr)
1169 seen.add(hdr)
1171 if skipall is None:
1170 if skipall is None:
1172 h.pretty(ui)
1171 h.pretty(ui)
1173 msg = (_('examine changes to %s?') %
1172 msg = (_('examine changes to %s?') %
1174 _(' and ').join("'%s'" % f for f in h.files()))
1173 _(' and ').join("'%s'" % f for f in h.files()))
1175 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
1174 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
1176 if not r:
1175 if not r:
1177 continue
1176 continue
1178 applied[h.filename()] = [h]
1177 applied[h.filename()] = [h]
1179 if h.allhunks():
1178 if h.allhunks():
1180 applied[h.filename()] += h.hunks
1179 applied[h.filename()] += h.hunks
1181 continue
1180 continue
1182 for i, chunk in enumerate(h.hunks):
1181 for i, chunk in enumerate(h.hunks):
1183 if skipfile is None and skipall is None:
1182 if skipfile is None and skipall is None:
1184 chunk.pretty(ui)
1183 chunk.pretty(ui)
1185 if total == 1:
1184 if total == 1:
1186 msg = messages['single'][operation] % chunk.filename()
1185 msg = messages['single'][operation] % chunk.filename()
1187 else:
1186 else:
1188 idx = pos - len(h.hunks) + i
1187 idx = pos - len(h.hunks) + i
1189 msg = messages['multiple'][operation] % (idx, total,
1188 msg = messages['multiple'][operation] % (idx, total,
1190 chunk.filename())
1189 chunk.filename())
1191 r, skipfile, skipall, newpatches = prompt(skipfile,
1190 r, skipfile, skipall, newpatches = prompt(skipfile,
1192 skipall, msg, chunk)
1191 skipall, msg, chunk)
1193 if r:
1192 if r:
1194 if fixoffset:
1193 if fixoffset:
1195 chunk = copy.copy(chunk)
1194 chunk = copy.copy(chunk)
1196 chunk.toline += fixoffset
1195 chunk.toline += fixoffset
1197 applied[chunk.filename()].append(chunk)
1196 applied[chunk.filename()].append(chunk)
1198 elif newpatches is not None:
1197 elif newpatches is not None:
1199 for newpatch in newpatches:
1198 for newpatch in newpatches:
1200 for newhunk in newpatch.hunks:
1199 for newhunk in newpatch.hunks:
1201 if fixoffset:
1200 if fixoffset:
1202 newhunk.toline += fixoffset
1201 newhunk.toline += fixoffset
1203 applied[newhunk.filename()].append(newhunk)
1202 applied[newhunk.filename()].append(newhunk)
1204 else:
1203 else:
1205 fixoffset += chunk.removed - chunk.added
1204 fixoffset += chunk.removed - chunk.added
1206 return (sum([h for h in applied.itervalues()
1205 return (sum([h for h in applied.itervalues()
1207 if h[0].special() or len(h) > 1], []), {})
1206 if h[0].special() or len(h) > 1], []), {})
class hunk(object):
    # Represents one hunk of a textual patch: the '@@ ...' (or context-diff
    # '***'/'---') description line plus the old ('a') and new ('b') sides.
    # Parsing happens eagerly in __init__ from the linereader 'lr'.
    def __init__(self, desc, num, lr, context):
        # desc: the hunk header line; num: 1-based hunk index (for errors);
        # lr: linereader to consume hunk body from, or None to build an
        # empty shell (see getnormalized); context: True for context-diff
        # format, False for unified format.
        self.number = num
        self.desc = desc
        self.hunk = [desc]      # raw hunk lines, starting with the header
        self.a = []             # old-side lines, prefixed '-' or ' '
        self.b = []             # new-side lines, no prefix
        self.starta = self.lena = None
        self.startb = self.lenb = None
        if lr is not None:
            if context:
                self.read_context_hunk(lr)
            else:
                self.read_unified_hunk(lr)

    def getnormalized(self):
        """Return a copy with line endings normalized to LF."""

        def normalize(lines):
            nlines = []
            for line in lines:
                if line.endswith('\r\n'):
                    line = line[:-2] + '\n'
                nlines.append(line)
            return nlines

        # Dummy object, it is rebuilt manually
        nh = hunk(self.desc, self.number, None, None)
        nh.number = self.number
        nh.desc = self.desc
        nh.hunk = self.hunk
        nh.a = normalize(self.a)
        nh.b = normalize(self.b)
        nh.starta = self.starta
        nh.startb = self.startb
        nh.lena = self.lena
        nh.lenb = self.lenb
        return nh

    def read_unified_hunk(self, lr):
        # Parse '@@ -starta,lena +startb,lenb @@' and the following body
        # lines via diffhelper.addlines.
        m = unidesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, self.lena, self.startb, self.lenb = m.groups()
        # a missing length in the range means a single line
        if self.lena is None:
            self.lena = 1
        else:
            self.lena = int(self.lena)
        if self.lenb is None:
            self.lenb = 1
        else:
            self.lenb = int(self.lenb)
        self.starta = int(self.starta)
        self.startb = int(self.startb)
        try:
            diffhelper.addlines(lr, self.hunk, self.lena, self.lenb,
                                self.a, self.b)
        except error.ParseError as e:
            raise PatchError(_("bad hunk #%d: %s") % (self.number, e))
        # if we hit eof before finishing out the hunk, the last line will
        # be zero length. Lets try to fix it up.
        while len(self.hunk[-1]) == 0:
            del self.hunk[-1]
            del self.a[-1]
            del self.b[-1]
            self.lena -= 1
            self.lenb -= 1
        self._fixnewline(lr)

    def read_context_hunk(self, lr):
        # Parse a context-diff hunk ('*** a,b ****' old block followed by
        # '--- c,d ----' new block) and rebuild self.hunk as the unified
        # equivalent so the rest of the code only deals with one format.
        self.desc = lr.readline()
        m = contextdesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, aend = m.groups()
        self.starta = int(self.starta)
        if aend is None:
            aend = self.starta
        self.lena = int(aend) - self.starta
        if self.starta:
            self.lena += 1
        # read the old block: '- ' removals, '! ' changes, '  ' context
        for x in pycompat.xrange(self.lena):
            l = lr.readline()
            if l.startswith('---'):
                # lines addition, old block is empty
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('- ') or l.startswith('! '):
                u = '-' + s
            elif l.startswith('  '):
                u = ' ' + s
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.a.append(u)
            self.hunk.append(u)

        l = lr.readline()
        if l.startswith(br'\ '):
            # '\ No newline at end of file' marker: strip the trailing
            # newline we just stored for the last old line
            s = self.a[-1][:-1]
            self.a[-1] = s
            self.hunk[-1] = s
            l = lr.readline()
        m = contextdesc.match(l)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.startb, bend = m.groups()
        self.startb = int(self.startb)
        if bend is None:
            bend = self.startb
        self.lenb = int(bend) - self.startb
        if self.startb:
            self.lenb += 1
        hunki = 1
        # read the new block and merge it into self.hunk at the right spot
        for x in pycompat.xrange(self.lenb):
            l = lr.readline()
            if l.startswith(br'\ '):
                # XXX: the only way to hit this is with an invalid line range.
                # The no-eol marker is not counted in the line range, but I
                # guess there are diff(1) out there which behave differently.
                s = self.b[-1][:-1]
                self.b[-1] = s
                self.hunk[hunki - 1] = s
                continue
            if not l:
                # line deletions, new block is empty and we hit EOF
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('+ ') or l.startswith('! '):
                u = '+' + s
            elif l.startswith('  '):
                u = ' ' + s
            elif len(self.b) == 0:
                # line deletions, new block is empty
                lr.push(l)
                break
            else:
                # NOTE: message says "old text line" even though this is the
                # new block; longstanding quirk kept for compatibility.
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.b.append(s)
            while True:
                # advance past '-' lines already in self.hunk until we find
                # the matching context line, or insert the '+'/' ' line
                if hunki >= len(self.hunk):
                    h = ""
                else:
                    h = self.hunk[hunki]
                hunki += 1
                if h == u:
                    break
                elif h.startswith('-'):
                    continue
                else:
                    self.hunk.insert(hunki - 1, u)
                    break

        if not self.a:
            # this happens when lines were only added to the hunk
            for x in self.hunk:
                if x.startswith('-') or x.startswith(' '):
                    self.a.append(x)
        if not self.b:
            # this happens when lines were only deleted from the hunk
            for x in self.hunk:
                if x.startswith('+') or x.startswith(' '):
                    self.b.append(x[1:])
        # @@ -start,len +start,len @@
        self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
                                               self.startb, self.lenb)
        self.hunk[0] = self.desc
        self._fixnewline(lr)

    def _fixnewline(self, lr):
        # Consume an optional trailing '\ No newline at end of file' marker;
        # push the line back otherwise.
        l = lr.readline()
        if l.startswith(br'\ '):
            diffhelper.fixnewline(self.hunk, self.a, self.b)
        else:
            lr.push(l)

    def complete(self):
        # True when both sides contain exactly the announced number of lines.
        return len(self.a) == self.lena and len(self.b) == self.lenb

    def _fuzzit(self, old, new, fuzz, toponly):
        # this removes context lines from the top and bottom of list 'l'. It
        # checks the hunk to make sure only context lines are removed, and then
        # returns a new shortened list of lines.
        fuzz = min(fuzz, len(old))
        if fuzz:
            top = 0
            bot = 0
            hlen = len(self.hunk)
            for x in pycompat.xrange(hlen - 1):
                # the hunk starts with the @@ line, so use x+1
                if self.hunk[x + 1].startswith(' '):
                    top += 1
                else:
                    break
            if not toponly:
                for x in pycompat.xrange(hlen - 1):
                    if self.hunk[hlen - bot - 1].startswith(' '):
                        bot += 1
                    else:
                        break

            bot = min(fuzz, bot)
            top = min(fuzz, top)
            return old[top:len(old) - bot], new[top:len(new) - bot], top
        return old, new, 0

    def fuzzit(self, fuzz, toponly):
        # Return (old, oldstart, new, newstart) with up to 'fuzz' context
        # lines trimmed from each end (top only when toponly is True).
        old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
        oldstart = self.starta + top
        newstart = self.startb + top
        # zero length hunk ranges already have their start decremented
        if self.lena and oldstart > 0:
            oldstart -= 1
        if self.lenb and newstart > 0:
            newstart -= 1
        return old, oldstart, new, newstart
1427
1426
class binhunk(object):
    'A binary patch file.'
    def __init__(self, lr, fname):
        # lr: linereader positioned after the 'GIT binary patch' line;
        # fname: file name, used only in error messages.
        self.text = None        # decoded payload, set by _read on success
        self.delta = False      # True for 'delta' (vs 'literal') payloads
        self.hunk = ['GIT binary patch\n']   # raw lines, for re-emission
        self._fname = fname
        self._read(lr)

    def complete(self):
        # Parsing succeeded iff _read managed to decode the payload.
        return self.text is not None

    def new(self, lines):
        # Return the new file content: either the literal payload, or the
        # result of applying the binary delta to the old content ('lines').
        if self.delta:
            return [applybindelta(self.text, ''.join(lines))]
        return [self.text]

    def _read(self, lr):
        # Decode a git binary patch: a 'literal <size>'/'delta <size>'
        # header, then base85-encoded zlib-compressed data, one line per
        # <= 52-byte chunk, each prefixed with a length character.
        def getline(lr, hunk):
            l = lr.readline()
            hunk.append(l)
            return l.rstrip('\r\n')

        while True:
            line = getline(lr, self.hunk)
            if not line:
                raise PatchError(_('could not extract "%s" binary data')
                                 % self._fname)
            if line.startswith('literal '):
                size = int(line[8:].rstrip())
                break
            if line.startswith('delta '):
                size = int(line[6:].rstrip())
                self.delta = True
                break
        dec = []
        line = getline(lr, self.hunk)
        while len(line) > 1:
            # first character encodes the decoded chunk length:
            # 'A'-'Z' -> 1..26, 'a'-'z' -> 27..52
            l = line[0:1]
            if l <= 'Z' and l >= 'A':
                l = ord(l) - ord('A') + 1
            else:
                l = ord(l) - ord('a') + 27
            try:
                dec.append(util.b85decode(line[1:])[:l])
            except ValueError as e:
                raise PatchError(_('could not decode "%s" binary patch: %s')
                                 % (self._fname, stringutil.forcebytestr(e)))
            line = getline(lr, self.hunk)
        text = zlib.decompress(''.join(dec))
        # the header's size is for the decompressed payload; mismatch means
        # truncated or corrupt data
        if len(text) != size:
            raise PatchError(_('"%s" length is %d bytes, should be %d')
                             % (self._fname, len(text), size))
        self.text = text
1482
1481
def parsefilename(str):
    """Extract the file name from a '--- '/'+++ ' patch header line.

    The name runs from just after the 4-character marker up to the first
    tab, or, when there is no tab, up to the first space; trailing EOL
    characters are stripped first.  The whole remainder is returned when
    neither separator is present.
    """
    # --- filename \t|space stuff
    name = str[4:].rstrip('\r\n')
    # a tab terminator takes precedence over a space terminator
    for sep in ('\t', ' '):
        cut = name.find(sep)
        if cut >= 0:
            return name[:cut]
    return name
1492
1491
def reversehunks(hunks):
    '''reverse the signs in the hunks given as argument

    This function operates on hunks coming out of patch.filterpatch, that is
    a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:

    >>> rawpatch = b"""diff --git a/folder1/g b/folder1/g
    ... --- a/folder1/g
    ... +++ b/folder1/g
    ... @@ -1,7 +1,7 @@
    ... +firstline
    ...  c
    ...  1
    ...  2
    ... + 3
    ... -4
    ...  5
    ...  d
    ... +lastline"""
    >>> hunks = parsepatch([rawpatch])
    >>> hunkscomingfromfilterpatch = []
    >>> for h in hunks:
    ...     hunkscomingfromfilterpatch.append(h)
    ...     hunkscomingfromfilterpatch.extend(h.hunks)

    >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
    >>> from . import util
    >>> fp = util.stringio()
    >>> for c in reversedhunks:
    ...      c.write(fp)
    >>> fp.seek(0) or None
    >>> reversedpatch = fp.read()
    >>> print(pycompat.sysstr(reversedpatch))
    diff --git a/folder1/g b/folder1/g
    --- a/folder1/g
    +++ b/folder1/g
    @@ -1,4 +1,3 @@
    -firstline
     c
     1
     2
    @@ -2,6 +1,6 @@
     c
     1
     2
    - 3
    +4
     5
     d
    @@ -6,3 +5,2 @@
     5
     d
    -lastline

    '''

    reversed_chunks = []
    for chunk in hunks:
        # headers have no 'reversehunk' and pass through unchanged;
        # recordhunk objects are replaced by their reversed counterpart
        if util.safehasattr(chunk, 'reversehunk'):
            reversed_chunks.append(chunk.reversehunk())
        else:
            reversed_chunks.append(chunk)
    return reversed_chunks
1555
1554
def parsepatch(originalchunks, maxcontext=None):
    """patch -> [] of headers -> [] of hunks

    If maxcontext is not None, trim context lines if necessary.

    >>> rawpatch = b'''diff --git a/folder1/g b/folder1/g
    ... --- a/folder1/g
    ... +++ b/folder1/g
    ... @@ -1,8 +1,10 @@
    ...  1
    ...  2
    ... -3
    ...  4
    ...  5
    ...  6
    ... +6.1
    ... +6.2
    ...  7
    ...  8
    ... +9'''
    >>> out = util.stringio()
    >>> headers = parsepatch([rawpatch], maxcontext=1)
    >>> for header in headers:
    ...     header.write(out)
    ...     for hunk in header.hunks:
    ...         hunk.write(out)
    >>> print(pycompat.sysstr(out.getvalue()))
    diff --git a/folder1/g b/folder1/g
    --- a/folder1/g
    +++ b/folder1/g
    @@ -2,3 +2,2 @@
     2
    -3
     4
    @@ -6,2 +5,4 @@
     6
    +6.1
    +6.2
     7
    @@ -8,1 +9,2 @@
     8
    +9
    """
    class parser(object):
        """patch parsing state machine"""
        def __init__(self):
            # current position in the old/new file, used to compute each
            # recordhunk's ranges
            self.fromline = 0
            self.toline = 0
            self.proc = ''          # optional function/section name from '@@'
            self.header = None      # header currently being filled
            self.context = []       # trailing context lines not yet assigned
            self.before = []        # context lines preceding the open hunk
            self.hunk = []          # +/- lines of the open hunk
            self.headers = []       # completed headers (the result)

        def addrange(self, limits):
            # a new '@@' range: flush any open hunk, then reset positions
            self.addcontext([])
            fromstart, fromend, tostart, toend, proc = limits
            self.fromline = int(fromstart)
            self.toline = int(tostart)
            self.proc = proc

        def addcontext(self, context):
            # close the open hunk (if any), attaching 'context' as its
            # trailing context, and advance the line counters
            if self.hunk:
                h = recordhunk(self.header, self.fromline, self.toline,
                        self.proc, self.before, self.hunk, context, maxcontext)
                self.header.hunks.append(h)
                self.fromline += len(self.before) + h.removed
                self.toline += len(self.before) + h.added
                self.before = []
                self.hunk = []
            self.context = context

        def addhunk(self, hunk):
            # pending context becomes the leading context of this hunk
            if self.context:
                self.before = self.context
                self.context = []
            if self.hunk:
                self.addcontext([])
            self.hunk = hunk

        def newfile(self, hdr):
            # start a new file header; flush whatever was open before
            self.addcontext([])
            h = header(hdr)
            self.headers.append(h)
            self.header = h

        def addother(self, line):
            pass # 'other' lines are ignored

        def finished(self):
            self.addcontext([])
            return self.headers

        # legal (state, event) -> action table; events come from scanpatch.
        # Note: values are plain functions, hence the explicit 'p' argument
        # at the call site below.
        transitions = {
            'file': {'context': addcontext,
                     'file': newfile,
                     'hunk': addhunk,
                     'range': addrange},
            'context': {'file': newfile,
                        'hunk': addhunk,
                        'range': addrange,
                        'other': addother},
            'hunk': {'context': addcontext,
                     'file': newfile,
                     'range': addrange},
            'range': {'context': addcontext,
                      'hunk': addhunk},
            'other': {'other': addother},
            }

    p = parser()
    fp = stringio()
    fp.write(''.join(originalchunks))
    fp.seek(0)

    state = 'context'
    for newstate, data in scanpatch(fp):
        try:
            p.transitions[state][newstate](p, data)
        except KeyError:
            raise PatchError('unhandled transition: %s -> %s' %
                             (state, newstate))
        state = newstate
    del fp
    return p.finished()
1682
1681
def pathtransform(path, strip, prefix):
    '''turn a path from a patch into a path suitable for the repository

    prefix, if not empty, is expected to be normalized with a / at the end.

    Returns (stripped components, path in repository).

    >>> pathtransform(b'a/b/c', 0, b'')
    ('', 'a/b/c')
    >>> pathtransform(b'   a/b/c   ', 0, b'')
    ('', '   a/b/c')
    >>> pathtransform(b'   a/b/c   ', 2, b'')
    ('a/b/', 'c')
    >>> pathtransform(b'a/b/c', 0, b'd/e/')
    ('', 'd/e/a/b/c')
    >>> pathtransform(b'   a//b/c   ', 2, b'd/e/')
    ('a//b/', 'd/e/c')
    >>> pathtransform(b'a/b/c', 3, b'')
    Traceback (most recent call last):
    PatchError: unable to strip away 1 of 3 dirs from a/b/c
    '''
    # fast path: nothing to strip, just normalize and prepend the prefix
    if strip == 0:
        return '', prefix + path.rstrip()
    end = len(path)
    pos = 0
    for done in range(strip):
        # advance past the next path component
        pos = path.find('/', pos)
        if pos == -1:
            raise PatchError(_("unable to strip away %d of %d dirs from %s") %
                             (strip - done, strip, path))
        pos += 1
        # consume '//' in the path
        while pos < end - 1 and path[pos:pos + 1] == '/':
            pos += 1
    return path[:pos].lstrip(), prefix + path[pos:].rstrip()
1720
1719
1721 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
1720 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
1722 nulla = afile_orig == "/dev/null"
1721 nulla = afile_orig == "/dev/null"
1723 nullb = bfile_orig == "/dev/null"
1722 nullb = bfile_orig == "/dev/null"
1724 create = nulla and hunk.starta == 0 and hunk.lena == 0
1723 create = nulla and hunk.starta == 0 and hunk.lena == 0
1725 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1724 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1726 abase, afile = pathtransform(afile_orig, strip, prefix)
1725 abase, afile = pathtransform(afile_orig, strip, prefix)
1727 gooda = not nulla and backend.exists(afile)
1726 gooda = not nulla and backend.exists(afile)
1728 bbase, bfile = pathtransform(bfile_orig, strip, prefix)
1727 bbase, bfile = pathtransform(bfile_orig, strip, prefix)
1729 if afile == bfile:
1728 if afile == bfile:
1730 goodb = gooda
1729 goodb = gooda
1731 else:
1730 else:
1732 goodb = not nullb and backend.exists(bfile)
1731 goodb = not nullb and backend.exists(bfile)
1733 missing = not goodb and not gooda and not create
1732 missing = not goodb and not gooda and not create
1734
1733
1735 # some diff programs apparently produce patches where the afile is
1734 # some diff programs apparently produce patches where the afile is
1736 # not /dev/null, but afile starts with bfile
1735 # not /dev/null, but afile starts with bfile
1737 abasedir = afile[:afile.rfind('/') + 1]
1736 abasedir = afile[:afile.rfind('/') + 1]
1738 bbasedir = bfile[:bfile.rfind('/') + 1]
1737 bbasedir = bfile[:bfile.rfind('/') + 1]
1739 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1738 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1740 and hunk.starta == 0 and hunk.lena == 0):
1739 and hunk.starta == 0 and hunk.lena == 0):
1741 create = True
1740 create = True
1742 missing = False
1741 missing = False
1743
1742
1744 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1743 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1745 # diff is between a file and its backup. In this case, the original
1744 # diff is between a file and its backup. In this case, the original
1746 # file should be patched (see original mpatch code).
1745 # file should be patched (see original mpatch code).
1747 isbackup = (abase == bbase and bfile.startswith(afile))
1746 isbackup = (abase == bbase and bfile.startswith(afile))
1748 fname = None
1747 fname = None
1749 if not missing:
1748 if not missing:
1750 if gooda and goodb:
1749 if gooda and goodb:
1751 if isbackup:
1750 if isbackup:
1752 fname = afile
1751 fname = afile
1753 else:
1752 else:
1754 fname = bfile
1753 fname = bfile
1755 elif gooda:
1754 elif gooda:
1756 fname = afile
1755 fname = afile
1757
1756
1758 if not fname:
1757 if not fname:
1759 if not nullb:
1758 if not nullb:
1760 if isbackup:
1759 if isbackup:
1761 fname = afile
1760 fname = afile
1762 else:
1761 else:
1763 fname = bfile
1762 fname = bfile
1764 elif not nulla:
1763 elif not nulla:
1765 fname = afile
1764 fname = afile
1766 else:
1765 else:
1767 raise PatchError(_("undefined source and destination files"))
1766 raise PatchError(_("undefined source and destination files"))
1768
1767
1769 gp = patchmeta(fname)
1768 gp = patchmeta(fname)
1770 if create:
1769 if create:
1771 gp.op = 'ADD'
1770 gp.op = 'ADD'
1772 elif remove:
1771 elif remove:
1773 gp.op = 'DELETE'
1772 gp.op = 'DELETE'
1774 return gp
1773 return gp
1775
1774
1776 def scanpatch(fp):
1775 def scanpatch(fp):
1777 """like patch.iterhunks, but yield different events
1776 """like patch.iterhunks, but yield different events
1778
1777
1779 - ('file', [header_lines + fromfile + tofile])
1778 - ('file', [header_lines + fromfile + tofile])
1780 - ('context', [context_lines])
1779 - ('context', [context_lines])
1781 - ('hunk', [hunk_lines])
1780 - ('hunk', [hunk_lines])
1782 - ('range', (-start,len, +start,len, proc))
1781 - ('range', (-start,len, +start,len, proc))
1783 """
1782 """
1784 lines_re = re.compile(br'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
1783 lines_re = re.compile(br'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
1785 lr = linereader(fp)
1784 lr = linereader(fp)
1786
1785
1787 def scanwhile(first, p):
1786 def scanwhile(first, p):
1788 """scan lr while predicate holds"""
1787 """scan lr while predicate holds"""
1789 lines = [first]
1788 lines = [first]
1790 for line in iter(lr.readline, ''):
1789 for line in iter(lr.readline, ''):
1791 if p(line):
1790 if p(line):
1792 lines.append(line)
1791 lines.append(line)
1793 else:
1792 else:
1794 lr.push(line)
1793 lr.push(line)
1795 break
1794 break
1796 return lines
1795 return lines
1797
1796
1798 for line in iter(lr.readline, ''):
1797 for line in iter(lr.readline, ''):
1799 if line.startswith('diff --git a/') or line.startswith('diff -r '):
1798 if line.startswith('diff --git a/') or line.startswith('diff -r '):
1800 def notheader(line):
1799 def notheader(line):
1801 s = line.split(None, 1)
1800 s = line.split(None, 1)
1802 return not s or s[0] not in ('---', 'diff')
1801 return not s or s[0] not in ('---', 'diff')
1803 header = scanwhile(line, notheader)
1802 header = scanwhile(line, notheader)
1804 fromfile = lr.readline()
1803 fromfile = lr.readline()
1805 if fromfile.startswith('---'):
1804 if fromfile.startswith('---'):
1806 tofile = lr.readline()
1805 tofile = lr.readline()
1807 header += [fromfile, tofile]
1806 header += [fromfile, tofile]
1808 else:
1807 else:
1809 lr.push(fromfile)
1808 lr.push(fromfile)
1810 yield 'file', header
1809 yield 'file', header
1811 elif line.startswith(' '):
1810 elif line.startswith(' '):
1812 cs = (' ', '\\')
1811 cs = (' ', '\\')
1813 yield 'context', scanwhile(line, lambda l: l.startswith(cs))
1812 yield 'context', scanwhile(line, lambda l: l.startswith(cs))
1814 elif line.startswith(('-', '+')):
1813 elif line.startswith(('-', '+')):
1815 cs = ('-', '+', '\\')
1814 cs = ('-', '+', '\\')
1816 yield 'hunk', scanwhile(line, lambda l: l.startswith(cs))
1815 yield 'hunk', scanwhile(line, lambda l: l.startswith(cs))
1817 else:
1816 else:
1818 m = lines_re.match(line)
1817 m = lines_re.match(line)
1819 if m:
1818 if m:
1820 yield 'range', m.groups()
1819 yield 'range', m.groups()
1821 else:
1820 else:
1822 yield 'other', line
1821 yield 'other', line
1823
1822
1824 def scangitpatch(lr, firstline):
1823 def scangitpatch(lr, firstline):
1825 """
1824 """
1826 Git patches can emit:
1825 Git patches can emit:
1827 - rename a to b
1826 - rename a to b
1828 - change b
1827 - change b
1829 - copy a to c
1828 - copy a to c
1830 - change c
1829 - change c
1831
1830
1832 We cannot apply this sequence as-is, the renamed 'a' could not be
1831 We cannot apply this sequence as-is, the renamed 'a' could not be
1833 found for it would have been renamed already. And we cannot copy
1832 found for it would have been renamed already. And we cannot copy
1834 from 'b' instead because 'b' would have been changed already. So
1833 from 'b' instead because 'b' would have been changed already. So
1835 we scan the git patch for copy and rename commands so we can
1834 we scan the git patch for copy and rename commands so we can
1836 perform the copies ahead of time.
1835 perform the copies ahead of time.
1837 """
1836 """
1838 pos = 0
1837 pos = 0
1839 try:
1838 try:
1840 pos = lr.fp.tell()
1839 pos = lr.fp.tell()
1841 fp = lr.fp
1840 fp = lr.fp
1842 except IOError:
1841 except IOError:
1843 fp = stringio(lr.fp.read())
1842 fp = stringio(lr.fp.read())
1844 gitlr = linereader(fp)
1843 gitlr = linereader(fp)
1845 gitlr.push(firstline)
1844 gitlr.push(firstline)
1846 gitpatches = readgitpatch(gitlr)
1845 gitpatches = readgitpatch(gitlr)
1847 fp.seek(pos)
1846 fp.seek(pos)
1848 return gitpatches
1847 return gitpatches
1849
1848
1850 def iterhunks(fp):
1849 def iterhunks(fp):
1851 """Read a patch and yield the following events:
1850 """Read a patch and yield the following events:
1852 - ("file", afile, bfile, firsthunk): select a new target file.
1851 - ("file", afile, bfile, firsthunk): select a new target file.
1853 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1852 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1854 "file" event.
1853 "file" event.
1855 - ("git", gitchanges): current diff is in git format, gitchanges
1854 - ("git", gitchanges): current diff is in git format, gitchanges
1856 maps filenames to gitpatch records. Unique event.
1855 maps filenames to gitpatch records. Unique event.
1857 """
1856 """
1858 afile = ""
1857 afile = ""
1859 bfile = ""
1858 bfile = ""
1860 state = None
1859 state = None
1861 hunknum = 0
1860 hunknum = 0
1862 emitfile = newfile = False
1861 emitfile = newfile = False
1863 gitpatches = None
1862 gitpatches = None
1864
1863
1865 # our states
1864 # our states
1866 BFILE = 1
1865 BFILE = 1
1867 context = None
1866 context = None
1868 lr = linereader(fp)
1867 lr = linereader(fp)
1869
1868
1870 for x in iter(lr.readline, ''):
1869 for x in iter(lr.readline, ''):
1871 if state == BFILE and (
1870 if state == BFILE and (
1872 (not context and x.startswith('@'))
1871 (not context and x.startswith('@'))
1873 or (context is not False and x.startswith('***************'))
1872 or (context is not False and x.startswith('***************'))
1874 or x.startswith('GIT binary patch')):
1873 or x.startswith('GIT binary patch')):
1875 gp = None
1874 gp = None
1876 if (gitpatches and
1875 if (gitpatches and
1877 gitpatches[-1].ispatching(afile, bfile)):
1876 gitpatches[-1].ispatching(afile, bfile)):
1878 gp = gitpatches.pop()
1877 gp = gitpatches.pop()
1879 if x.startswith('GIT binary patch'):
1878 if x.startswith('GIT binary patch'):
1880 h = binhunk(lr, gp.path)
1879 h = binhunk(lr, gp.path)
1881 else:
1880 else:
1882 if context is None and x.startswith('***************'):
1881 if context is None and x.startswith('***************'):
1883 context = True
1882 context = True
1884 h = hunk(x, hunknum + 1, lr, context)
1883 h = hunk(x, hunknum + 1, lr, context)
1885 hunknum += 1
1884 hunknum += 1
1886 if emitfile:
1885 if emitfile:
1887 emitfile = False
1886 emitfile = False
1888 yield 'file', (afile, bfile, h, gp and gp.copy() or None)
1887 yield 'file', (afile, bfile, h, gp and gp.copy() or None)
1889 yield 'hunk', h
1888 yield 'hunk', h
1890 elif x.startswith('diff --git a/'):
1889 elif x.startswith('diff --git a/'):
1891 m = gitre.match(x.rstrip(' \r\n'))
1890 m = gitre.match(x.rstrip(' \r\n'))
1892 if not m:
1891 if not m:
1893 continue
1892 continue
1894 if gitpatches is None:
1893 if gitpatches is None:
1895 # scan whole input for git metadata
1894 # scan whole input for git metadata
1896 gitpatches = scangitpatch(lr, x)
1895 gitpatches = scangitpatch(lr, x)
1897 yield 'git', [g.copy() for g in gitpatches
1896 yield 'git', [g.copy() for g in gitpatches
1898 if g.op in ('COPY', 'RENAME')]
1897 if g.op in ('COPY', 'RENAME')]
1899 gitpatches.reverse()
1898 gitpatches.reverse()
1900 afile = 'a/' + m.group(1)
1899 afile = 'a/' + m.group(1)
1901 bfile = 'b/' + m.group(2)
1900 bfile = 'b/' + m.group(2)
1902 while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
1901 while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
1903 gp = gitpatches.pop()
1902 gp = gitpatches.pop()
1904 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1903 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1905 if not gitpatches:
1904 if not gitpatches:
1906 raise PatchError(_('failed to synchronize metadata for "%s"')
1905 raise PatchError(_('failed to synchronize metadata for "%s"')
1907 % afile[2:])
1906 % afile[2:])
1908 newfile = True
1907 newfile = True
1909 elif x.startswith('---'):
1908 elif x.startswith('---'):
1910 # check for a unified diff
1909 # check for a unified diff
1911 l2 = lr.readline()
1910 l2 = lr.readline()
1912 if not l2.startswith('+++'):
1911 if not l2.startswith('+++'):
1913 lr.push(l2)
1912 lr.push(l2)
1914 continue
1913 continue
1915 newfile = True
1914 newfile = True
1916 context = False
1915 context = False
1917 afile = parsefilename(x)
1916 afile = parsefilename(x)
1918 bfile = parsefilename(l2)
1917 bfile = parsefilename(l2)
1919 elif x.startswith('***'):
1918 elif x.startswith('***'):
1920 # check for a context diff
1919 # check for a context diff
1921 l2 = lr.readline()
1920 l2 = lr.readline()
1922 if not l2.startswith('---'):
1921 if not l2.startswith('---'):
1923 lr.push(l2)
1922 lr.push(l2)
1924 continue
1923 continue
1925 l3 = lr.readline()
1924 l3 = lr.readline()
1926 lr.push(l3)
1925 lr.push(l3)
1927 if not l3.startswith("***************"):
1926 if not l3.startswith("***************"):
1928 lr.push(l2)
1927 lr.push(l2)
1929 continue
1928 continue
1930 newfile = True
1929 newfile = True
1931 context = True
1930 context = True
1932 afile = parsefilename(x)
1931 afile = parsefilename(x)
1933 bfile = parsefilename(l2)
1932 bfile = parsefilename(l2)
1934
1933
1935 if newfile:
1934 if newfile:
1936 newfile = False
1935 newfile = False
1937 emitfile = True
1936 emitfile = True
1938 state = BFILE
1937 state = BFILE
1939 hunknum = 0
1938 hunknum = 0
1940
1939
1941 while gitpatches:
1940 while gitpatches:
1942 gp = gitpatches.pop()
1941 gp = gitpatches.pop()
1943 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1942 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1944
1943
1945 def applybindelta(binchunk, data):
1944 def applybindelta(binchunk, data):
1946 """Apply a binary delta hunk
1945 """Apply a binary delta hunk
1947 The algorithm used is the algorithm from git's patch-delta.c
1946 The algorithm used is the algorithm from git's patch-delta.c
1948 """
1947 """
1949 def deltahead(binchunk):
1948 def deltahead(binchunk):
1950 i = 0
1949 i = 0
1951 for c in pycompat.bytestr(binchunk):
1950 for c in pycompat.bytestr(binchunk):
1952 i += 1
1951 i += 1
1953 if not (ord(c) & 0x80):
1952 if not (ord(c) & 0x80):
1954 return i
1953 return i
1955 return i
1954 return i
1956 out = ""
1955 out = ""
1957 s = deltahead(binchunk)
1956 s = deltahead(binchunk)
1958 binchunk = binchunk[s:]
1957 binchunk = binchunk[s:]
1959 s = deltahead(binchunk)
1958 s = deltahead(binchunk)
1960 binchunk = binchunk[s:]
1959 binchunk = binchunk[s:]
1961 i = 0
1960 i = 0
1962 while i < len(binchunk):
1961 while i < len(binchunk):
1963 cmd = ord(binchunk[i:i + 1])
1962 cmd = ord(binchunk[i:i + 1])
1964 i += 1
1963 i += 1
1965 if (cmd & 0x80):
1964 if (cmd & 0x80):
1966 offset = 0
1965 offset = 0
1967 size = 0
1966 size = 0
1968 if (cmd & 0x01):
1967 if (cmd & 0x01):
1969 offset = ord(binchunk[i:i + 1])
1968 offset = ord(binchunk[i:i + 1])
1970 i += 1
1969 i += 1
1971 if (cmd & 0x02):
1970 if (cmd & 0x02):
1972 offset |= ord(binchunk[i:i + 1]) << 8
1971 offset |= ord(binchunk[i:i + 1]) << 8
1973 i += 1
1972 i += 1
1974 if (cmd & 0x04):
1973 if (cmd & 0x04):
1975 offset |= ord(binchunk[i:i + 1]) << 16
1974 offset |= ord(binchunk[i:i + 1]) << 16
1976 i += 1
1975 i += 1
1977 if (cmd & 0x08):
1976 if (cmd & 0x08):
1978 offset |= ord(binchunk[i:i + 1]) << 24
1977 offset |= ord(binchunk[i:i + 1]) << 24
1979 i += 1
1978 i += 1
1980 if (cmd & 0x10):
1979 if (cmd & 0x10):
1981 size = ord(binchunk[i:i + 1])
1980 size = ord(binchunk[i:i + 1])
1982 i += 1
1981 i += 1
1983 if (cmd & 0x20):
1982 if (cmd & 0x20):
1984 size |= ord(binchunk[i:i + 1]) << 8
1983 size |= ord(binchunk[i:i + 1]) << 8
1985 i += 1
1984 i += 1
1986 if (cmd & 0x40):
1985 if (cmd & 0x40):
1987 size |= ord(binchunk[i:i + 1]) << 16
1986 size |= ord(binchunk[i:i + 1]) << 16
1988 i += 1
1987 i += 1
1989 if size == 0:
1988 if size == 0:
1990 size = 0x10000
1989 size = 0x10000
1991 offset_end = offset + size
1990 offset_end = offset + size
1992 out += data[offset:offset_end]
1991 out += data[offset:offset_end]
1993 elif cmd != 0:
1992 elif cmd != 0:
1994 offset_end = i + cmd
1993 offset_end = i + cmd
1995 out += binchunk[i:offset_end]
1994 out += binchunk[i:offset_end]
1996 i += cmd
1995 i += cmd
1997 else:
1996 else:
1998 raise PatchError(_('unexpected delta opcode 0'))
1997 raise PatchError(_('unexpected delta opcode 0'))
1999 return out
1998 return out
2000
1999
2001 def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
2000 def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
2002 """Reads a patch from fp and tries to apply it.
2001 """Reads a patch from fp and tries to apply it.
2003
2002
2004 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
2003 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
2005 there was any fuzz.
2004 there was any fuzz.
2006
2005
2007 If 'eolmode' is 'strict', the patch content and patched file are
2006 If 'eolmode' is 'strict', the patch content and patched file are
2008 read in binary mode. Otherwise, line endings are ignored when
2007 read in binary mode. Otherwise, line endings are ignored when
2009 patching then normalized according to 'eolmode'.
2008 patching then normalized according to 'eolmode'.
2010 """
2009 """
2011 return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
2010 return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
2012 prefix=prefix, eolmode=eolmode)
2011 prefix=prefix, eolmode=eolmode)
2013
2012
2014 def _canonprefix(repo, prefix):
2013 def _canonprefix(repo, prefix):
2015 if prefix:
2014 if prefix:
2016 prefix = pathutil.canonpath(repo.root, repo.getcwd(), prefix)
2015 prefix = pathutil.canonpath(repo.root, repo.getcwd(), prefix)
2017 if prefix != '':
2016 if prefix != '':
2018 prefix += '/'
2017 prefix += '/'
2019 return prefix
2018 return prefix
2020
2019
2021 def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
2020 def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
2022 eolmode='strict'):
2021 eolmode='strict'):
2023 prefix = _canonprefix(backend.repo, prefix)
2022 prefix = _canonprefix(backend.repo, prefix)
2024 def pstrip(p):
2023 def pstrip(p):
2025 return pathtransform(p, strip - 1, prefix)[1]
2024 return pathtransform(p, strip - 1, prefix)[1]
2026
2025
2027 rejects = 0
2026 rejects = 0
2028 err = 0
2027 err = 0
2029 current_file = None
2028 current_file = None
2030
2029
2031 for state, values in iterhunks(fp):
2030 for state, values in iterhunks(fp):
2032 if state == 'hunk':
2031 if state == 'hunk':
2033 if not current_file:
2032 if not current_file:
2034 continue
2033 continue
2035 ret = current_file.apply(values)
2034 ret = current_file.apply(values)
2036 if ret > 0:
2035 if ret > 0:
2037 err = 1
2036 err = 1
2038 elif state == 'file':
2037 elif state == 'file':
2039 if current_file:
2038 if current_file:
2040 rejects += current_file.close()
2039 rejects += current_file.close()
2041 current_file = None
2040 current_file = None
2042 afile, bfile, first_hunk, gp = values
2041 afile, bfile, first_hunk, gp = values
2043 if gp:
2042 if gp:
2044 gp.path = pstrip(gp.path)
2043 gp.path = pstrip(gp.path)
2045 if gp.oldpath:
2044 if gp.oldpath:
2046 gp.oldpath = pstrip(gp.oldpath)
2045 gp.oldpath = pstrip(gp.oldpath)
2047 else:
2046 else:
2048 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
2047 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
2049 prefix)
2048 prefix)
2050 if gp.op == 'RENAME':
2049 if gp.op == 'RENAME':
2051 backend.unlink(gp.oldpath)
2050 backend.unlink(gp.oldpath)
2052 if not first_hunk:
2051 if not first_hunk:
2053 if gp.op == 'DELETE':
2052 if gp.op == 'DELETE':
2054 backend.unlink(gp.path)
2053 backend.unlink(gp.path)
2055 continue
2054 continue
2056 data, mode = None, None
2055 data, mode = None, None
2057 if gp.op in ('RENAME', 'COPY'):
2056 if gp.op in ('RENAME', 'COPY'):
2058 data, mode = store.getfile(gp.oldpath)[:2]
2057 data, mode = store.getfile(gp.oldpath)[:2]
2059 if data is None:
2058 if data is None:
2060 # This means that the old path does not exist
2059 # This means that the old path does not exist
2061 raise PatchError(_("source file '%s' does not exist")
2060 raise PatchError(_("source file '%s' does not exist")
2062 % gp.oldpath)
2061 % gp.oldpath)
2063 if gp.mode:
2062 if gp.mode:
2064 mode = gp.mode
2063 mode = gp.mode
2065 if gp.op == 'ADD':
2064 if gp.op == 'ADD':
2066 # Added files without content have no hunk and
2065 # Added files without content have no hunk and
2067 # must be created
2066 # must be created
2068 data = ''
2067 data = ''
2069 if data or mode:
2068 if data or mode:
2070 if (gp.op in ('ADD', 'RENAME', 'COPY')
2069 if (gp.op in ('ADD', 'RENAME', 'COPY')
2071 and backend.exists(gp.path)):
2070 and backend.exists(gp.path)):
2072 raise PatchError(_("cannot create %s: destination "
2071 raise PatchError(_("cannot create %s: destination "
2073 "already exists") % gp.path)
2072 "already exists") % gp.path)
2074 backend.setfile(gp.path, data, mode, gp.oldpath)
2073 backend.setfile(gp.path, data, mode, gp.oldpath)
2075 continue
2074 continue
2076 try:
2075 try:
2077 current_file = patcher(ui, gp, backend, store,
2076 current_file = patcher(ui, gp, backend, store,
2078 eolmode=eolmode)
2077 eolmode=eolmode)
2079 except PatchError as inst:
2078 except PatchError as inst:
2080 ui.warn(str(inst) + '\n')
2079 ui.warn(str(inst) + '\n')
2081 current_file = None
2080 current_file = None
2082 rejects += 1
2081 rejects += 1
2083 continue
2082 continue
2084 elif state == 'git':
2083 elif state == 'git':
2085 for gp in values:
2084 for gp in values:
2086 path = pstrip(gp.oldpath)
2085 path = pstrip(gp.oldpath)
2087 data, mode = backend.getfile(path)
2086 data, mode = backend.getfile(path)
2088 if data is None:
2087 if data is None:
2089 # The error ignored here will trigger a getfile()
2088 # The error ignored here will trigger a getfile()
2090 # error in a place more appropriate for error
2089 # error in a place more appropriate for error
2091 # handling, and will not interrupt the patching
2090 # handling, and will not interrupt the patching
2092 # process.
2091 # process.
2093 pass
2092 pass
2094 else:
2093 else:
2095 store.setfile(path, data, mode)
2094 store.setfile(path, data, mode)
2096 else:
2095 else:
2097 raise error.Abort(_('unsupported parser state: %s') % state)
2096 raise error.Abort(_('unsupported parser state: %s') % state)
2098
2097
2099 if current_file:
2098 if current_file:
2100 rejects += current_file.close()
2099 rejects += current_file.close()
2101
2100
2102 if rejects:
2101 if rejects:
2103 return -1
2102 return -1
2104 return err
2103 return err
2105
2104
2106 def _externalpatch(ui, repo, patcher, patchname, strip, files,
2105 def _externalpatch(ui, repo, patcher, patchname, strip, files,
2107 similarity):
2106 similarity):
2108 """use <patcher> to apply <patchname> to the working directory.
2107 """use <patcher> to apply <patchname> to the working directory.
2109 returns whether patch was applied with fuzz factor."""
2108 returns whether patch was applied with fuzz factor."""
2110
2109
2111 fuzz = False
2110 fuzz = False
2112 args = []
2111 args = []
2113 cwd = repo.root
2112 cwd = repo.root
2114 if cwd:
2113 if cwd:
2115 args.append('-d %s' % procutil.shellquote(cwd))
2114 args.append('-d %s' % procutil.shellquote(cwd))
2116 cmd = ('%s %s -p%d < %s'
2115 cmd = ('%s %s -p%d < %s'
2117 % (patcher, ' '.join(args), strip, procutil.shellquote(patchname)))
2116 % (patcher, ' '.join(args), strip, procutil.shellquote(patchname)))
2118 ui.debug('Using external patch tool: %s\n' % cmd)
2117 ui.debug('Using external patch tool: %s\n' % cmd)
2119 fp = procutil.popen(cmd, 'rb')
2118 fp = procutil.popen(cmd, 'rb')
2120 try:
2119 try:
2121 for line in util.iterfile(fp):
2120 for line in util.iterfile(fp):
2122 line = line.rstrip()
2121 line = line.rstrip()
2123 ui.note(line + '\n')
2122 ui.note(line + '\n')
2124 if line.startswith('patching file '):
2123 if line.startswith('patching file '):
2125 pf = util.parsepatchoutput(line)
2124 pf = util.parsepatchoutput(line)
2126 printed_file = False
2125 printed_file = False
2127 files.add(pf)
2126 files.add(pf)
2128 elif line.find('with fuzz') >= 0:
2127 elif line.find('with fuzz') >= 0:
2129 fuzz = True
2128 fuzz = True
2130 if not printed_file:
2129 if not printed_file:
2131 ui.warn(pf + '\n')
2130 ui.warn(pf + '\n')
2132 printed_file = True
2131 printed_file = True
2133 ui.warn(line + '\n')
2132 ui.warn(line + '\n')
2134 elif line.find('saving rejects to file') >= 0:
2133 elif line.find('saving rejects to file') >= 0:
2135 ui.warn(line + '\n')
2134 ui.warn(line + '\n')
2136 elif line.find('FAILED') >= 0:
2135 elif line.find('FAILED') >= 0:
2137 if not printed_file:
2136 if not printed_file:
2138 ui.warn(pf + '\n')
2137 ui.warn(pf + '\n')
2139 printed_file = True
2138 printed_file = True
2140 ui.warn(line + '\n')
2139 ui.warn(line + '\n')
2141 finally:
2140 finally:
2142 if files:
2141 if files:
2143 scmutil.marktouched(repo, files, similarity)
2142 scmutil.marktouched(repo, files, similarity)
2144 code = fp.close()
2143 code = fp.close()
2145 if code:
2144 if code:
2146 raise PatchError(_("patch command failed: %s") %
2145 raise PatchError(_("patch command failed: %s") %
2147 procutil.explainexit(code))
2146 procutil.explainexit(code))
2148 return fuzz
2147 return fuzz
2149
2148
2150 def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
2149 def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
2151 eolmode='strict'):
2150 eolmode='strict'):
2152 if files is None:
2151 if files is None:
2153 files = set()
2152 files = set()
2154 if eolmode is None:
2153 if eolmode is None:
2155 eolmode = ui.config('patch', 'eol')
2154 eolmode = ui.config('patch', 'eol')
2156 if eolmode.lower() not in eolmodes:
2155 if eolmode.lower() not in eolmodes:
2157 raise error.Abort(_('unsupported line endings type: %s') % eolmode)
2156 raise error.Abort(_('unsupported line endings type: %s') % eolmode)
2158 eolmode = eolmode.lower()
2157 eolmode = eolmode.lower()
2159
2158
2160 store = filestore()
2159 store = filestore()
2161 try:
2160 try:
2162 fp = open(patchobj, 'rb')
2161 fp = open(patchobj, 'rb')
2163 except TypeError:
2162 except TypeError:
2164 fp = patchobj
2163 fp = patchobj
2165 try:
2164 try:
2166 ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
2165 ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
2167 eolmode=eolmode)
2166 eolmode=eolmode)
2168 finally:
2167 finally:
2169 if fp != patchobj:
2168 if fp != patchobj:
2170 fp.close()
2169 fp.close()
2171 files.update(backend.close())
2170 files.update(backend.close())
2172 store.close()
2171 store.close()
2173 if ret < 0:
2172 if ret < 0:
2174 raise PatchError(_('patch failed to apply'))
2173 raise PatchError(_('patch failed to apply'))
2175 return ret > 0
2174 return ret > 0
2176
2175
2177 def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
2176 def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
2178 eolmode='strict', similarity=0):
2177 eolmode='strict', similarity=0):
2179 """use builtin patch to apply <patchobj> to the working directory.
2178 """use builtin patch to apply <patchobj> to the working directory.
2180 returns whether patch was applied with fuzz factor."""
2179 returns whether patch was applied with fuzz factor."""
2181 backend = workingbackend(ui, repo, similarity)
2180 backend = workingbackend(ui, repo, similarity)
2182 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2181 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2183
2182
2184 def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
2183 def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
2185 eolmode='strict'):
2184 eolmode='strict'):
2186 backend = repobackend(ui, repo, ctx, store)
2185 backend = repobackend(ui, repo, ctx, store)
2187 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2186 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2188
2187
2189 def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
2188 def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
2190 similarity=0):
2189 similarity=0):
2191 """Apply <patchname> to the working directory.
2190 """Apply <patchname> to the working directory.
2192
2191
2193 'eolmode' specifies how end of lines should be handled. It can be:
2192 'eolmode' specifies how end of lines should be handled. It can be:
2194 - 'strict': inputs are read in binary mode, EOLs are preserved
2193 - 'strict': inputs are read in binary mode, EOLs are preserved
2195 - 'crlf': EOLs are ignored when patching and reset to CRLF
2194 - 'crlf': EOLs are ignored when patching and reset to CRLF
2196 - 'lf': EOLs are ignored when patching and reset to LF
2195 - 'lf': EOLs are ignored when patching and reset to LF
2197 - None: get it from user settings, default to 'strict'
2196 - None: get it from user settings, default to 'strict'
2198 'eolmode' is ignored when using an external patcher program.
2197 'eolmode' is ignored when using an external patcher program.
2199
2198
2200 Returns whether patch was applied with fuzz factor.
2199 Returns whether patch was applied with fuzz factor.
2201 """
2200 """
2202 patcher = ui.config('ui', 'patch')
2201 patcher = ui.config('ui', 'patch')
2203 if files is None:
2202 if files is None:
2204 files = set()
2203 files = set()
2205 if patcher:
2204 if patcher:
2206 return _externalpatch(ui, repo, patcher, patchname, strip,
2205 return _externalpatch(ui, repo, patcher, patchname, strip,
2207 files, similarity)
2206 files, similarity)
2208 return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
2207 return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
2209 similarity)
2208 similarity)
2210
2209
2211 def changedfiles(ui, repo, patchpath, strip=1, prefix=''):
2210 def changedfiles(ui, repo, patchpath, strip=1, prefix=''):
2212 backend = fsbackend(ui, repo.root)
2211 backend = fsbackend(ui, repo.root)
2213 prefix = _canonprefix(repo, prefix)
2212 prefix = _canonprefix(repo, prefix)
2214 with open(patchpath, 'rb') as fp:
2213 with open(patchpath, 'rb') as fp:
2215 changed = set()
2214 changed = set()
2216 for state, values in iterhunks(fp):
2215 for state, values in iterhunks(fp):
2217 if state == 'file':
2216 if state == 'file':
2218 afile, bfile, first_hunk, gp = values
2217 afile, bfile, first_hunk, gp = values
2219 if gp:
2218 if gp:
2220 gp.path = pathtransform(gp.path, strip - 1, prefix)[1]
2219 gp.path = pathtransform(gp.path, strip - 1, prefix)[1]
2221 if gp.oldpath:
2220 if gp.oldpath:
2222 gp.oldpath = pathtransform(gp.oldpath, strip - 1,
2221 gp.oldpath = pathtransform(gp.oldpath, strip - 1,
2223 prefix)[1]
2222 prefix)[1]
2224 else:
2223 else:
2225 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
2224 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
2226 prefix)
2225 prefix)
2227 changed.add(gp.path)
2226 changed.add(gp.path)
2228 if gp.op == 'RENAME':
2227 if gp.op == 'RENAME':
2229 changed.add(gp.oldpath)
2228 changed.add(gp.oldpath)
2230 elif state not in ('hunk', 'git'):
2229 elif state not in ('hunk', 'git'):
2231 raise error.Abort(_('unsupported parser state: %s') % state)
2230 raise error.Abort(_('unsupported parser state: %s') % state)
2232 return changed
2231 return changed
2233
2232
2234 class GitDiffRequired(Exception):
2233 class GitDiffRequired(Exception):
2235 pass
2234 pass
2236
2235
2237 diffopts = diffutil.diffallopts
2236 diffopts = diffutil.diffallopts
2238 diffallopts = diffutil.diffallopts
2237 diffallopts = diffutil.diffallopts
2239 difffeatureopts = diffutil.difffeatureopts
2238 difffeatureopts = diffutil.difffeatureopts
2240
2239
2241 def diff(repo, node1=None, node2=None, match=None, changes=None,
2240 def diff(repo, node1=None, node2=None, match=None, changes=None,
2242 opts=None, losedatafn=None, prefix='', relroot='', copy=None,
2241 opts=None, losedatafn=None, pathfn=None, copy=None,
2243 copysourcematch=None, hunksfilterfn=None):
2242 copysourcematch=None, hunksfilterfn=None):
2244 '''yields diff of changes to files between two nodes, or node and
2243 '''yields diff of changes to files between two nodes, or node and
2245 working directory.
2244 working directory.
2246
2245
2247 if node1 is None, use first dirstate parent instead.
2246 if node1 is None, use first dirstate parent instead.
2248 if node2 is None, compare node1 with working directory.
2247 if node2 is None, compare node1 with working directory.
2249
2248
2250 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
2249 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
2251 every time some change cannot be represented with the current
2250 every time some change cannot be represented with the current
2252 patch format. Return False to upgrade to git patch format, True to
2251 patch format. Return False to upgrade to git patch format, True to
2253 accept the loss or raise an exception to abort the diff. It is
2252 accept the loss or raise an exception to abort the diff. It is
2254 called with the name of current file being diffed as 'fn'. If set
2253 called with the name of current file being diffed as 'fn'. If set
2255 to None, patches will always be upgraded to git format when
2254 to None, patches will always be upgraded to git format when
2256 necessary.
2255 necessary.
2257
2256
2258 prefix is a filename prefix that is prepended to all filenames on
2257 prefix is a filename prefix that is prepended to all filenames on
2259 display (used for subrepos).
2258 display (used for subrepos).
2260
2259
2261 relroot, if not empty, must be normalized with a trailing /. Any match
2260 relroot, if not empty, must be normalized with a trailing /. Any match
2262 patterns that fall outside it will be ignored.
2261 patterns that fall outside it will be ignored.
2263
2262
2264 copy, if not empty, should contain mappings {dst@y: src@x} of copy
2263 copy, if not empty, should contain mappings {dst@y: src@x} of copy
2265 information.
2264 information.
2266
2265
2267 if copysourcematch is not None, then copy sources will be filtered by this
2266 if copysourcematch is not None, then copy sources will be filtered by this
2268 matcher
2267 matcher
2269
2268
2270 hunksfilterfn, if not None, should be a function taking a filectx and
2269 hunksfilterfn, if not None, should be a function taking a filectx and
2271 hunks generator that may yield filtered hunks.
2270 hunks generator that may yield filtered hunks.
2272 '''
2271 '''
2273 if not node1 and not node2:
2272 if not node1 and not node2:
2274 node1 = repo.dirstate.p1()
2273 node1 = repo.dirstate.p1()
2275
2274
2276 ctx1 = repo[node1]
2275 ctx1 = repo[node1]
2277 ctx2 = repo[node2]
2276 ctx2 = repo[node2]
2278
2277
2279 for fctx1, fctx2, hdr, hunks in diffhunks(
2278 for fctx1, fctx2, hdr, hunks in diffhunks(
2280 repo, ctx1=ctx1, ctx2=ctx2,
2279 repo, ctx1=ctx1, ctx2=ctx2, match=match, changes=changes, opts=opts,
2281 match=match, changes=changes, opts=opts,
2280 losedatafn=losedatafn, pathfn=pathfn, copy=copy,
2282 losedatafn=losedatafn, prefix=prefix, relroot=relroot, copy=copy,
2283 copysourcematch=copysourcematch):
2281 copysourcematch=copysourcematch):
2284 if hunksfilterfn is not None:
2282 if hunksfilterfn is not None:
2285 # If the file has been removed, fctx2 is None; but this should
2283 # If the file has been removed, fctx2 is None; but this should
2286 # not occur here since we catch removed files early in
2284 # not occur here since we catch removed files early in
2287 # logcmdutil.getlinerangerevs() for 'hg log -L'.
2285 # logcmdutil.getlinerangerevs() for 'hg log -L'.
2288 assert fctx2 is not None, \
2286 assert fctx2 is not None, \
2289 'fctx2 unexpectly None in diff hunks filtering'
2287 'fctx2 unexpectly None in diff hunks filtering'
2290 hunks = hunksfilterfn(fctx2, hunks)
2288 hunks = hunksfilterfn(fctx2, hunks)
2291 text = ''.join(sum((list(hlines) for hrange, hlines in hunks), []))
2289 text = ''.join(sum((list(hlines) for hrange, hlines in hunks), []))
2292 if hdr and (text or len(hdr) > 1):
2290 if hdr and (text or len(hdr) > 1):
2293 yield '\n'.join(hdr) + '\n'
2291 yield '\n'.join(hdr) + '\n'
2294 if text:
2292 if text:
2295 yield text
2293 yield text
2296
2294
2297 def diffhunks(repo, ctx1, ctx2, match=None, changes=None,
2295 def diffhunks(repo, ctx1, ctx2, match=None, changes=None, opts=None,
2298 opts=None, losedatafn=None, prefix='', relroot='', copy=None,
2296 losedatafn=None, pathfn=None, copy=None, copysourcematch=None):
2299 copysourcematch=None):
2300 """Yield diff of changes to files in the form of (`header`, `hunks`) tuples
2297 """Yield diff of changes to files in the form of (`header`, `hunks`) tuples
2301 where `header` is a list of diff headers and `hunks` is an iterable of
2298 where `header` is a list of diff headers and `hunks` is an iterable of
2302 (`hunkrange`, `hunklines`) tuples.
2299 (`hunkrange`, `hunklines`) tuples.
2303
2300
2304 See diff() for the meaning of parameters.
2301 See diff() for the meaning of parameters.
2305 """
2302 """
2306
2303
2307 if opts is None:
2304 if opts is None:
2308 opts = mdiff.defaultopts
2305 opts = mdiff.defaultopts
2309
2306
2310 def lrugetfilectx():
2307 def lrugetfilectx():
2311 cache = {}
2308 cache = {}
2312 order = collections.deque()
2309 order = collections.deque()
2313 def getfilectx(f, ctx):
2310 def getfilectx(f, ctx):
2314 fctx = ctx.filectx(f, filelog=cache.get(f))
2311 fctx = ctx.filectx(f, filelog=cache.get(f))
2315 if f not in cache:
2312 if f not in cache:
2316 if len(cache) > 20:
2313 if len(cache) > 20:
2317 del cache[order.popleft()]
2314 del cache[order.popleft()]
2318 cache[f] = fctx.filelog()
2315 cache[f] = fctx.filelog()
2319 else:
2316 else:
2320 order.remove(f)
2317 order.remove(f)
2321 order.append(f)
2318 order.append(f)
2322 return fctx
2319 return fctx
2323 return getfilectx
2320 return getfilectx
2324 getfilectx = lrugetfilectx()
2321 getfilectx = lrugetfilectx()
2325
2322
2326 if not changes:
2323 if not changes:
2327 changes = ctx1.status(ctx2, match=match)
2324 changes = ctx1.status(ctx2, match=match)
2328 modified, added, removed = changes[:3]
2325 modified, added, removed = changes[:3]
2329
2326
2330 if not modified and not added and not removed:
2327 if not modified and not added and not removed:
2331 return []
2328 return []
2332
2329
2333 if repo.ui.debugflag:
2330 if repo.ui.debugflag:
2334 hexfunc = hex
2331 hexfunc = hex
2335 else:
2332 else:
2336 hexfunc = short
2333 hexfunc = short
2337 revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]
2334 revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]
2338
2335
2339 if copy is None:
2336 if copy is None:
2340 copy = {}
2337 copy = {}
2341 if opts.git or opts.upgrade:
2338 if opts.git or opts.upgrade:
2342 copy = copies.pathcopies(ctx1, ctx2, match=match)
2339 copy = copies.pathcopies(ctx1, ctx2, match=match)
2343
2340
2344 if copysourcematch:
2341 if copysourcematch:
2345 # filter out copies where source side isn't inside the matcher
2342 # filter out copies where source side isn't inside the matcher
2346 # (copies.pathcopies() already filtered out the destination)
2343 # (copies.pathcopies() already filtered out the destination)
2347 copy = {dst: src for dst, src in copy.iteritems()
2344 copy = {dst: src for dst, src in copy.iteritems()
2348 if copysourcematch(src)}
2345 if copysourcematch(src)}
2349
2346
2350 modifiedset = set(modified)
2347 modifiedset = set(modified)
2351 addedset = set(added)
2348 addedset = set(added)
2352 removedset = set(removed)
2349 removedset = set(removed)
2353 for f in modified:
2350 for f in modified:
2354 if f not in ctx1:
2351 if f not in ctx1:
2355 # Fix up added, since merged-in additions appear as
2352 # Fix up added, since merged-in additions appear as
2356 # modifications during merges
2353 # modifications during merges
2357 modifiedset.remove(f)
2354 modifiedset.remove(f)
2358 addedset.add(f)
2355 addedset.add(f)
2359 for f in removed:
2356 for f in removed:
2360 if f not in ctx1:
2357 if f not in ctx1:
2361 # Merged-in additions that are then removed are reported as removed.
2358 # Merged-in additions that are then removed are reported as removed.
2362 # They are not in ctx1, so We don't want to show them in the diff.
2359 # They are not in ctx1, so We don't want to show them in the diff.
2363 removedset.remove(f)
2360 removedset.remove(f)
2364 modified = sorted(modifiedset)
2361 modified = sorted(modifiedset)
2365 added = sorted(addedset)
2362 added = sorted(addedset)
2366 removed = sorted(removedset)
2363 removed = sorted(removedset)
2367 for dst, src in list(copy.items()):
2364 for dst, src in list(copy.items()):
2368 if src not in ctx1:
2365 if src not in ctx1:
2369 # Files merged in during a merge and then copied/renamed are
2366 # Files merged in during a merge and then copied/renamed are
2370 # reported as copies. We want to show them in the diff as additions.
2367 # reported as copies. We want to show them in the diff as additions.
2371 del copy[dst]
2368 del copy[dst]
2372
2369
2373 prefetchmatch = scmutil.matchfiles(
2370 prefetchmatch = scmutil.matchfiles(
2374 repo, list(modifiedset | addedset | removedset))
2371 repo, list(modifiedset | addedset | removedset))
2375 scmutil.prefetchfiles(repo, [ctx1.rev(), ctx2.rev()], prefetchmatch)
2372 scmutil.prefetchfiles(repo, [ctx1.rev(), ctx2.rev()], prefetchmatch)
2376
2373
2377 def difffn(opts, losedata):
2374 def difffn(opts, losedata):
2378 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2375 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2379 copy, getfilectx, opts, losedata, prefix, relroot)
2376 copy, getfilectx, opts, losedata, pathfn)
2380 if opts.upgrade and not opts.git:
2377 if opts.upgrade and not opts.git:
2381 try:
2378 try:
2382 def losedata(fn):
2379 def losedata(fn):
2383 if not losedatafn or not losedatafn(fn=fn):
2380 if not losedatafn or not losedatafn(fn=fn):
2384 raise GitDiffRequired
2381 raise GitDiffRequired
2385 # Buffer the whole output until we are sure it can be generated
2382 # Buffer the whole output until we are sure it can be generated
2386 return list(difffn(opts.copy(git=False), losedata))
2383 return list(difffn(opts.copy(git=False), losedata))
2387 except GitDiffRequired:
2384 except GitDiffRequired:
2388 return difffn(opts.copy(git=True), None)
2385 return difffn(opts.copy(git=True), None)
2389 else:
2386 else:
2390 return difffn(opts, None)
2387 return difffn(opts, None)
2391
2388
2392 def diffsinglehunk(hunklines):
2389 def diffsinglehunk(hunklines):
2393 """yield tokens for a list of lines in a single hunk"""
2390 """yield tokens for a list of lines in a single hunk"""
2394 for line in hunklines:
2391 for line in hunklines:
2395 # chomp
2392 # chomp
2396 chompline = line.rstrip('\r\n')
2393 chompline = line.rstrip('\r\n')
2397 # highlight tabs and trailing whitespace
2394 # highlight tabs and trailing whitespace
2398 stripline = chompline.rstrip()
2395 stripline = chompline.rstrip()
2399 if line.startswith('-'):
2396 if line.startswith('-'):
2400 label = 'diff.deleted'
2397 label = 'diff.deleted'
2401 elif line.startswith('+'):
2398 elif line.startswith('+'):
2402 label = 'diff.inserted'
2399 label = 'diff.inserted'
2403 else:
2400 else:
2404 raise error.ProgrammingError('unexpected hunk line: %s' % line)
2401 raise error.ProgrammingError('unexpected hunk line: %s' % line)
2405 for token in tabsplitter.findall(stripline):
2402 for token in tabsplitter.findall(stripline):
2406 if token.startswith('\t'):
2403 if token.startswith('\t'):
2407 yield (token, 'diff.tab')
2404 yield (token, 'diff.tab')
2408 else:
2405 else:
2409 yield (token, label)
2406 yield (token, label)
2410
2407
2411 if chompline != stripline:
2408 if chompline != stripline:
2412 yield (chompline[len(stripline):], 'diff.trailingwhitespace')
2409 yield (chompline[len(stripline):], 'diff.trailingwhitespace')
2413 if chompline != line:
2410 if chompline != line:
2414 yield (line[len(chompline):], '')
2411 yield (line[len(chompline):], '')
2415
2412
2416 def diffsinglehunkinline(hunklines):
2413 def diffsinglehunkinline(hunklines):
2417 """yield tokens for a list of lines in a single hunk, with inline colors"""
2414 """yield tokens for a list of lines in a single hunk, with inline colors"""
2418 # prepare deleted, and inserted content
2415 # prepare deleted, and inserted content
2419 a = ''
2416 a = ''
2420 b = ''
2417 b = ''
2421 for line in hunklines:
2418 for line in hunklines:
2422 if line[0:1] == '-':
2419 if line[0:1] == '-':
2423 a += line[1:]
2420 a += line[1:]
2424 elif line[0:1] == '+':
2421 elif line[0:1] == '+':
2425 b += line[1:]
2422 b += line[1:]
2426 else:
2423 else:
2427 raise error.ProgrammingError('unexpected hunk line: %s' % line)
2424 raise error.ProgrammingError('unexpected hunk line: %s' % line)
2428 # fast path: if either side is empty, use diffsinglehunk
2425 # fast path: if either side is empty, use diffsinglehunk
2429 if not a or not b:
2426 if not a or not b:
2430 for t in diffsinglehunk(hunklines):
2427 for t in diffsinglehunk(hunklines):
2431 yield t
2428 yield t
2432 return
2429 return
2433 # re-split the content into words
2430 # re-split the content into words
2434 al = wordsplitter.findall(a)
2431 al = wordsplitter.findall(a)
2435 bl = wordsplitter.findall(b)
2432 bl = wordsplitter.findall(b)
2436 # re-arrange the words to lines since the diff algorithm is line-based
2433 # re-arrange the words to lines since the diff algorithm is line-based
2437 aln = [s if s == '\n' else s + '\n' for s in al]
2434 aln = [s if s == '\n' else s + '\n' for s in al]
2438 bln = [s if s == '\n' else s + '\n' for s in bl]
2435 bln = [s if s == '\n' else s + '\n' for s in bl]
2439 an = ''.join(aln)
2436 an = ''.join(aln)
2440 bn = ''.join(bln)
2437 bn = ''.join(bln)
2441 # run the diff algorithm, prepare atokens and btokens
2438 # run the diff algorithm, prepare atokens and btokens
2442 atokens = []
2439 atokens = []
2443 btokens = []
2440 btokens = []
2444 blocks = mdiff.allblocks(an, bn, lines1=aln, lines2=bln)
2441 blocks = mdiff.allblocks(an, bn, lines1=aln, lines2=bln)
2445 for (a1, a2, b1, b2), btype in blocks:
2442 for (a1, a2, b1, b2), btype in blocks:
2446 changed = btype == '!'
2443 changed = btype == '!'
2447 for token in mdiff.splitnewlines(''.join(al[a1:a2])):
2444 for token in mdiff.splitnewlines(''.join(al[a1:a2])):
2448 atokens.append((changed, token))
2445 atokens.append((changed, token))
2449 for token in mdiff.splitnewlines(''.join(bl[b1:b2])):
2446 for token in mdiff.splitnewlines(''.join(bl[b1:b2])):
2450 btokens.append((changed, token))
2447 btokens.append((changed, token))
2451
2448
2452 # yield deleted tokens, then inserted ones
2449 # yield deleted tokens, then inserted ones
2453 for prefix, label, tokens in [('-', 'diff.deleted', atokens),
2450 for prefix, label, tokens in [('-', 'diff.deleted', atokens),
2454 ('+', 'diff.inserted', btokens)]:
2451 ('+', 'diff.inserted', btokens)]:
2455 nextisnewline = True
2452 nextisnewline = True
2456 for changed, token in tokens:
2453 for changed, token in tokens:
2457 if nextisnewline:
2454 if nextisnewline:
2458 yield (prefix, label)
2455 yield (prefix, label)
2459 nextisnewline = False
2456 nextisnewline = False
2460 # special handling line end
2457 # special handling line end
2461 isendofline = token.endswith('\n')
2458 isendofline = token.endswith('\n')
2462 if isendofline:
2459 if isendofline:
2463 chomp = token[:-1] # chomp
2460 chomp = token[:-1] # chomp
2464 if chomp.endswith('\r'):
2461 if chomp.endswith('\r'):
2465 chomp = chomp[:-1]
2462 chomp = chomp[:-1]
2466 endofline = token[len(chomp):]
2463 endofline = token[len(chomp):]
2467 token = chomp.rstrip() # detect spaces at the end
2464 token = chomp.rstrip() # detect spaces at the end
2468 endspaces = chomp[len(token):]
2465 endspaces = chomp[len(token):]
2469 # scan tabs
2466 # scan tabs
2470 for maybetab in tabsplitter.findall(token):
2467 for maybetab in tabsplitter.findall(token):
2471 if b'\t' == maybetab[0:1]:
2468 if b'\t' == maybetab[0:1]:
2472 currentlabel = 'diff.tab'
2469 currentlabel = 'diff.tab'
2473 else:
2470 else:
2474 if changed:
2471 if changed:
2475 currentlabel = label + '.changed'
2472 currentlabel = label + '.changed'
2476 else:
2473 else:
2477 currentlabel = label + '.unchanged'
2474 currentlabel = label + '.unchanged'
2478 yield (maybetab, currentlabel)
2475 yield (maybetab, currentlabel)
2479 if isendofline:
2476 if isendofline:
2480 if endspaces:
2477 if endspaces:
2481 yield (endspaces, 'diff.trailingwhitespace')
2478 yield (endspaces, 'diff.trailingwhitespace')
2482 yield (endofline, '')
2479 yield (endofline, '')
2483 nextisnewline = True
2480 nextisnewline = True
2484
2481
2485 def difflabel(func, *args, **kw):
2482 def difflabel(func, *args, **kw):
2486 '''yields 2-tuples of (output, label) based on the output of func()'''
2483 '''yields 2-tuples of (output, label) based on the output of func()'''
2487 if kw.get(r'opts') and kw[r'opts'].worddiff:
2484 if kw.get(r'opts') and kw[r'opts'].worddiff:
2488 dodiffhunk = diffsinglehunkinline
2485 dodiffhunk = diffsinglehunkinline
2489 else:
2486 else:
2490 dodiffhunk = diffsinglehunk
2487 dodiffhunk = diffsinglehunk
2491 headprefixes = [('diff', 'diff.diffline'),
2488 headprefixes = [('diff', 'diff.diffline'),
2492 ('copy', 'diff.extended'),
2489 ('copy', 'diff.extended'),
2493 ('rename', 'diff.extended'),
2490 ('rename', 'diff.extended'),
2494 ('old', 'diff.extended'),
2491 ('old', 'diff.extended'),
2495 ('new', 'diff.extended'),
2492 ('new', 'diff.extended'),
2496 ('deleted', 'diff.extended'),
2493 ('deleted', 'diff.extended'),
2497 ('index', 'diff.extended'),
2494 ('index', 'diff.extended'),
2498 ('similarity', 'diff.extended'),
2495 ('similarity', 'diff.extended'),
2499 ('---', 'diff.file_a'),
2496 ('---', 'diff.file_a'),
2500 ('+++', 'diff.file_b')]
2497 ('+++', 'diff.file_b')]
2501 textprefixes = [('@', 'diff.hunk'),
2498 textprefixes = [('@', 'diff.hunk'),
2502 # - and + are handled by diffsinglehunk
2499 # - and + are handled by diffsinglehunk
2503 ]
2500 ]
2504 head = False
2501 head = False
2505
2502
2506 # buffers a hunk, i.e. adjacent "-", "+" lines without other changes.
2503 # buffers a hunk, i.e. adjacent "-", "+" lines without other changes.
2507 hunkbuffer = []
2504 hunkbuffer = []
2508 def consumehunkbuffer():
2505 def consumehunkbuffer():
2509 if hunkbuffer:
2506 if hunkbuffer:
2510 for token in dodiffhunk(hunkbuffer):
2507 for token in dodiffhunk(hunkbuffer):
2511 yield token
2508 yield token
2512 hunkbuffer[:] = []
2509 hunkbuffer[:] = []
2513
2510
2514 for chunk in func(*args, **kw):
2511 for chunk in func(*args, **kw):
2515 lines = chunk.split('\n')
2512 lines = chunk.split('\n')
2516 linecount = len(lines)
2513 linecount = len(lines)
2517 for i, line in enumerate(lines):
2514 for i, line in enumerate(lines):
2518 if head:
2515 if head:
2519 if line.startswith('@'):
2516 if line.startswith('@'):
2520 head = False
2517 head = False
2521 else:
2518 else:
2522 if line and not line.startswith((' ', '+', '-', '@', '\\')):
2519 if line and not line.startswith((' ', '+', '-', '@', '\\')):
2523 head = True
2520 head = True
2524 diffline = False
2521 diffline = False
2525 if not head and line and line.startswith(('+', '-')):
2522 if not head and line and line.startswith(('+', '-')):
2526 diffline = True
2523 diffline = True
2527
2524
2528 prefixes = textprefixes
2525 prefixes = textprefixes
2529 if head:
2526 if head:
2530 prefixes = headprefixes
2527 prefixes = headprefixes
2531 if diffline:
2528 if diffline:
2532 # buffered
2529 # buffered
2533 bufferedline = line
2530 bufferedline = line
2534 if i + 1 < linecount:
2531 if i + 1 < linecount:
2535 bufferedline += "\n"
2532 bufferedline += "\n"
2536 hunkbuffer.append(bufferedline)
2533 hunkbuffer.append(bufferedline)
2537 else:
2534 else:
2538 # unbuffered
2535 # unbuffered
2539 for token in consumehunkbuffer():
2536 for token in consumehunkbuffer():
2540 yield token
2537 yield token
2541 stripline = line.rstrip()
2538 stripline = line.rstrip()
2542 for prefix, label in prefixes:
2539 for prefix, label in prefixes:
2543 if stripline.startswith(prefix):
2540 if stripline.startswith(prefix):
2544 yield (stripline, label)
2541 yield (stripline, label)
2545 if line != stripline:
2542 if line != stripline:
2546 yield (line[len(stripline):],
2543 yield (line[len(stripline):],
2547 'diff.trailingwhitespace')
2544 'diff.trailingwhitespace')
2548 break
2545 break
2549 else:
2546 else:
2550 yield (line, '')
2547 yield (line, '')
2551 if i + 1 < linecount:
2548 if i + 1 < linecount:
2552 yield ('\n', '')
2549 yield ('\n', '')
2553 for token in consumehunkbuffer():
2550 for token in consumehunkbuffer():
2554 yield token
2551 yield token
2555
2552
2556 def diffui(*args, **kw):
2553 def diffui(*args, **kw):
2557 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
2554 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
2558 return difflabel(diff, *args, **kw)
2555 return difflabel(diff, *args, **kw)
2559
2556
2560 def _filepairs(modified, added, removed, copy, opts):
2557 def _filepairs(modified, added, removed, copy, opts):
2561 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2558 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2562 before and f2 is the the name after. For added files, f1 will be None,
2559 before and f2 is the the name after. For added files, f1 will be None,
2563 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2560 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2564 or 'rename' (the latter two only if opts.git is set).'''
2561 or 'rename' (the latter two only if opts.git is set).'''
2565 gone = set()
2562 gone = set()
2566
2563
2567 copyto = dict([(v, k) for k, v in copy.items()])
2564 copyto = dict([(v, k) for k, v in copy.items()])
2568
2565
2569 addedset, removedset = set(added), set(removed)
2566 addedset, removedset = set(added), set(removed)
2570
2567
2571 for f in sorted(modified + added + removed):
2568 for f in sorted(modified + added + removed):
2572 copyop = None
2569 copyop = None
2573 f1, f2 = f, f
2570 f1, f2 = f, f
2574 if f in addedset:
2571 if f in addedset:
2575 f1 = None
2572 f1 = None
2576 if f in copy:
2573 if f in copy:
2577 if opts.git:
2574 if opts.git:
2578 f1 = copy[f]
2575 f1 = copy[f]
2579 if f1 in removedset and f1 not in gone:
2576 if f1 in removedset and f1 not in gone:
2580 copyop = 'rename'
2577 copyop = 'rename'
2581 gone.add(f1)
2578 gone.add(f1)
2582 else:
2579 else:
2583 copyop = 'copy'
2580 copyop = 'copy'
2584 elif f in removedset:
2581 elif f in removedset:
2585 f2 = None
2582 f2 = None
2586 if opts.git:
2583 if opts.git:
2587 # have we already reported a copy above?
2584 # have we already reported a copy above?
2588 if (f in copyto and copyto[f] in addedset
2585 if (f in copyto and copyto[f] in addedset
2589 and copy[copyto[f]] == f):
2586 and copy[copyto[f]] == f):
2590 continue
2587 continue
2591 yield f1, f2, copyop
2588 yield f1, f2, copyop
2592
2589
2593 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2590 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2594 copy, getfilectx, opts, losedatafn, prefix, relroot):
2591 copy, getfilectx, opts, losedatafn, pathfn):
2595 '''given input data, generate a diff and yield it in blocks
2592 '''given input data, generate a diff and yield it in blocks
2596
2593
2597 If generating a diff would lose data like flags or binary data and
2594 If generating a diff would lose data like flags or binary data and
2598 losedatafn is not None, it will be called.
2595 losedatafn is not None, it will be called.
2599
2596
2600 relroot is removed and prefix is added to every path in the diff output.
2597 pathfn is applied to every path in the diff output.
2601
2598 '''
2602 If relroot is not empty, this function expects every path in modified,
2603 added, removed and copy to start with it.'''
2604
2599
2605 def gitindex(text):
2600 def gitindex(text):
2606 if not text:
2601 if not text:
2607 text = ""
2602 text = ""
2608 l = len(text)
2603 l = len(text)
2609 s = hashlib.sha1('blob %d\0' % l)
2604 s = hashlib.sha1('blob %d\0' % l)
2610 s.update(text)
2605 s.update(text)
2611 return hex(s.digest())
2606 return hex(s.digest())
2612
2607
2613 if opts.noprefix:
2608 if opts.noprefix:
2614 aprefix = bprefix = ''
2609 aprefix = bprefix = ''
2615 else:
2610 else:
2616 aprefix = 'a/'
2611 aprefix = 'a/'
2617 bprefix = 'b/'
2612 bprefix = 'b/'
2618
2613
2619 def diffline(f, revs):
2614 def diffline(f, revs):
2620 revinfo = ' '.join(["-r %s" % rev for rev in revs])
2615 revinfo = ' '.join(["-r %s" % rev for rev in revs])
2621 return 'diff %s %s' % (revinfo, f)
2616 return 'diff %s %s' % (revinfo, f)
2622
2617
2623 def isempty(fctx):
2618 def isempty(fctx):
2624 return fctx is None or fctx.size() == 0
2619 return fctx is None or fctx.size() == 0
2625
2620
2626 date1 = dateutil.datestr(ctx1.date())
2621 date1 = dateutil.datestr(ctx1.date())
2627 date2 = dateutil.datestr(ctx2.date())
2622 date2 = dateutil.datestr(ctx2.date())
2628
2623
2629 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
2624 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
2630
2625
2631 if relroot != '' and (repo.ui.configbool('devel', 'all-warnings')
2626 if not pathfn:
2632 or repo.ui.configbool('devel', 'check-relroot')):
2627 pathfn = lambda f: f
2633 for f in modified + added + removed + list(copy) + list(copy.values()):
2634 if f is not None and not f.startswith(relroot):
2635 raise AssertionError(
2636 "file %s doesn't start with relroot %s" % (f, relroot))
2637
2628
2638 for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
2629 for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
2639 content1 = None
2630 content1 = None
2640 content2 = None
2631 content2 = None
2641 fctx1 = None
2632 fctx1 = None
2642 fctx2 = None
2633 fctx2 = None
2643 flag1 = None
2634 flag1 = None
2644 flag2 = None
2635 flag2 = None
2645 if f1:
2636 if f1:
2646 fctx1 = getfilectx(f1, ctx1)
2637 fctx1 = getfilectx(f1, ctx1)
2647 if opts.git or losedatafn:
2638 if opts.git or losedatafn:
2648 flag1 = ctx1.flags(f1)
2639 flag1 = ctx1.flags(f1)
2649 if f2:
2640 if f2:
2650 fctx2 = getfilectx(f2, ctx2)
2641 fctx2 = getfilectx(f2, ctx2)
2651 if opts.git or losedatafn:
2642 if opts.git or losedatafn:
2652 flag2 = ctx2.flags(f2)
2643 flag2 = ctx2.flags(f2)
2653 # if binary is True, output "summary" or "base85", but not "text diff"
2644 # if binary is True, output "summary" or "base85", but not "text diff"
2654 if opts.text:
2645 if opts.text:
2655 binary = False
2646 binary = False
2656 else:
2647 else:
2657 binary = any(f.isbinary() for f in [fctx1, fctx2] if f is not None)
2648 binary = any(f.isbinary() for f in [fctx1, fctx2] if f is not None)
2658
2649
2659 if losedatafn and not opts.git:
2650 if losedatafn and not opts.git:
2660 if (binary or
2651 if (binary or
2661 # copy/rename
2652 # copy/rename
2662 f2 in copy or
2653 f2 in copy or
2663 # empty file creation
2654 # empty file creation
2664 (not f1 and isempty(fctx2)) or
2655 (not f1 and isempty(fctx2)) or
2665 # empty file deletion
2656 # empty file deletion
2666 (isempty(fctx1) and not f2) or
2657 (isempty(fctx1) and not f2) or
2667 # create with flags
2658 # create with flags
2668 (not f1 and flag2) or
2659 (not f1 and flag2) or
2669 # change flags
2660 # change flags
2670 (f1 and f2 and flag1 != flag2)):
2661 (f1 and f2 and flag1 != flag2)):
2671 losedatafn(f2 or f1)
2662 losedatafn(f2 or f1)
2672
2663
2673 path1 = f1 or f2
2664 path1 = pathfn(f1 or f2)
2674 path2 = f2 or f1
2665 path2 = pathfn(f2 or f1)
2675 path1 = posixpath.join(prefix, path1[len(relroot):])
2676 path2 = posixpath.join(prefix, path2[len(relroot):])
2677 header = []
2666 header = []
2678 if opts.git:
2667 if opts.git:
2679 header.append('diff --git %s%s %s%s' %
2668 header.append('diff --git %s%s %s%s' %
2680 (aprefix, path1, bprefix, path2))
2669 (aprefix, path1, bprefix, path2))
2681 if not f1: # added
2670 if not f1: # added
2682 header.append('new file mode %s' % gitmode[flag2])
2671 header.append('new file mode %s' % gitmode[flag2])
2683 elif not f2: # removed
2672 elif not f2: # removed
2684 header.append('deleted file mode %s' % gitmode[flag1])
2673 header.append('deleted file mode %s' % gitmode[flag1])
2685 else: # modified/copied/renamed
2674 else: # modified/copied/renamed
2686 mode1, mode2 = gitmode[flag1], gitmode[flag2]
2675 mode1, mode2 = gitmode[flag1], gitmode[flag2]
2687 if mode1 != mode2:
2676 if mode1 != mode2:
2688 header.append('old mode %s' % mode1)
2677 header.append('old mode %s' % mode1)
2689 header.append('new mode %s' % mode2)
2678 header.append('new mode %s' % mode2)
2690 if copyop is not None:
2679 if copyop is not None:
2691 if opts.showsimilarity:
2680 if opts.showsimilarity:
2692 sim = similar.score(ctx1[path1], ctx2[path2]) * 100
2681 sim = similar.score(ctx1[path1], ctx2[path2]) * 100
2693 header.append('similarity index %d%%' % sim)
2682 header.append('similarity index %d%%' % sim)
2694 header.append('%s from %s' % (copyop, path1))
2683 header.append('%s from %s' % (copyop, path1))
2695 header.append('%s to %s' % (copyop, path2))
2684 header.append('%s to %s' % (copyop, path2))
2696 elif revs and not repo.ui.quiet:
2685 elif revs and not repo.ui.quiet:
2697 header.append(diffline(path1, revs))
2686 header.append(diffline(path1, revs))
2698
2687
2699 # fctx.is | diffopts | what to | is fctx.data()
2688 # fctx.is | diffopts | what to | is fctx.data()
2700 # binary() | text nobinary git index | output? | outputted?
2689 # binary() | text nobinary git index | output? | outputted?
2701 # ------------------------------------|----------------------------
2690 # ------------------------------------|----------------------------
2702 # yes | no no no * | summary | no
2691 # yes | no no no * | summary | no
2703 # yes | no no yes * | base85 | yes
2692 # yes | no no yes * | base85 | yes
2704 # yes | no yes no * | summary | no
2693 # yes | no yes no * | summary | no
2705 # yes | no yes yes 0 | summary | no
2694 # yes | no yes yes 0 | summary | no
2706 # yes | no yes yes >0 | summary | semi [1]
2695 # yes | no yes yes >0 | summary | semi [1]
2707 # yes | yes * * * | text diff | yes
2696 # yes | yes * * * | text diff | yes
2708 # no | * * * * | text diff | yes
2697 # no | * * * * | text diff | yes
2709 # [1]: hash(fctx.data()) is outputted. so fctx.data() cannot be faked
2698 # [1]: hash(fctx.data()) is outputted. so fctx.data() cannot be faked
2710 if binary and (not opts.git or (opts.git and opts.nobinary and not
2699 if binary and (not opts.git or (opts.git and opts.nobinary and not
2711 opts.index)):
2700 opts.index)):
2712 # fast path: no binary content will be displayed, content1 and
2701 # fast path: no binary content will be displayed, content1 and
2713 # content2 are only used for equivalent test. cmp() could have a
2702 # content2 are only used for equivalent test. cmp() could have a
2714 # fast path.
2703 # fast path.
2715 if fctx1 is not None:
2704 if fctx1 is not None:
2716 content1 = b'\0'
2705 content1 = b'\0'
2717 if fctx2 is not None:
2706 if fctx2 is not None:
2718 if fctx1 is not None and not fctx1.cmp(fctx2):
2707 if fctx1 is not None and not fctx1.cmp(fctx2):
2719 content2 = b'\0' # not different
2708 content2 = b'\0' # not different
2720 else:
2709 else:
2721 content2 = b'\0\0'
2710 content2 = b'\0\0'
2722 else:
2711 else:
2723 # normal path: load contents
2712 # normal path: load contents
2724 if fctx1 is not None:
2713 if fctx1 is not None:
2725 content1 = fctx1.data()
2714 content1 = fctx1.data()
2726 if fctx2 is not None:
2715 if fctx2 is not None:
2727 content2 = fctx2.data()
2716 content2 = fctx2.data()
2728
2717
2729 if binary and opts.git and not opts.nobinary:
2718 if binary and opts.git and not opts.nobinary:
2730 text = mdiff.b85diff(content1, content2)
2719 text = mdiff.b85diff(content1, content2)
2731 if text:
2720 if text:
2732 header.append('index %s..%s' %
2721 header.append('index %s..%s' %
2733 (gitindex(content1), gitindex(content2)))
2722 (gitindex(content1), gitindex(content2)))
2734 hunks = (None, [text]),
2723 hunks = (None, [text]),
2735 else:
2724 else:
2736 if opts.git and opts.index > 0:
2725 if opts.git and opts.index > 0:
2737 flag = flag1
2726 flag = flag1
2738 if flag is None:
2727 if flag is None:
2739 flag = flag2
2728 flag = flag2
2740 header.append('index %s..%s %s' %
2729 header.append('index %s..%s %s' %
2741 (gitindex(content1)[0:opts.index],
2730 (gitindex(content1)[0:opts.index],
2742 gitindex(content2)[0:opts.index],
2731 gitindex(content2)[0:opts.index],
2743 gitmode[flag]))
2732 gitmode[flag]))
2744
2733
2745 uheaders, hunks = mdiff.unidiff(content1, date1,
2734 uheaders, hunks = mdiff.unidiff(content1, date1,
2746 content2, date2,
2735 content2, date2,
2747 path1, path2,
2736 path1, path2,
2748 binary=binary, opts=opts)
2737 binary=binary, opts=opts)
2749 header.extend(uheaders)
2738 header.extend(uheaders)
2750 yield fctx1, fctx2, header, hunks
2739 yield fctx1, fctx2, header, hunks
2751
2740
def diffstatsum(stats):
    """Reduce per-file diffstat tuples to file-set totals.

    *stats* is an iterable of (filename, adds, removes, isbinary) tuples
    as produced by diffstatdata(). Returns a 5-tuple
    (maxfile, maxtotal, addtotal, removetotal, binary) where maxfile is
    the widest filename (in display columns), maxtotal the largest
    per-file change count, and binary is True if any file was binary.
    """
    maxfile = maxtotal = addtotal = removetotal = 0
    binary = False
    for filename, adds, removes, isbinary in stats:
        namewidth = encoding.colwidth(filename)
        if namewidth > maxfile:
            maxfile = namewidth
        changed = adds + removes
        if changed > maxtotal:
            maxtotal = changed
        addtotal += adds
        removetotal += removes
        if isbinary:
            binary = True

    return maxfile, maxtotal, addtotal, removetotal, binary
2762
2751
def diffstatdata(lines):
    """Parse rendered diff text into per-file statistics.

    *lines* is an iterable of diff output lines. Returns a list of
    (filename, adds, removes, isbinary) tuples, one per file section.
    """
    diffre = re.compile(br'^diff .*-r [a-z0-9]+\s(.*)$')

    results = []
    filename, adds, removes, isbinary = None, 0, 0, False

    def flush():
        # Record the file currently being accumulated, if any.
        if filename:
            results.append((filename, adds, removes, isbinary))

    # inheader tracks whether we are still inside a file's header so
    # that '---'/'+++' header lines are not miscounted as changes.
    inheader = False

    for line in lines:
        if line.startswith('diff'):
            flush()
            # a new file section starts: reset counters and header state
            inheader = True
            adds, removes, isbinary = 0, 0, False
            if line.startswith('diff --git a/'):
                filename = gitre.search(line).group(2)
            elif line.startswith('diff -r'):
                # format: "diff -r ... -r ... filename"
                filename = diffre.search(line).group(1)
        elif line.startswith('@@'):
            inheader = False
        elif line.startswith('+') and not inheader:
            adds += 1
        elif line.startswith('-') and not inheader:
            removes += 1
        elif line.startswith(('GIT binary patch', 'Binary file')):
            isbinary = True
        elif line.startswith('rename from'):
            filename = line[12:]
        elif line.startswith('rename to'):
            filename += ' => %s' % line[10:]
    flush()
    return results
2805
2794
def diffstat(lines, width=80):
    """Render diff output as a per-file histogram summary string.

    *lines* is an iterable of diff output lines; *width* is the total
    column budget for each histogram row. Returns the formatted summary
    (one line per file plus a totals line), or '' for an empty diff.
    """
    stats = diffstatdata(lines)
    maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)

    countwidth = len(str(maxtotal))
    # leave room for the three-character 'Bin' marker
    if hasbinary and countwidth < 3:
        countwidth = 3
    # whatever is left after name and count columns, but at least 10
    graphwidth = max(width - countwidth - maxname - 6, 10)

    def scale(i):
        if maxtotal <= graphwidth:
            return i
        # If diffstat runs out of room it doesn't print anything,
        # which isn't very useful, so always print at least one + or -
        # if there were at least some changes.
        return max(i * graphwidth // maxtotal, int(bool(i)))

    output = []
    for filename, adds, removes, isbinary in stats:
        count = 'Bin' if isbinary else '%d' % (adds + removes)
        padding = ' ' * (maxname - encoding.colwidth(filename))
        output.append(' %s%s | %*s %s%s\n'
                      % (filename, padding, countwidth, count,
                         '+' * scale(adds), '-' * scale(removes)))

    if stats:
        output.append(_(' %d files changed, %d insertions(+), '
                        '%d deletions(-)\n')
                      % (len(stats), totaladds, totalremoves))

    return ''.join(output)
2843
2832
def diffstatui(*args, **kw):
    '''Like diffstat(), but yield 2-tuples of (output, label) suitable
    for ui.write(), labeling the +/- histogram segments.
    '''

    for line in diffstat(*args, **kw).splitlines():
        if not line or line[-1] not in '+-':
            # totals line or other non-histogram output: no label
            yield (line, '')
        else:
            name, graph = line.rsplit(' ', 1)
            yield (name + ' ', '')
            plusrun = re.search(br'\++', graph)
            if plusrun:
                yield (plusrun.group(0), 'diffstat.inserted')
            minusrun = re.search(br'-+', graph)
            if minusrun:
                yield (minusrun.group(0), 'diffstat.deleted')
        yield ('\n', '')
General Comments 0
You need to be logged in to leave comments. Login now