##// END OF EJS Templates
py3: slice over bytes or use .startswith() to prevent getting ascii values...
Pulkit Goyal -
r36465:588048a6 default
parent child Browse files
Show More
@@ -1,360 +1,360 b''
1 # archival.py - revision archival for mercurial
1 # archival.py - revision archival for mercurial
2 #
2 #
3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import gzip
10 import gzip
11 import os
11 import os
12 import struct
12 import struct
13 import tarfile
13 import tarfile
14 import time
14 import time
15 import zipfile
15 import zipfile
16 import zlib
16 import zlib
17
17
18 from .i18n import _
18 from .i18n import _
19
19
20 from . import (
20 from . import (
21 error,
21 error,
22 formatter,
22 formatter,
23 match as matchmod,
23 match as matchmod,
24 scmutil,
24 scmutil,
25 util,
25 util,
26 vfs as vfsmod,
26 vfs as vfsmod,
27 )
27 )
28 stringio = util.stringio
28 stringio = util.stringio
29
29
30 # from unzip source code:
30 # from unzip source code:
31 _UNX_IFREG = 0x8000
31 _UNX_IFREG = 0x8000
32 _UNX_IFLNK = 0xa000
32 _UNX_IFLNK = 0xa000
33
33
34 def tidyprefix(dest, kind, prefix):
34 def tidyprefix(dest, kind, prefix):
35 '''choose prefix to use for names in archive. make sure prefix is
35 '''choose prefix to use for names in archive. make sure prefix is
36 safe for consumers.'''
36 safe for consumers.'''
37
37
38 if prefix:
38 if prefix:
39 prefix = util.normpath(prefix)
39 prefix = util.normpath(prefix)
40 else:
40 else:
41 if not isinstance(dest, bytes):
41 if not isinstance(dest, bytes):
42 raise ValueError('dest must be string if no prefix')
42 raise ValueError('dest must be string if no prefix')
43 prefix = os.path.basename(dest)
43 prefix = os.path.basename(dest)
44 lower = prefix.lower()
44 lower = prefix.lower()
45 for sfx in exts.get(kind, []):
45 for sfx in exts.get(kind, []):
46 if lower.endswith(sfx):
46 if lower.endswith(sfx):
47 prefix = prefix[:-len(sfx)]
47 prefix = prefix[:-len(sfx)]
48 break
48 break
49 lpfx = os.path.normpath(util.localpath(prefix))
49 lpfx = os.path.normpath(util.localpath(prefix))
50 prefix = util.pconvert(lpfx)
50 prefix = util.pconvert(lpfx)
51 if not prefix.endswith('/'):
51 if not prefix.endswith('/'):
52 prefix += '/'
52 prefix += '/'
53 # Drop the leading '.' path component if present, so Windows can read the
53 # Drop the leading '.' path component if present, so Windows can read the
54 # zip files (issue4634)
54 # zip files (issue4634)
55 if prefix.startswith('./'):
55 if prefix.startswith('./'):
56 prefix = prefix[2:]
56 prefix = prefix[2:]
57 if prefix.startswith('../') or os.path.isabs(lpfx) or '/../' in prefix:
57 if prefix.startswith('../') or os.path.isabs(lpfx) or '/../' in prefix:
58 raise error.Abort(_('archive prefix contains illegal components'))
58 raise error.Abort(_('archive prefix contains illegal components'))
59 return prefix
59 return prefix
60
60
61 exts = {
61 exts = {
62 'tar': ['.tar'],
62 'tar': ['.tar'],
63 'tbz2': ['.tbz2', '.tar.bz2'],
63 'tbz2': ['.tbz2', '.tar.bz2'],
64 'tgz': ['.tgz', '.tar.gz'],
64 'tgz': ['.tgz', '.tar.gz'],
65 'zip': ['.zip'],
65 'zip': ['.zip'],
66 }
66 }
67
67
68 def guesskind(dest):
68 def guesskind(dest):
69 for kind, extensions in exts.iteritems():
69 for kind, extensions in exts.iteritems():
70 if any(dest.endswith(ext) for ext in extensions):
70 if any(dest.endswith(ext) for ext in extensions):
71 return kind
71 return kind
72 return None
72 return None
73
73
74 def _rootctx(repo):
74 def _rootctx(repo):
75 # repo[0] may be hidden
75 # repo[0] may be hidden
76 for rev in repo:
76 for rev in repo:
77 return repo[rev]
77 return repo[rev]
78 return repo['null']
78 return repo['null']
79
79
80 # {tags} on ctx includes local tags and 'tip', with no current way to limit
80 # {tags} on ctx includes local tags and 'tip', with no current way to limit
81 # that to global tags. Therefore, use {latesttag} as a substitute when
81 # that to global tags. Therefore, use {latesttag} as a substitute when
82 # the distance is 0, since that will be the list of global tags on ctx.
82 # the distance is 0, since that will be the list of global tags on ctx.
83 _defaultmetatemplate = br'''
83 _defaultmetatemplate = br'''
84 repo: {root}
84 repo: {root}
85 node: {ifcontains(rev, revset("wdir()"), "{p1node}{dirty}", "{node}")}
85 node: {ifcontains(rev, revset("wdir()"), "{p1node}{dirty}", "{node}")}
86 branch: {branch|utf8}
86 branch: {branch|utf8}
87 {ifeq(latesttagdistance, 0, join(latesttag % "tag: {tag}", "\n"),
87 {ifeq(latesttagdistance, 0, join(latesttag % "tag: {tag}", "\n"),
88 separate("\n",
88 separate("\n",
89 join(latesttag % "latesttag: {tag}", "\n"),
89 join(latesttag % "latesttag: {tag}", "\n"),
90 "latesttagdistance: {latesttagdistance}",
90 "latesttagdistance: {latesttagdistance}",
91 "changessincelatesttag: {changessincelatesttag}"))}
91 "changessincelatesttag: {changessincelatesttag}"))}
92 '''[1:] # drop leading '\n'
92 '''[1:] # drop leading '\n'
93
93
94 def buildmetadata(ctx):
94 def buildmetadata(ctx):
95 '''build content of .hg_archival.txt'''
95 '''build content of .hg_archival.txt'''
96 repo = ctx.repo()
96 repo = ctx.repo()
97
97
98 opts = {
98 opts = {
99 'template': repo.ui.config('experimental', 'archivemetatemplate',
99 'template': repo.ui.config('experimental', 'archivemetatemplate',
100 _defaultmetatemplate)
100 _defaultmetatemplate)
101 }
101 }
102
102
103 out = util.stringio()
103 out = util.stringio()
104
104
105 fm = formatter.formatter(repo.ui, out, 'archive', opts)
105 fm = formatter.formatter(repo.ui, out, 'archive', opts)
106 fm.startitem()
106 fm.startitem()
107 fm.context(ctx=ctx)
107 fm.context(ctx=ctx)
108 fm.data(root=_rootctx(repo).hex())
108 fm.data(root=_rootctx(repo).hex())
109
109
110 if ctx.rev() is None:
110 if ctx.rev() is None:
111 dirty = ''
111 dirty = ''
112 if ctx.dirty(missing=True):
112 if ctx.dirty(missing=True):
113 dirty = '+'
113 dirty = '+'
114 fm.data(dirty=dirty)
114 fm.data(dirty=dirty)
115 fm.end()
115 fm.end()
116
116
117 return out.getvalue()
117 return out.getvalue()
118
118
119 class tarit(object):
119 class tarit(object):
120 '''write archive to tar file or stream. can write uncompressed,
120 '''write archive to tar file or stream. can write uncompressed,
121 or compress with gzip or bzip2.'''
121 or compress with gzip or bzip2.'''
122
122
123 class GzipFileWithTime(gzip.GzipFile):
123 class GzipFileWithTime(gzip.GzipFile):
124
124
125 def __init__(self, *args, **kw):
125 def __init__(self, *args, **kw):
126 timestamp = None
126 timestamp = None
127 if 'timestamp' in kw:
127 if 'timestamp' in kw:
128 timestamp = kw.pop(r'timestamp')
128 timestamp = kw.pop(r'timestamp')
129 if timestamp is None:
129 if timestamp is None:
130 self.timestamp = time.time()
130 self.timestamp = time.time()
131 else:
131 else:
132 self.timestamp = timestamp
132 self.timestamp = timestamp
133 gzip.GzipFile.__init__(self, *args, **kw)
133 gzip.GzipFile.__init__(self, *args, **kw)
134
134
135 def _write_gzip_header(self):
135 def _write_gzip_header(self):
136 self.fileobj.write('\037\213') # magic header
136 self.fileobj.write('\037\213') # magic header
137 self.fileobj.write('\010') # compression method
137 self.fileobj.write('\010') # compression method
138 fname = self.name
138 fname = self.name
139 if fname and fname.endswith('.gz'):
139 if fname and fname.endswith('.gz'):
140 fname = fname[:-3]
140 fname = fname[:-3]
141 flags = 0
141 flags = 0
142 if fname:
142 if fname:
143 flags = gzip.FNAME
143 flags = gzip.FNAME
144 self.fileobj.write(chr(flags))
144 self.fileobj.write(chr(flags))
145 gzip.write32u(self.fileobj, long(self.timestamp))
145 gzip.write32u(self.fileobj, long(self.timestamp))
146 self.fileobj.write('\002')
146 self.fileobj.write('\002')
147 self.fileobj.write('\377')
147 self.fileobj.write('\377')
148 if fname:
148 if fname:
149 self.fileobj.write(fname + '\000')
149 self.fileobj.write(fname + '\000')
150
150
151 def __init__(self, dest, mtime, kind=''):
151 def __init__(self, dest, mtime, kind=''):
152 self.mtime = mtime
152 self.mtime = mtime
153 self.fileobj = None
153 self.fileobj = None
154
154
155 def taropen(mode, name='', fileobj=None):
155 def taropen(mode, name='', fileobj=None):
156 if kind == 'gz':
156 if kind == 'gz':
157 mode = mode[0]
157 mode = mode[0:1]
158 if not fileobj:
158 if not fileobj:
159 fileobj = open(name, mode + 'b')
159 fileobj = open(name, mode + 'b')
160 gzfileobj = self.GzipFileWithTime(name, mode + 'b',
160 gzfileobj = self.GzipFileWithTime(name, mode + 'b',
161 zlib.Z_BEST_COMPRESSION,
161 zlib.Z_BEST_COMPRESSION,
162 fileobj, timestamp=mtime)
162 fileobj, timestamp=mtime)
163 self.fileobj = gzfileobj
163 self.fileobj = gzfileobj
164 return tarfile.TarFile.taropen(name, mode, gzfileobj)
164 return tarfile.TarFile.taropen(name, mode, gzfileobj)
165 else:
165 else:
166 return tarfile.open(name, mode + kind, fileobj)
166 return tarfile.open(name, mode + kind, fileobj)
167
167
168 if isinstance(dest, str):
168 if isinstance(dest, str):
169 self.z = taropen('w:', name=dest)
169 self.z = taropen('w:', name=dest)
170 else:
170 else:
171 self.z = taropen('w|', fileobj=dest)
171 self.z = taropen('w|', fileobj=dest)
172
172
173 def addfile(self, name, mode, islink, data):
173 def addfile(self, name, mode, islink, data):
174 i = tarfile.TarInfo(name)
174 i = tarfile.TarInfo(name)
175 i.mtime = self.mtime
175 i.mtime = self.mtime
176 i.size = len(data)
176 i.size = len(data)
177 if islink:
177 if islink:
178 i.type = tarfile.SYMTYPE
178 i.type = tarfile.SYMTYPE
179 i.mode = 0o777
179 i.mode = 0o777
180 i.linkname = data
180 i.linkname = data
181 data = None
181 data = None
182 i.size = 0
182 i.size = 0
183 else:
183 else:
184 i.mode = mode
184 i.mode = mode
185 data = stringio(data)
185 data = stringio(data)
186 self.z.addfile(i, data)
186 self.z.addfile(i, data)
187
187
188 def done(self):
188 def done(self):
189 self.z.close()
189 self.z.close()
190 if self.fileobj:
190 if self.fileobj:
191 self.fileobj.close()
191 self.fileobj.close()
192
192
193 class tellable(object):
193 class tellable(object):
194 '''provide tell method for zipfile.ZipFile when writing to http
194 '''provide tell method for zipfile.ZipFile when writing to http
195 response file object.'''
195 response file object.'''
196
196
197 def __init__(self, fp):
197 def __init__(self, fp):
198 self.fp = fp
198 self.fp = fp
199 self.offset = 0
199 self.offset = 0
200
200
201 def __getattr__(self, key):
201 def __getattr__(self, key):
202 return getattr(self.fp, key)
202 return getattr(self.fp, key)
203
203
204 def write(self, s):
204 def write(self, s):
205 self.fp.write(s)
205 self.fp.write(s)
206 self.offset += len(s)
206 self.offset += len(s)
207
207
208 def tell(self):
208 def tell(self):
209 return self.offset
209 return self.offset
210
210
211 class zipit(object):
211 class zipit(object):
212 '''write archive to zip file or stream. can write uncompressed,
212 '''write archive to zip file or stream. can write uncompressed,
213 or compressed with deflate.'''
213 or compressed with deflate.'''
214
214
215 def __init__(self, dest, mtime, compress=True):
215 def __init__(self, dest, mtime, compress=True):
216 if not isinstance(dest, str):
216 if not isinstance(dest, str):
217 try:
217 try:
218 dest.tell()
218 dest.tell()
219 except (AttributeError, IOError):
219 except (AttributeError, IOError):
220 dest = tellable(dest)
220 dest = tellable(dest)
221 self.z = zipfile.ZipFile(dest, r'w',
221 self.z = zipfile.ZipFile(dest, r'w',
222 compress and zipfile.ZIP_DEFLATED or
222 compress and zipfile.ZIP_DEFLATED or
223 zipfile.ZIP_STORED)
223 zipfile.ZIP_STORED)
224
224
225 # Python's zipfile module emits deprecation warnings if we try
225 # Python's zipfile module emits deprecation warnings if we try
226 # to store files with a date before 1980.
226 # to store files with a date before 1980.
227 epoch = 315532800 # calendar.timegm((1980, 1, 1, 0, 0, 0, 1, 1, 0))
227 epoch = 315532800 # calendar.timegm((1980, 1, 1, 0, 0, 0, 1, 1, 0))
228 if mtime < epoch:
228 if mtime < epoch:
229 mtime = epoch
229 mtime = epoch
230
230
231 self.mtime = mtime
231 self.mtime = mtime
232 self.date_time = time.gmtime(mtime)[:6]
232 self.date_time = time.gmtime(mtime)[:6]
233
233
234 def addfile(self, name, mode, islink, data):
234 def addfile(self, name, mode, islink, data):
235 i = zipfile.ZipInfo(name, self.date_time)
235 i = zipfile.ZipInfo(name, self.date_time)
236 i.compress_type = self.z.compression
236 i.compress_type = self.z.compression
237 # unzip will not honor unix file modes unless file creator is
237 # unzip will not honor unix file modes unless file creator is
238 # set to unix (id 3).
238 # set to unix (id 3).
239 i.create_system = 3
239 i.create_system = 3
240 ftype = _UNX_IFREG
240 ftype = _UNX_IFREG
241 if islink:
241 if islink:
242 mode = 0o777
242 mode = 0o777
243 ftype = _UNX_IFLNK
243 ftype = _UNX_IFLNK
244 i.external_attr = (mode | ftype) << 16
244 i.external_attr = (mode | ftype) << 16
245 # add "extended-timestamp" extra block, because zip archives
245 # add "extended-timestamp" extra block, because zip archives
246 # without this will be extracted with unexpected timestamp,
246 # without this will be extracted with unexpected timestamp,
247 # if TZ is not configured as GMT
247 # if TZ is not configured as GMT
248 i.extra += struct.pack('<hhBl',
248 i.extra += struct.pack('<hhBl',
249 0x5455, # block type: "extended-timestamp"
249 0x5455, # block type: "extended-timestamp"
250 1 + 4, # size of this block
250 1 + 4, # size of this block
251 1, # "modification time is present"
251 1, # "modification time is present"
252 int(self.mtime)) # last modification (UTC)
252 int(self.mtime)) # last modification (UTC)
253 self.z.writestr(i, data)
253 self.z.writestr(i, data)
254
254
255 def done(self):
255 def done(self):
256 self.z.close()
256 self.z.close()
257
257
258 class fileit(object):
258 class fileit(object):
259 '''write archive as files in directory.'''
259 '''write archive as files in directory.'''
260
260
261 def __init__(self, name, mtime):
261 def __init__(self, name, mtime):
262 self.basedir = name
262 self.basedir = name
263 self.opener = vfsmod.vfs(self.basedir)
263 self.opener = vfsmod.vfs(self.basedir)
264 self.mtime = mtime
264 self.mtime = mtime
265
265
266 def addfile(self, name, mode, islink, data):
266 def addfile(self, name, mode, islink, data):
267 if islink:
267 if islink:
268 self.opener.symlink(data, name)
268 self.opener.symlink(data, name)
269 return
269 return
270 f = self.opener(name, "w", atomictemp=True)
270 f = self.opener(name, "w", atomictemp=True)
271 f.write(data)
271 f.write(data)
272 f.close()
272 f.close()
273 destfile = os.path.join(self.basedir, name)
273 destfile = os.path.join(self.basedir, name)
274 os.chmod(destfile, mode)
274 os.chmod(destfile, mode)
275 if self.mtime is not None:
275 if self.mtime is not None:
276 os.utime(destfile, (self.mtime, self.mtime))
276 os.utime(destfile, (self.mtime, self.mtime))
277
277
278 def done(self):
278 def done(self):
279 pass
279 pass
280
280
281 archivers = {
281 archivers = {
282 'files': fileit,
282 'files': fileit,
283 'tar': tarit,
283 'tar': tarit,
284 'tbz2': lambda name, mtime: tarit(name, mtime, 'bz2'),
284 'tbz2': lambda name, mtime: tarit(name, mtime, 'bz2'),
285 'tgz': lambda name, mtime: tarit(name, mtime, 'gz'),
285 'tgz': lambda name, mtime: tarit(name, mtime, 'gz'),
286 'uzip': lambda name, mtime: zipit(name, mtime, False),
286 'uzip': lambda name, mtime: zipit(name, mtime, False),
287 'zip': zipit,
287 'zip': zipit,
288 }
288 }
289
289
290 def archive(repo, dest, node, kind, decode=True, matchfn=None,
290 def archive(repo, dest, node, kind, decode=True, matchfn=None,
291 prefix='', mtime=None, subrepos=False):
291 prefix='', mtime=None, subrepos=False):
292 '''create archive of repo as it was at node.
292 '''create archive of repo as it was at node.
293
293
294 dest can be name of directory, name of archive file, or file
294 dest can be name of directory, name of archive file, or file
295 object to write archive to.
295 object to write archive to.
296
296
297 kind is type of archive to create.
297 kind is type of archive to create.
298
298
299 decode tells whether to put files through decode filters from
299 decode tells whether to put files through decode filters from
300 hgrc.
300 hgrc.
301
301
302 matchfn is function to filter names of files to write to archive.
302 matchfn is function to filter names of files to write to archive.
303
303
304 prefix is name of path to put before every archive member.
304 prefix is name of path to put before every archive member.
305
305
306 mtime is the modified time, in seconds, or None to use the changeset time.
306 mtime is the modified time, in seconds, or None to use the changeset time.
307
307
308 subrepos tells whether to include subrepos.
308 subrepos tells whether to include subrepos.
309 '''
309 '''
310
310
311 if kind == 'files':
311 if kind == 'files':
312 if prefix:
312 if prefix:
313 raise error.Abort(_('cannot give prefix when archiving to files'))
313 raise error.Abort(_('cannot give prefix when archiving to files'))
314 else:
314 else:
315 prefix = tidyprefix(dest, kind, prefix)
315 prefix = tidyprefix(dest, kind, prefix)
316
316
317 def write(name, mode, islink, getdata):
317 def write(name, mode, islink, getdata):
318 data = getdata()
318 data = getdata()
319 if decode:
319 if decode:
320 data = repo.wwritedata(name, data)
320 data = repo.wwritedata(name, data)
321 archiver.addfile(prefix + name, mode, islink, data)
321 archiver.addfile(prefix + name, mode, islink, data)
322
322
323 if kind not in archivers:
323 if kind not in archivers:
324 raise error.Abort(_("unknown archive type '%s'") % kind)
324 raise error.Abort(_("unknown archive type '%s'") % kind)
325
325
326 ctx = repo[node]
326 ctx = repo[node]
327 archiver = archivers[kind](dest, mtime or ctx.date()[0])
327 archiver = archivers[kind](dest, mtime or ctx.date()[0])
328
328
329 if repo.ui.configbool("ui", "archivemeta"):
329 if repo.ui.configbool("ui", "archivemeta"):
330 name = '.hg_archival.txt'
330 name = '.hg_archival.txt'
331 if not matchfn or matchfn(name):
331 if not matchfn or matchfn(name):
332 write(name, 0o644, False, lambda: buildmetadata(ctx))
332 write(name, 0o644, False, lambda: buildmetadata(ctx))
333
333
334 if matchfn:
334 if matchfn:
335 files = [f for f in ctx.manifest().keys() if matchfn(f)]
335 files = [f for f in ctx.manifest().keys() if matchfn(f)]
336 else:
336 else:
337 files = ctx.manifest().keys()
337 files = ctx.manifest().keys()
338 total = len(files)
338 total = len(files)
339 if total:
339 if total:
340 files.sort()
340 files.sort()
341 scmutil.fileprefetchhooks(repo, ctx, files)
341 scmutil.fileprefetchhooks(repo, ctx, files)
342 repo.ui.progress(_('archiving'), 0, unit=_('files'), total=total)
342 repo.ui.progress(_('archiving'), 0, unit=_('files'), total=total)
343 for i, f in enumerate(files):
343 for i, f in enumerate(files):
344 ff = ctx.flags(f)
344 ff = ctx.flags(f)
345 write(f, 'x' in ff and 0o755 or 0o644, 'l' in ff, ctx[f].data)
345 write(f, 'x' in ff and 0o755 or 0o644, 'l' in ff, ctx[f].data)
346 repo.ui.progress(_('archiving'), i + 1, item=f,
346 repo.ui.progress(_('archiving'), i + 1, item=f,
347 unit=_('files'), total=total)
347 unit=_('files'), total=total)
348 repo.ui.progress(_('archiving'), None)
348 repo.ui.progress(_('archiving'), None)
349
349
350 if subrepos:
350 if subrepos:
351 for subpath in sorted(ctx.substate):
351 for subpath in sorted(ctx.substate):
352 sub = ctx.workingsub(subpath)
352 sub = ctx.workingsub(subpath)
353 submatch = matchmod.subdirmatcher(subpath, matchfn)
353 submatch = matchmod.subdirmatcher(subpath, matchfn)
354 total += sub.archive(archiver, prefix, submatch, decode)
354 total += sub.archive(archiver, prefix, submatch, decode)
355
355
356 if total == 0:
356 if total == 0:
357 raise error.Abort(_('no files match the archive pattern'))
357 raise error.Abort(_('no files match the archive pattern'))
358
358
359 archiver.done()
359 archiver.done()
360 return total
360 return total
@@ -1,2903 +1,2903 b''
1 # patch.py - patch file parsing routines
1 # patch.py - patch file parsing routines
2 #
2 #
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import, print_function
9 from __future__ import absolute_import, print_function
10
10
11 import collections
11 import collections
12 import copy
12 import copy
13 import difflib
13 import difflib
14 import email
14 import email
15 import errno
15 import errno
16 import hashlib
16 import hashlib
17 import os
17 import os
18 import posixpath
18 import posixpath
19 import re
19 import re
20 import shutil
20 import shutil
21 import tempfile
21 import tempfile
22 import zlib
22 import zlib
23
23
24 from .i18n import _
24 from .i18n import _
25 from .node import (
25 from .node import (
26 hex,
26 hex,
27 short,
27 short,
28 )
28 )
29 from . import (
29 from . import (
30 copies,
30 copies,
31 encoding,
31 encoding,
32 error,
32 error,
33 mail,
33 mail,
34 mdiff,
34 mdiff,
35 pathutil,
35 pathutil,
36 policy,
36 policy,
37 pycompat,
37 pycompat,
38 scmutil,
38 scmutil,
39 similar,
39 similar,
40 util,
40 util,
41 vfs as vfsmod,
41 vfs as vfsmod,
42 )
42 )
43
43
44 diffhelpers = policy.importmod(r'diffhelpers')
44 diffhelpers = policy.importmod(r'diffhelpers')
45 stringio = util.stringio
45 stringio = util.stringio
46
46
47 gitre = re.compile(br'diff --git a/(.*) b/(.*)')
47 gitre = re.compile(br'diff --git a/(.*) b/(.*)')
48 tabsplitter = re.compile(br'(\t+|[^\t]+)')
48 tabsplitter = re.compile(br'(\t+|[^\t]+)')
49 _nonwordre = re.compile(br'([^a-zA-Z0-9_\x80-\xff])')
49 _nonwordre = re.compile(br'([^a-zA-Z0-9_\x80-\xff])')
50
50
51 PatchError = error.PatchError
51 PatchError = error.PatchError
52
52
53 # public functions
53 # public functions
54
54
55 def split(stream):
55 def split(stream):
56 '''return an iterator of individual patches from a stream'''
56 '''return an iterator of individual patches from a stream'''
57 def isheader(line, inheader):
57 def isheader(line, inheader):
58 if inheader and line[0] in (' ', '\t'):
58 if inheader and line[0] in (' ', '\t'):
59 # continuation
59 # continuation
60 return True
60 return True
61 if line[0] in (' ', '-', '+'):
61 if line[0] in (' ', '-', '+'):
62 # diff line - don't check for header pattern in there
62 # diff line - don't check for header pattern in there
63 return False
63 return False
64 l = line.split(': ', 1)
64 l = line.split(': ', 1)
65 return len(l) == 2 and ' ' not in l[0]
65 return len(l) == 2 and ' ' not in l[0]
66
66
67 def chunk(lines):
67 def chunk(lines):
68 return stringio(''.join(lines))
68 return stringio(''.join(lines))
69
69
70 def hgsplit(stream, cur):
70 def hgsplit(stream, cur):
71 inheader = True
71 inheader = True
72
72
73 for line in stream:
73 for line in stream:
74 if not line.strip():
74 if not line.strip():
75 inheader = False
75 inheader = False
76 if not inheader and line.startswith('# HG changeset patch'):
76 if not inheader and line.startswith('# HG changeset patch'):
77 yield chunk(cur)
77 yield chunk(cur)
78 cur = []
78 cur = []
79 inheader = True
79 inheader = True
80
80
81 cur.append(line)
81 cur.append(line)
82
82
83 if cur:
83 if cur:
84 yield chunk(cur)
84 yield chunk(cur)
85
85
86 def mboxsplit(stream, cur):
86 def mboxsplit(stream, cur):
87 for line in stream:
87 for line in stream:
88 if line.startswith('From '):
88 if line.startswith('From '):
89 for c in split(chunk(cur[1:])):
89 for c in split(chunk(cur[1:])):
90 yield c
90 yield c
91 cur = []
91 cur = []
92
92
93 cur.append(line)
93 cur.append(line)
94
94
95 if cur:
95 if cur:
96 for c in split(chunk(cur[1:])):
96 for c in split(chunk(cur[1:])):
97 yield c
97 yield c
98
98
99 def mimesplit(stream, cur):
99 def mimesplit(stream, cur):
100 def msgfp(m):
100 def msgfp(m):
101 fp = stringio()
101 fp = stringio()
102 g = email.Generator.Generator(fp, mangle_from_=False)
102 g = email.Generator.Generator(fp, mangle_from_=False)
103 g.flatten(m)
103 g.flatten(m)
104 fp.seek(0)
104 fp.seek(0)
105 return fp
105 return fp
106
106
107 for line in stream:
107 for line in stream:
108 cur.append(line)
108 cur.append(line)
109 c = chunk(cur)
109 c = chunk(cur)
110
110
111 m = pycompat.emailparser().parse(c)
111 m = pycompat.emailparser().parse(c)
112 if not m.is_multipart():
112 if not m.is_multipart():
113 yield msgfp(m)
113 yield msgfp(m)
114 else:
114 else:
115 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
115 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
116 for part in m.walk():
116 for part in m.walk():
117 ct = part.get_content_type()
117 ct = part.get_content_type()
118 if ct not in ok_types:
118 if ct not in ok_types:
119 continue
119 continue
120 yield msgfp(part)
120 yield msgfp(part)
121
121
122 def headersplit(stream, cur):
122 def headersplit(stream, cur):
123 inheader = False
123 inheader = False
124
124
125 for line in stream:
125 for line in stream:
126 if not inheader and isheader(line, inheader):
126 if not inheader and isheader(line, inheader):
127 yield chunk(cur)
127 yield chunk(cur)
128 cur = []
128 cur = []
129 inheader = True
129 inheader = True
130 if inheader and not isheader(line, inheader):
130 if inheader and not isheader(line, inheader):
131 inheader = False
131 inheader = False
132
132
133 cur.append(line)
133 cur.append(line)
134
134
135 if cur:
135 if cur:
136 yield chunk(cur)
136 yield chunk(cur)
137
137
138 def remainder(cur):
138 def remainder(cur):
139 yield chunk(cur)
139 yield chunk(cur)
140
140
141 class fiter(object):
141 class fiter(object):
142 def __init__(self, fp):
142 def __init__(self, fp):
143 self.fp = fp
143 self.fp = fp
144
144
145 def __iter__(self):
145 def __iter__(self):
146 return self
146 return self
147
147
148 def next(self):
148 def next(self):
149 l = self.fp.readline()
149 l = self.fp.readline()
150 if not l:
150 if not l:
151 raise StopIteration
151 raise StopIteration
152 return l
152 return l
153
153
154 __next__ = next
154 __next__ = next
155
155
156 inheader = False
156 inheader = False
157 cur = []
157 cur = []
158
158
159 mimeheaders = ['content-type']
159 mimeheaders = ['content-type']
160
160
161 if not util.safehasattr(stream, 'next'):
161 if not util.safehasattr(stream, 'next'):
162 # http responses, for example, have readline but not next
162 # http responses, for example, have readline but not next
163 stream = fiter(stream)
163 stream = fiter(stream)
164
164
165 for line in stream:
165 for line in stream:
166 cur.append(line)
166 cur.append(line)
167 if line.startswith('# HG changeset patch'):
167 if line.startswith('# HG changeset patch'):
168 return hgsplit(stream, cur)
168 return hgsplit(stream, cur)
169 elif line.startswith('From '):
169 elif line.startswith('From '):
170 return mboxsplit(stream, cur)
170 return mboxsplit(stream, cur)
171 elif isheader(line, inheader):
171 elif isheader(line, inheader):
172 inheader = True
172 inheader = True
173 if line.split(':', 1)[0].lower() in mimeheaders:
173 if line.split(':', 1)[0].lower() in mimeheaders:
174 # let email parser handle this
174 # let email parser handle this
175 return mimesplit(stream, cur)
175 return mimesplit(stream, cur)
176 elif line.startswith('--- ') and inheader:
176 elif line.startswith('--- ') and inheader:
177 # No evil headers seen by diff start, split by hand
177 # No evil headers seen by diff start, split by hand
178 return headersplit(stream, cur)
178 return headersplit(stream, cur)
179 # Not enough info, keep reading
179 # Not enough info, keep reading
180
180
181 # if we are here, we have a very plain patch
181 # if we are here, we have a very plain patch
182 return remainder(cur)
182 return remainder(cur)
183
183
184 ## Some facility for extensible patch parsing:
184 ## Some facility for extensible patch parsing:
185 # list of pairs ("header to match", "data key")
185 # list of pairs ("header to match", "data key")
186 patchheadermap = [('Date', 'date'),
186 patchheadermap = [('Date', 'date'),
187 ('Branch', 'branch'),
187 ('Branch', 'branch'),
188 ('Node ID', 'nodeid'),
188 ('Node ID', 'nodeid'),
189 ]
189 ]
190
190
191 def extract(ui, fileobj):
191 def extract(ui, fileobj):
192 '''extract patch from data read from fileobj.
192 '''extract patch from data read from fileobj.
193
193
194 patch can be a normal patch or contained in an email message.
194 patch can be a normal patch or contained in an email message.
195
195
196 return a dictionary. Standard keys are:
196 return a dictionary. Standard keys are:
197 - filename,
197 - filename,
198 - message,
198 - message,
199 - user,
199 - user,
200 - date,
200 - date,
201 - branch,
201 - branch,
202 - node,
202 - node,
203 - p1,
203 - p1,
204 - p2.
204 - p2.
205 Any item can be missing from the dictionary. If filename is missing,
205 Any item can be missing from the dictionary. If filename is missing,
206 fileobj did not contain a patch. Caller must unlink filename when done.'''
206 fileobj did not contain a patch. Caller must unlink filename when done.'''
207
207
208 # attempt to detect the start of a patch
208 # attempt to detect the start of a patch
209 # (this heuristic is borrowed from quilt)
209 # (this heuristic is borrowed from quilt)
210 diffre = re.compile(br'^(?:Index:[ \t]|diff[ \t]-|RCS file: |'
210 diffre = re.compile(br'^(?:Index:[ \t]|diff[ \t]-|RCS file: |'
211 br'retrieving revision [0-9]+(\.[0-9]+)*$|'
211 br'retrieving revision [0-9]+(\.[0-9]+)*$|'
212 br'---[ \t].*?^\+\+\+[ \t]|'
212 br'---[ \t].*?^\+\+\+[ \t]|'
213 br'\*\*\*[ \t].*?^---[ \t])',
213 br'\*\*\*[ \t].*?^---[ \t])',
214 re.MULTILINE | re.DOTALL)
214 re.MULTILINE | re.DOTALL)
215
215
216 data = {}
216 data = {}
217 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
217 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
218 tmpfp = os.fdopen(fd, pycompat.sysstr('wb'))
218 tmpfp = os.fdopen(fd, pycompat.sysstr('wb'))
219 try:
219 try:
220 msg = pycompat.emailparser().parse(fileobj)
220 msg = pycompat.emailparser().parse(fileobj)
221
221
222 subject = msg['Subject'] and mail.headdecode(msg['Subject'])
222 subject = msg['Subject'] and mail.headdecode(msg['Subject'])
223 data['user'] = msg['From'] and mail.headdecode(msg['From'])
223 data['user'] = msg['From'] and mail.headdecode(msg['From'])
224 if not subject and not data['user']:
224 if not subject and not data['user']:
225 # Not an email, restore parsed headers if any
225 # Not an email, restore parsed headers if any
226 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
226 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
227
227
228 # should try to parse msg['Date']
228 # should try to parse msg['Date']
229 parents = []
229 parents = []
230
230
231 if subject:
231 if subject:
232 if subject.startswith('[PATCH'):
232 if subject.startswith('[PATCH'):
233 pend = subject.find(']')
233 pend = subject.find(']')
234 if pend >= 0:
234 if pend >= 0:
235 subject = subject[pend + 1:].lstrip()
235 subject = subject[pend + 1:].lstrip()
236 subject = re.sub(br'\n[ \t]+', ' ', subject)
236 subject = re.sub(br'\n[ \t]+', ' ', subject)
237 ui.debug('Subject: %s\n' % subject)
237 ui.debug('Subject: %s\n' % subject)
238 if data['user']:
238 if data['user']:
239 ui.debug('From: %s\n' % data['user'])
239 ui.debug('From: %s\n' % data['user'])
240 diffs_seen = 0
240 diffs_seen = 0
241 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
241 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
242 message = ''
242 message = ''
243 for part in msg.walk():
243 for part in msg.walk():
244 content_type = pycompat.bytestr(part.get_content_type())
244 content_type = pycompat.bytestr(part.get_content_type())
245 ui.debug('Content-Type: %s\n' % content_type)
245 ui.debug('Content-Type: %s\n' % content_type)
246 if content_type not in ok_types:
246 if content_type not in ok_types:
247 continue
247 continue
248 payload = part.get_payload(decode=True)
248 payload = part.get_payload(decode=True)
249 m = diffre.search(payload)
249 m = diffre.search(payload)
250 if m:
250 if m:
251 hgpatch = False
251 hgpatch = False
252 hgpatchheader = False
252 hgpatchheader = False
253 ignoretext = False
253 ignoretext = False
254
254
255 ui.debug('found patch at byte %d\n' % m.start(0))
255 ui.debug('found patch at byte %d\n' % m.start(0))
256 diffs_seen += 1
256 diffs_seen += 1
257 cfp = stringio()
257 cfp = stringio()
258 for line in payload[:m.start(0)].splitlines():
258 for line in payload[:m.start(0)].splitlines():
259 if line.startswith('# HG changeset patch') and not hgpatch:
259 if line.startswith('# HG changeset patch') and not hgpatch:
260 ui.debug('patch generated by hg export\n')
260 ui.debug('patch generated by hg export\n')
261 hgpatch = True
261 hgpatch = True
262 hgpatchheader = True
262 hgpatchheader = True
263 # drop earlier commit message content
263 # drop earlier commit message content
264 cfp.seek(0)
264 cfp.seek(0)
265 cfp.truncate()
265 cfp.truncate()
266 subject = None
266 subject = None
267 elif hgpatchheader:
267 elif hgpatchheader:
268 if line.startswith('# User '):
268 if line.startswith('# User '):
269 data['user'] = line[7:]
269 data['user'] = line[7:]
270 ui.debug('From: %s\n' % data['user'])
270 ui.debug('From: %s\n' % data['user'])
271 elif line.startswith("# Parent "):
271 elif line.startswith("# Parent "):
272 parents.append(line[9:].lstrip())
272 parents.append(line[9:].lstrip())
273 elif line.startswith("# "):
273 elif line.startswith("# "):
274 for header, key in patchheadermap:
274 for header, key in patchheadermap:
275 prefix = '# %s ' % header
275 prefix = '# %s ' % header
276 if line.startswith(prefix):
276 if line.startswith(prefix):
277 data[key] = line[len(prefix):]
277 data[key] = line[len(prefix):]
278 else:
278 else:
279 hgpatchheader = False
279 hgpatchheader = False
280 elif line == '---':
280 elif line == '---':
281 ignoretext = True
281 ignoretext = True
282 if not hgpatchheader and not ignoretext:
282 if not hgpatchheader and not ignoretext:
283 cfp.write(line)
283 cfp.write(line)
284 cfp.write('\n')
284 cfp.write('\n')
285 message = cfp.getvalue()
285 message = cfp.getvalue()
286 if tmpfp:
286 if tmpfp:
287 tmpfp.write(payload)
287 tmpfp.write(payload)
288 if not payload.endswith('\n'):
288 if not payload.endswith('\n'):
289 tmpfp.write('\n')
289 tmpfp.write('\n')
290 elif not diffs_seen and message and content_type == 'text/plain':
290 elif not diffs_seen and message and content_type == 'text/plain':
291 message += '\n' + payload
291 message += '\n' + payload
292 except: # re-raises
292 except: # re-raises
293 tmpfp.close()
293 tmpfp.close()
294 os.unlink(tmpname)
294 os.unlink(tmpname)
295 raise
295 raise
296
296
297 if subject and not message.startswith(subject):
297 if subject and not message.startswith(subject):
298 message = '%s\n%s' % (subject, message)
298 message = '%s\n%s' % (subject, message)
299 data['message'] = message
299 data['message'] = message
300 tmpfp.close()
300 tmpfp.close()
301 if parents:
301 if parents:
302 data['p1'] = parents.pop(0)
302 data['p1'] = parents.pop(0)
303 if parents:
303 if parents:
304 data['p2'] = parents.pop(0)
304 data['p2'] = parents.pop(0)
305
305
306 if diffs_seen:
306 if diffs_seen:
307 data['filename'] = tmpname
307 data['filename'] = tmpname
308 else:
308 else:
309 os.unlink(tmpname)
309 os.unlink(tmpname)
310 return data
310 return data
311
311
class patchmeta(object):
    """Metadata describing a single patched file.

    'op' records the performed operation (ADD, DELETE, RENAME, MODIFY
    or COPY).  'path' is the patched file path; 'oldpath' names the
    origin file for COPY/RENAME operations and is None otherwise.
    'mode' is None unless the file mode changed, in which case it is
    an (islink, isexec) pair where 'islink' is true for symlinks and
    'isexec' is true for executable files.  'binary' flags a git
    binary patch.
    """
    def __init__(self, path):
        self.path = path
        self.oldpath = None
        self.mode = None
        self.op = 'MODIFY'
        self.binary = False

    def setmode(self, mode):
        # Keep only the two st_mode bits we care about; the raw masked
        # values are stored (truthy/falsy), not normalized booleans.
        self.mode = (mode & 0o20000, mode & 0o100)

    def copy(self):
        # Field-by-field duplicate of this record.
        dup = patchmeta(self.path)
        dup.oldpath = self.oldpath
        dup.mode = self.mode
        dup.op = self.op
        dup.binary = self.binary
        return dup

    def _ispatchinga(self, afile):
        # '/dev/null' on the old side means the patch creates the file.
        if afile == '/dev/null':
            return self.op == 'ADD'
        return afile == 'a/' + (self.oldpath or self.path)

    def _ispatchingb(self, bfile):
        # '/dev/null' on the new side means the patch deletes the file.
        if bfile == '/dev/null':
            return self.op == 'DELETE'
        return bfile == 'b/' + self.path

    def ispatching(self, afile, bfile):
        return self._ispatchinga(afile) and self._ispatchingb(bfile)

    def __repr__(self):
        return "<patchmeta %s %r>" % (self.op, self.path)
357
357
def readgitpatch(lr):
    """extract git-style metadata about patches from <patchname>"""

    # Scan for "diff --git" headers and the extended header lines that
    # follow each one, accumulating one patchmeta per touched file.
    current = None
    collected = []
    for rawline in lr:
        rawline = rawline.rstrip(' \r\n')
        if rawline.startswith('diff --git a/'):
            m = gitre.match(rawline)
            if m:
                if current:
                    collected.append(current)
                current = patchmeta(m.group(2))
        elif current:
            if rawline.startswith('--- '):
                # Start of the diff body: this file's header is done.
                collected.append(current)
                current = None
                continue
            if rawline.startswith('rename from '):
                current.op = 'RENAME'
                current.oldpath = rawline[12:]
            elif rawline.startswith('rename to '):
                current.path = rawline[10:]
            elif rawline.startswith('copy from '):
                current.op = 'COPY'
                current.oldpath = rawline[10:]
            elif rawline.startswith('copy to '):
                current.path = rawline[8:]
            elif rawline.startswith('deleted file'):
                current.op = 'DELETE'
            elif rawline.startswith('new file mode '):
                current.op = 'ADD'
                # The mode is the trailing 6-octal-digit field.
                current.setmode(int(rawline[-6:], 8))
            elif rawline.startswith('new mode '):
                current.setmode(int(rawline[-6:], 8))
            elif rawline.startswith('GIT binary patch'):
                current.binary = True
    if current:
        collected.append(current)

    return collected
401
401
class linereader(object):
    """Wrap a file object so lines can be pushed back onto the stream."""

    def __init__(self, fp):
        self.fp = fp
        self.buf = []

    def push(self, line):
        # Queue a line to be returned before reading from fp again.
        # Pushed lines come back in the order they were pushed.
        if line is not None:
            self.buf.append(line)

    def readline(self):
        # Drain any pushed-back lines first, then fall through to fp.
        if self.buf:
            return self.buf.pop(0)
        return self.fp.readline()

    def __iter__(self):
        # Iterate until readline() returns the empty string (EOF).
        return iter(self.readline, '')
421
421
class abstractbackend(object):
    """Interface for objects receiving the results of patch application."""

    def __init__(self, ui):
        self.ui = ui

    def getfile(self, fname):
        """Return a (data, (islink, isexec)) tuple for the target file.

        Data is None if the file is missing or deleted.
        """
        raise NotImplementedError

    def setfile(self, fname, data, mode, copysource):
        """Write data to the target file fname and set its mode.

        mode is an (islink, isexec) tuple.  When data is None, the file
        content is left unchanged.  When the file was modified after
        being copied, copysource names the original file.
        """
        raise NotImplementedError

    def unlink(self, fname):
        """Unlink target file."""
        raise NotImplementedError

    def writerej(self, fname, failed, total, lines):
        """Write rejected hunk lines for fname.

        failed is the number of hunks which did not apply and total the
        total number of hunks for this file.  Default is a no-op.
        """

    def exists(self, fname):
        raise NotImplementedError

    def close(self):
        raise NotImplementedError
455
455
class fsbackend(abstractbackend):
    """Backend applying patches directly to the filesystem rooted at
    basedir, with no repository bookkeeping."""

    def __init__(self, ui, basedir):
        super(fsbackend, self).__init__(ui)
        # All file access goes through a vfs anchored at basedir.
        self.opener = vfsmod.vfs(basedir)

    def getfile(self, fname):
        # For symlinks the "data" is the link target itself.
        if self.opener.islink(fname):
            return (self.opener.readlink(fname), (True, False))

        isexec = False
        try:
            # Check the owner-executable bit via lstat.
            isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
        except OSError as e:
            # A missing file is expected here; re-raise anything else.
            if e.errno != errno.ENOENT:
                raise
        try:
            return (self.opener.read(fname), (False, isexec))
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise
        # Missing/deleted file: signal with (None, None).
        return None, None

    def setfile(self, fname, data, mode, copysource):
        # mode is an (islink, isexec) pair; copysource is unused by this
        # backend (subclasses use it for copy tracking).
        islink, isexec = mode
        if data is None:
            # Content unchanged: only adjust the flags.
            self.opener.setflags(fname, islink, isexec)
            return
        if islink:
            self.opener.symlink(data, fname)
        else:
            self.opener.write(fname, data)
            if isexec:
                self.opener.setflags(fname, False, True)

    def unlink(self, fname):
        # Remove the file and any now-empty parent directories; a
        # missing file is not an error.
        self.opener.unlinkpath(fname, ignoremissing=True)

    def writerej(self, fname, failed, total, lines):
        # Save rejected hunks next to the target as "<fname>.rej".
        fname = fname + ".rej"
        self.ui.warn(
            _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
            (failed, total, fname))
        fp = self.opener(fname, 'w')
        fp.writelines(lines)
        fp.close()

    def exists(self, fname):
        # lexists: a dangling symlink still counts as existing.
        return self.opener.lexists(fname)
504
504
class workingbackend(fsbackend):
    """Filesystem backend that additionally records changes so the
    repository dirstate can be updated when patching completes."""

    def __init__(self, ui, repo, similarity):
        super(workingbackend, self).__init__(ui, repo.root)
        self.repo = repo
        # Similarity threshold forwarded to scmutil.marktouched() for
        # rename detection.
        self.similarity = similarity
        self.removed = set()
        self.changed = set()
        self.copied = []

    def _checkknown(self, fname):
        # Refuse to patch a file that exists on disk but is unknown
        # ('?') to the dirstate.
        if self.repo.dirstate[fname] == '?' and self.exists(fname):
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def setfile(self, fname, data, mode, copysource):
        self._checkknown(fname)
        super(workingbackend, self).setfile(fname, data, mode, copysource)
        if copysource is not None:
            self.copied.append((copysource, fname))
        self.changed.add(fname)

    def unlink(self, fname):
        self._checkknown(fname)
        super(workingbackend, self).unlink(fname)
        self.removed.add(fname)
        self.changed.add(fname)

    def close(self):
        # Flush the recorded copies/removals/changes into the dirstate
        # and return the sorted list of files that were touched.
        wctx = self.repo[None]
        changed = set(self.changed)
        for src, dst in self.copied:
            scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
        if self.removed:
            wctx.forget(sorted(self.removed))
        for f in self.removed:
            if f not in self.repo.dirstate:
                # File was deleted and no longer belongs to the
                # dirstate, it was probably marked added then
                # deleted, and should not be considered by
                # marktouched().
                changed.discard(f)
        if changed:
            scmutil.marktouched(self.repo, changed, self.similarity)
        return sorted(self.changed)
548
548
class filestore(object):
    """Hold patched file contents, in memory up to maxsize bytes and
    spilling to a temporary directory beyond that.

    A maxsize of None selects the default (4 MiB); a negative maxsize
    keeps everything in memory.
    """

    def __init__(self, maxsize=None):
        self.opener = None    # lazily-created vfs for spilled files
        self.files = {}       # fname -> (tempname, mode, copied)
        self.created = 0      # counter used to name spilled files
        self.maxsize = 4 * (2 ** 20) if maxsize is None else maxsize
        self.size = 0         # bytes currently held in memory
        self.data = {}        # fname -> (data, mode, copied)

    def setfile(self, fname, data, mode, copied=None):
        fits = self.maxsize < 0 or self.size + len(data) <= self.maxsize
        if fits:
            self.data[fname] = (data, mode, copied)
            self.size += len(data)
            return
        if self.opener is None:
            # First spill: create the scratch directory on demand.
            self.opener = vfsmod.vfs(tempfile.mkdtemp(prefix='hg-patch-'))
        # Simple sequential names avoid any filename issues on disk.
        tmpname = '%d' % self.created
        self.opener.write(tmpname, data)
        self.created += 1
        self.files[fname] = (tmpname, mode, copied)

    def getfile(self, fname):
        # In-memory entries win; fall back to the spill directory.
        try:
            return self.data[fname]
        except KeyError:
            pass
        if self.opener and fname in self.files:
            tmpname, mode, copied = self.files[fname]
            return self.opener.read(tmpname), mode, copied
        return None, None, None

    def close(self):
        # Remove the spill directory, if one was ever created.
        if self.opener:
            shutil.rmtree(self.opener.base)
585
585
class repobackend(abstractbackend):
    """Backend that reads from a changectx and records patched results
    into a filestore instead of touching the working directory."""

    def __init__(self, ui, repo, ctx, store):
        super(repobackend, self).__init__(ui)
        self.repo = repo
        # ctx supplies the pre-patch file contents; store collects the
        # patched results.
        self.ctx = ctx
        self.store = store
        self.changed = set()
        self.removed = set()
        self.copied = {}

    def _checkknown(self, fname):
        # Only files known to the context may be patched.
        if fname not in self.ctx:
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def getfile(self, fname):
        try:
            fctx = self.ctx[fname]
        except error.LookupError:
            # Unknown file: signal with (None, None).
            return None, None
        flags = fctx.flags()
        # Translate the flag string into an (islink, isexec) pair.
        return fctx.data(), ('l' in flags, 'x' in flags)

    def setfile(self, fname, data, mode, copysource):
        if copysource:
            self._checkknown(copysource)
        if data is None:
            # Content unchanged (flags-only change): keep ctx data.
            data = self.ctx[fname].data()
        self.store.setfile(fname, data, mode, copysource)
        self.changed.add(fname)
        if copysource:
            self.copied[fname] = copysource

    def unlink(self, fname):
        # Removal is only recorded here; the store is never written.
        self._checkknown(fname)
        self.removed.add(fname)

    def exists(self, fname):
        return fname in self.ctx

    def close(self):
        # Report every file this backend touched.
        return self.changed | self.removed
627
627
# @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
# Raw bytes patterns: hunk headers are matched against bytes lines
# (consistent with diffre above), and the raw prefix avoids the
# invalid '\d'/'\*' string escapes that Python 3 rejects.
unidesc = re.compile(br'@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
contextdesc = re.compile(br'(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
# Accepted values for the patch 'eolmode' option.
eolmodes = ['strict', 'crlf', 'lf', 'auto']
632
632
633 class patchfile(object):
633 class patchfile(object):
634 def __init__(self, ui, gp, backend, store, eolmode='strict'):
634 def __init__(self, ui, gp, backend, store, eolmode='strict'):
635 self.fname = gp.path
635 self.fname = gp.path
636 self.eolmode = eolmode
636 self.eolmode = eolmode
637 self.eol = None
637 self.eol = None
638 self.backend = backend
638 self.backend = backend
639 self.ui = ui
639 self.ui = ui
640 self.lines = []
640 self.lines = []
641 self.exists = False
641 self.exists = False
642 self.missing = True
642 self.missing = True
643 self.mode = gp.mode
643 self.mode = gp.mode
644 self.copysource = gp.oldpath
644 self.copysource = gp.oldpath
645 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
645 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
646 self.remove = gp.op == 'DELETE'
646 self.remove = gp.op == 'DELETE'
647 if self.copysource is None:
647 if self.copysource is None:
648 data, mode = backend.getfile(self.fname)
648 data, mode = backend.getfile(self.fname)
649 else:
649 else:
650 data, mode = store.getfile(self.copysource)[:2]
650 data, mode = store.getfile(self.copysource)[:2]
651 if data is not None:
651 if data is not None:
652 self.exists = self.copysource is None or backend.exists(self.fname)
652 self.exists = self.copysource is None or backend.exists(self.fname)
653 self.missing = False
653 self.missing = False
654 if data:
654 if data:
655 self.lines = mdiff.splitnewlines(data)
655 self.lines = mdiff.splitnewlines(data)
656 if self.mode is None:
656 if self.mode is None:
657 self.mode = mode
657 self.mode = mode
658 if self.lines:
658 if self.lines:
659 # Normalize line endings
659 # Normalize line endings
660 if self.lines[0].endswith('\r\n'):
660 if self.lines[0].endswith('\r\n'):
661 self.eol = '\r\n'
661 self.eol = '\r\n'
662 elif self.lines[0].endswith('\n'):
662 elif self.lines[0].endswith('\n'):
663 self.eol = '\n'
663 self.eol = '\n'
664 if eolmode != 'strict':
664 if eolmode != 'strict':
665 nlines = []
665 nlines = []
666 for l in self.lines:
666 for l in self.lines:
667 if l.endswith('\r\n'):
667 if l.endswith('\r\n'):
668 l = l[:-2] + '\n'
668 l = l[:-2] + '\n'
669 nlines.append(l)
669 nlines.append(l)
670 self.lines = nlines
670 self.lines = nlines
671 else:
671 else:
672 if self.create:
672 if self.create:
673 self.missing = False
673 self.missing = False
674 if self.mode is None:
674 if self.mode is None:
675 self.mode = (False, False)
675 self.mode = (False, False)
676 if self.missing:
676 if self.missing:
677 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
677 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
678 self.ui.warn(_("(use '--prefix' to apply patch relative to the "
678 self.ui.warn(_("(use '--prefix' to apply patch relative to the "
679 "current directory)\n"))
679 "current directory)\n"))
680
680
681 self.hash = {}
681 self.hash = {}
682 self.dirty = 0
682 self.dirty = 0
683 self.offset = 0
683 self.offset = 0
684 self.skew = 0
684 self.skew = 0
685 self.rej = []
685 self.rej = []
686 self.fileprinted = False
686 self.fileprinted = False
687 self.printfile(False)
687 self.printfile(False)
688 self.hunks = 0
688 self.hunks = 0
689
689
690 def writelines(self, fname, lines, mode):
690 def writelines(self, fname, lines, mode):
691 if self.eolmode == 'auto':
691 if self.eolmode == 'auto':
692 eol = self.eol
692 eol = self.eol
693 elif self.eolmode == 'crlf':
693 elif self.eolmode == 'crlf':
694 eol = '\r\n'
694 eol = '\r\n'
695 else:
695 else:
696 eol = '\n'
696 eol = '\n'
697
697
698 if self.eolmode != 'strict' and eol and eol != '\n':
698 if self.eolmode != 'strict' and eol and eol != '\n':
699 rawlines = []
699 rawlines = []
700 for l in lines:
700 for l in lines:
701 if l and l[-1] == '\n':
701 if l and l[-1] == '\n':
702 l = l[:-1] + eol
702 l = l[:-1] + eol
703 rawlines.append(l)
703 rawlines.append(l)
704 lines = rawlines
704 lines = rawlines
705
705
706 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
706 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
707
707
708 def printfile(self, warn):
708 def printfile(self, warn):
709 if self.fileprinted:
709 if self.fileprinted:
710 return
710 return
711 if warn or self.ui.verbose:
711 if warn or self.ui.verbose:
712 self.fileprinted = True
712 self.fileprinted = True
713 s = _("patching file %s\n") % self.fname
713 s = _("patching file %s\n") % self.fname
714 if warn:
714 if warn:
715 self.ui.warn(s)
715 self.ui.warn(s)
716 else:
716 else:
717 self.ui.note(s)
717 self.ui.note(s)
718
718
719
719
720 def findlines(self, l, linenum):
720 def findlines(self, l, linenum):
721 # looks through the hash and finds candidate lines. The
721 # looks through the hash and finds candidate lines. The
722 # result is a list of line numbers sorted based on distance
722 # result is a list of line numbers sorted based on distance
723 # from linenum
723 # from linenum
724
724
725 cand = self.hash.get(l, [])
725 cand = self.hash.get(l, [])
726 if len(cand) > 1:
726 if len(cand) > 1:
727 # resort our list of potentials forward then back.
727 # resort our list of potentials forward then back.
728 cand.sort(key=lambda x: abs(x - linenum))
728 cand.sort(key=lambda x: abs(x - linenum))
729 return cand
729 return cand
730
730
731 def write_rej(self):
731 def write_rej(self):
732 # our rejects are a little different from patch(1). This always
732 # our rejects are a little different from patch(1). This always
733 # creates rejects in the same form as the original patch. A file
733 # creates rejects in the same form as the original patch. A file
734 # header is inserted so that you can run the reject through patch again
734 # header is inserted so that you can run the reject through patch again
735 # without having to type the filename.
735 # without having to type the filename.
736 if not self.rej:
736 if not self.rej:
737 return
737 return
738 base = os.path.basename(self.fname)
738 base = os.path.basename(self.fname)
739 lines = ["--- %s\n+++ %s\n" % (base, base)]
739 lines = ["--- %s\n+++ %s\n" % (base, base)]
740 for x in self.rej:
740 for x in self.rej:
741 for l in x.hunk:
741 for l in x.hunk:
742 lines.append(l)
742 lines.append(l)
743 if l[-1:] != '\n':
743 if l[-1:] != '\n':
744 lines.append("\n\ No newline at end of file\n")
744 lines.append("\n\ No newline at end of file\n")
745 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
745 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
746
746
747 def apply(self, h):
747 def apply(self, h):
748 if not h.complete():
748 if not h.complete():
749 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
749 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
750 (h.number, h.desc, len(h.a), h.lena, len(h.b),
750 (h.number, h.desc, len(h.a), h.lena, len(h.b),
751 h.lenb))
751 h.lenb))
752
752
753 self.hunks += 1
753 self.hunks += 1
754
754
755 if self.missing:
755 if self.missing:
756 self.rej.append(h)
756 self.rej.append(h)
757 return -1
757 return -1
758
758
759 if self.exists and self.create:
759 if self.exists and self.create:
760 if self.copysource:
760 if self.copysource:
761 self.ui.warn(_("cannot create %s: destination already "
761 self.ui.warn(_("cannot create %s: destination already "
762 "exists\n") % self.fname)
762 "exists\n") % self.fname)
763 else:
763 else:
764 self.ui.warn(_("file %s already exists\n") % self.fname)
764 self.ui.warn(_("file %s already exists\n") % self.fname)
765 self.rej.append(h)
765 self.rej.append(h)
766 return -1
766 return -1
767
767
768 if isinstance(h, binhunk):
768 if isinstance(h, binhunk):
769 if self.remove:
769 if self.remove:
770 self.backend.unlink(self.fname)
770 self.backend.unlink(self.fname)
771 else:
771 else:
772 l = h.new(self.lines)
772 l = h.new(self.lines)
773 self.lines[:] = l
773 self.lines[:] = l
774 self.offset += len(l)
774 self.offset += len(l)
775 self.dirty = True
775 self.dirty = True
776 return 0
776 return 0
777
777
778 horig = h
778 horig = h
779 if (self.eolmode in ('crlf', 'lf')
779 if (self.eolmode in ('crlf', 'lf')
780 or self.eolmode == 'auto' and self.eol):
780 or self.eolmode == 'auto' and self.eol):
781 # If new eols are going to be normalized, then normalize
781 # If new eols are going to be normalized, then normalize
782 # hunk data before patching. Otherwise, preserve input
782 # hunk data before patching. Otherwise, preserve input
783 # line-endings.
783 # line-endings.
784 h = h.getnormalized()
784 h = h.getnormalized()
785
785
786 # fast case first, no offsets, no fuzz
786 # fast case first, no offsets, no fuzz
787 old, oldstart, new, newstart = h.fuzzit(0, False)
787 old, oldstart, new, newstart = h.fuzzit(0, False)
788 oldstart += self.offset
788 oldstart += self.offset
789 orig_start = oldstart
789 orig_start = oldstart
790 # if there's skew we want to emit the "(offset %d lines)" even
790 # if there's skew we want to emit the "(offset %d lines)" even
791 # when the hunk cleanly applies at start + skew, so skip the
791 # when the hunk cleanly applies at start + skew, so skip the
792 # fast case code
792 # fast case code
793 if (self.skew == 0 and
793 if (self.skew == 0 and
794 diffhelpers.testhunk(old, self.lines, oldstart) == 0):
794 diffhelpers.testhunk(old, self.lines, oldstart) == 0):
795 if self.remove:
795 if self.remove:
796 self.backend.unlink(self.fname)
796 self.backend.unlink(self.fname)
797 else:
797 else:
798 self.lines[oldstart:oldstart + len(old)] = new
798 self.lines[oldstart:oldstart + len(old)] = new
799 self.offset += len(new) - len(old)
799 self.offset += len(new) - len(old)
800 self.dirty = True
800 self.dirty = True
801 return 0
801 return 0
802
802
803 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
803 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
804 self.hash = {}
804 self.hash = {}
805 for x, s in enumerate(self.lines):
805 for x, s in enumerate(self.lines):
806 self.hash.setdefault(s, []).append(x)
806 self.hash.setdefault(s, []).append(x)
807
807
808 for fuzzlen in xrange(self.ui.configint("patch", "fuzz") + 1):
808 for fuzzlen in xrange(self.ui.configint("patch", "fuzz") + 1):
809 for toponly in [True, False]:
809 for toponly in [True, False]:
810 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
810 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
811 oldstart = oldstart + self.offset + self.skew
811 oldstart = oldstart + self.offset + self.skew
812 oldstart = min(oldstart, len(self.lines))
812 oldstart = min(oldstart, len(self.lines))
813 if old:
813 if old:
814 cand = self.findlines(old[0][1:], oldstart)
814 cand = self.findlines(old[0][1:], oldstart)
815 else:
815 else:
816 # Only adding lines with no or fuzzed context, just
816 # Only adding lines with no or fuzzed context, just
817 # take the skew in account
817 # take the skew in account
818 cand = [oldstart]
818 cand = [oldstart]
819
819
820 for l in cand:
820 for l in cand:
821 if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
821 if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
822 self.lines[l : l + len(old)] = new
822 self.lines[l : l + len(old)] = new
823 self.offset += len(new) - len(old)
823 self.offset += len(new) - len(old)
824 self.skew = l - orig_start
824 self.skew = l - orig_start
825 self.dirty = True
825 self.dirty = True
826 offset = l - orig_start - fuzzlen
826 offset = l - orig_start - fuzzlen
827 if fuzzlen:
827 if fuzzlen:
828 msg = _("Hunk #%d succeeded at %d "
828 msg = _("Hunk #%d succeeded at %d "
829 "with fuzz %d "
829 "with fuzz %d "
830 "(offset %d lines).\n")
830 "(offset %d lines).\n")
831 self.printfile(True)
831 self.printfile(True)
832 self.ui.warn(msg %
832 self.ui.warn(msg %
833 (h.number, l + 1, fuzzlen, offset))
833 (h.number, l + 1, fuzzlen, offset))
834 else:
834 else:
835 msg = _("Hunk #%d succeeded at %d "
835 msg = _("Hunk #%d succeeded at %d "
836 "(offset %d lines).\n")
836 "(offset %d lines).\n")
837 self.ui.note(msg % (h.number, l + 1, offset))
837 self.ui.note(msg % (h.number, l + 1, offset))
838 return fuzzlen
838 return fuzzlen
839 self.printfile(True)
839 self.printfile(True)
840 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
840 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
841 self.rej.append(horig)
841 self.rej.append(horig)
842 return -1
842 return -1
843
843
844 def close(self):
844 def close(self):
845 if self.dirty:
845 if self.dirty:
846 self.writelines(self.fname, self.lines, self.mode)
846 self.writelines(self.fname, self.lines, self.mode)
847 self.write_rej()
847 self.write_rej()
848 return len(self.rej)
848 return len(self.rej)
849
849
850 class header(object):
850 class header(object):
851 """patch header
851 """patch header
852 """
852 """
853 diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
853 diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
854 diff_re = re.compile('diff -r .* (.*)$')
854 diff_re = re.compile('diff -r .* (.*)$')
855 allhunks_re = re.compile('(?:index|deleted file) ')
855 allhunks_re = re.compile('(?:index|deleted file) ')
856 pretty_re = re.compile('(?:new file|deleted file) ')
856 pretty_re = re.compile('(?:new file|deleted file) ')
857 special_re = re.compile('(?:index|deleted|copy|rename) ')
857 special_re = re.compile('(?:index|deleted|copy|rename) ')
858 newfile_re = re.compile('(?:new file)')
858 newfile_re = re.compile('(?:new file)')
859
859
860 def __init__(self, header):
860 def __init__(self, header):
861 self.header = header
861 self.header = header
862 self.hunks = []
862 self.hunks = []
863
863
864 def binary(self):
864 def binary(self):
865 return any(h.startswith('index ') for h in self.header)
865 return any(h.startswith('index ') for h in self.header)
866
866
867 def pretty(self, fp):
867 def pretty(self, fp):
868 for h in self.header:
868 for h in self.header:
869 if h.startswith('index '):
869 if h.startswith('index '):
870 fp.write(_('this modifies a binary file (all or nothing)\n'))
870 fp.write(_('this modifies a binary file (all or nothing)\n'))
871 break
871 break
872 if self.pretty_re.match(h):
872 if self.pretty_re.match(h):
873 fp.write(h)
873 fp.write(h)
874 if self.binary():
874 if self.binary():
875 fp.write(_('this is a binary file\n'))
875 fp.write(_('this is a binary file\n'))
876 break
876 break
877 if h.startswith('---'):
877 if h.startswith('---'):
878 fp.write(_('%d hunks, %d lines changed\n') %
878 fp.write(_('%d hunks, %d lines changed\n') %
879 (len(self.hunks),
879 (len(self.hunks),
880 sum([max(h.added, h.removed) for h in self.hunks])))
880 sum([max(h.added, h.removed) for h in self.hunks])))
881 break
881 break
882 fp.write(h)
882 fp.write(h)
883
883
884 def write(self, fp):
884 def write(self, fp):
885 fp.write(''.join(self.header))
885 fp.write(''.join(self.header))
886
886
887 def allhunks(self):
887 def allhunks(self):
888 return any(self.allhunks_re.match(h) for h in self.header)
888 return any(self.allhunks_re.match(h) for h in self.header)
889
889
890 def files(self):
890 def files(self):
891 match = self.diffgit_re.match(self.header[0])
891 match = self.diffgit_re.match(self.header[0])
892 if match:
892 if match:
893 fromfile, tofile = match.groups()
893 fromfile, tofile = match.groups()
894 if fromfile == tofile:
894 if fromfile == tofile:
895 return [fromfile]
895 return [fromfile]
896 return [fromfile, tofile]
896 return [fromfile, tofile]
897 else:
897 else:
898 return self.diff_re.match(self.header[0]).groups()
898 return self.diff_re.match(self.header[0]).groups()
899
899
900 def filename(self):
900 def filename(self):
901 return self.files()[-1]
901 return self.files()[-1]
902
902
903 def __repr__(self):
903 def __repr__(self):
904 return '<header %s>' % (' '.join(map(repr, self.files())))
904 return '<header %s>' % (' '.join(map(repr, self.files())))
905
905
906 def isnewfile(self):
906 def isnewfile(self):
907 return any(self.newfile_re.match(h) for h in self.header)
907 return any(self.newfile_re.match(h) for h in self.header)
908
908
909 def special(self):
909 def special(self):
910 # Special files are shown only at the header level and not at the hunk
910 # Special files are shown only at the header level and not at the hunk
911 # level for example a file that has been deleted is a special file.
911 # level for example a file that has been deleted is a special file.
912 # The user cannot change the content of the operation, in the case of
912 # The user cannot change the content of the operation, in the case of
913 # the deleted file he has to take the deletion or not take it, he
913 # the deleted file he has to take the deletion or not take it, he
914 # cannot take some of it.
914 # cannot take some of it.
915 # Newly added files are special if they are empty, they are not special
915 # Newly added files are special if they are empty, they are not special
916 # if they have some content as we want to be able to change it
916 # if they have some content as we want to be able to change it
917 nocontent = len(self.header) == 2
917 nocontent = len(self.header) == 2
918 emptynewfile = self.isnewfile() and nocontent
918 emptynewfile = self.isnewfile() and nocontent
919 return emptynewfile or \
919 return emptynewfile or \
920 any(self.special_re.match(h) for h in self.header)
920 any(self.special_re.match(h) for h in self.header)
921
921
922 class recordhunk(object):
922 class recordhunk(object):
923 """patch hunk
923 """patch hunk
924
924
925 XXX shouldn't we merge this with the other hunk class?
925 XXX shouldn't we merge this with the other hunk class?
926 """
926 """
927
927
928 def __init__(self, header, fromline, toline, proc, before, hunk, after,
928 def __init__(self, header, fromline, toline, proc, before, hunk, after,
929 maxcontext=None):
929 maxcontext=None):
930 def trimcontext(lines, reverse=False):
930 def trimcontext(lines, reverse=False):
931 if maxcontext is not None:
931 if maxcontext is not None:
932 delta = len(lines) - maxcontext
932 delta = len(lines) - maxcontext
933 if delta > 0:
933 if delta > 0:
934 if reverse:
934 if reverse:
935 return delta, lines[delta:]
935 return delta, lines[delta:]
936 else:
936 else:
937 return delta, lines[:maxcontext]
937 return delta, lines[:maxcontext]
938 return 0, lines
938 return 0, lines
939
939
940 self.header = header
940 self.header = header
941 trimedbefore, self.before = trimcontext(before, True)
941 trimedbefore, self.before = trimcontext(before, True)
942 self.fromline = fromline + trimedbefore
942 self.fromline = fromline + trimedbefore
943 self.toline = toline + trimedbefore
943 self.toline = toline + trimedbefore
944 _trimedafter, self.after = trimcontext(after, False)
944 _trimedafter, self.after = trimcontext(after, False)
945 self.proc = proc
945 self.proc = proc
946 self.hunk = hunk
946 self.hunk = hunk
947 self.added, self.removed = self.countchanges(self.hunk)
947 self.added, self.removed = self.countchanges(self.hunk)
948
948
949 def __eq__(self, v):
949 def __eq__(self, v):
950 if not isinstance(v, recordhunk):
950 if not isinstance(v, recordhunk):
951 return False
951 return False
952
952
953 return ((v.hunk == self.hunk) and
953 return ((v.hunk == self.hunk) and
954 (v.proc == self.proc) and
954 (v.proc == self.proc) and
955 (self.fromline == v.fromline) and
955 (self.fromline == v.fromline) and
956 (self.header.files() == v.header.files()))
956 (self.header.files() == v.header.files()))
957
957
958 def __hash__(self):
958 def __hash__(self):
959 return hash((tuple(self.hunk),
959 return hash((tuple(self.hunk),
960 tuple(self.header.files()),
960 tuple(self.header.files()),
961 self.fromline,
961 self.fromline,
962 self.proc))
962 self.proc))
963
963
964 def countchanges(self, hunk):
964 def countchanges(self, hunk):
965 """hunk -> (n+,n-)"""
965 """hunk -> (n+,n-)"""
966 add = len([h for h in hunk if h.startswith('+')])
966 add = len([h for h in hunk if h.startswith('+')])
967 rem = len([h for h in hunk if h.startswith('-')])
967 rem = len([h for h in hunk if h.startswith('-')])
968 return add, rem
968 return add, rem
969
969
970 def reversehunk(self):
970 def reversehunk(self):
971 """return another recordhunk which is the reverse of the hunk
971 """return another recordhunk which is the reverse of the hunk
972
972
973 If this hunk is diff(A, B), the returned hunk is diff(B, A). To do
973 If this hunk is diff(A, B), the returned hunk is diff(B, A). To do
974 that, swap fromline/toline and +/- signs while keep other things
974 that, swap fromline/toline and +/- signs while keep other things
975 unchanged.
975 unchanged.
976 """
976 """
977 m = {'+': '-', '-': '+', '\\': '\\'}
977 m = {'+': '-', '-': '+', '\\': '\\'}
978 hunk = ['%s%s' % (m[l[0:1]], l[1:]) for l in self.hunk]
978 hunk = ['%s%s' % (m[l[0:1]], l[1:]) for l in self.hunk]
979 return recordhunk(self.header, self.toline, self.fromline, self.proc,
979 return recordhunk(self.header, self.toline, self.fromline, self.proc,
980 self.before, hunk, self.after)
980 self.before, hunk, self.after)
981
981
982 def write(self, fp):
982 def write(self, fp):
983 delta = len(self.before) + len(self.after)
983 delta = len(self.before) + len(self.after)
984 if self.after and self.after[-1] == '\\ No newline at end of file\n':
984 if self.after and self.after[-1] == '\\ No newline at end of file\n':
985 delta -= 1
985 delta -= 1
986 fromlen = delta + self.removed
986 fromlen = delta + self.removed
987 tolen = delta + self.added
987 tolen = delta + self.added
988 fp.write('@@ -%d,%d +%d,%d @@%s\n' %
988 fp.write('@@ -%d,%d +%d,%d @@%s\n' %
989 (self.fromline, fromlen, self.toline, tolen,
989 (self.fromline, fromlen, self.toline, tolen,
990 self.proc and (' ' + self.proc)))
990 self.proc and (' ' + self.proc)))
991 fp.write(''.join(self.before + self.hunk + self.after))
991 fp.write(''.join(self.before + self.hunk + self.after))
992
992
993 pretty = write
993 pretty = write
994
994
995 def filename(self):
995 def filename(self):
996 return self.header.filename()
996 return self.header.filename()
997
997
998 def __repr__(self):
998 def __repr__(self):
999 return '<hunk %r@%d>' % (self.filename(), self.fromline)
999 return '<hunk %r@%d>' % (self.filename(), self.fromline)
1000
1000
1001 def getmessages():
1001 def getmessages():
1002 return {
1002 return {
1003 'multiple': {
1003 'multiple': {
1004 'apply': _("apply change %d/%d to '%s'?"),
1004 'apply': _("apply change %d/%d to '%s'?"),
1005 'discard': _("discard change %d/%d to '%s'?"),
1005 'discard': _("discard change %d/%d to '%s'?"),
1006 'record': _("record change %d/%d to '%s'?"),
1006 'record': _("record change %d/%d to '%s'?"),
1007 },
1007 },
1008 'single': {
1008 'single': {
1009 'apply': _("apply this change to '%s'?"),
1009 'apply': _("apply this change to '%s'?"),
1010 'discard': _("discard this change to '%s'?"),
1010 'discard': _("discard this change to '%s'?"),
1011 'record': _("record this change to '%s'?"),
1011 'record': _("record this change to '%s'?"),
1012 },
1012 },
1013 'help': {
1013 'help': {
1014 'apply': _('[Ynesfdaq?]'
1014 'apply': _('[Ynesfdaq?]'
1015 '$$ &Yes, apply this change'
1015 '$$ &Yes, apply this change'
1016 '$$ &No, skip this change'
1016 '$$ &No, skip this change'
1017 '$$ &Edit this change manually'
1017 '$$ &Edit this change manually'
1018 '$$ &Skip remaining changes to this file'
1018 '$$ &Skip remaining changes to this file'
1019 '$$ Apply remaining changes to this &file'
1019 '$$ Apply remaining changes to this &file'
1020 '$$ &Done, skip remaining changes and files'
1020 '$$ &Done, skip remaining changes and files'
1021 '$$ Apply &all changes to all remaining files'
1021 '$$ Apply &all changes to all remaining files'
1022 '$$ &Quit, applying no changes'
1022 '$$ &Quit, applying no changes'
1023 '$$ &? (display help)'),
1023 '$$ &? (display help)'),
1024 'discard': _('[Ynesfdaq?]'
1024 'discard': _('[Ynesfdaq?]'
1025 '$$ &Yes, discard this change'
1025 '$$ &Yes, discard this change'
1026 '$$ &No, skip this change'
1026 '$$ &No, skip this change'
1027 '$$ &Edit this change manually'
1027 '$$ &Edit this change manually'
1028 '$$ &Skip remaining changes to this file'
1028 '$$ &Skip remaining changes to this file'
1029 '$$ Discard remaining changes to this &file'
1029 '$$ Discard remaining changes to this &file'
1030 '$$ &Done, skip remaining changes and files'
1030 '$$ &Done, skip remaining changes and files'
1031 '$$ Discard &all changes to all remaining files'
1031 '$$ Discard &all changes to all remaining files'
1032 '$$ &Quit, discarding no changes'
1032 '$$ &Quit, discarding no changes'
1033 '$$ &? (display help)'),
1033 '$$ &? (display help)'),
1034 'record': _('[Ynesfdaq?]'
1034 'record': _('[Ynesfdaq?]'
1035 '$$ &Yes, record this change'
1035 '$$ &Yes, record this change'
1036 '$$ &No, skip this change'
1036 '$$ &No, skip this change'
1037 '$$ &Edit this change manually'
1037 '$$ &Edit this change manually'
1038 '$$ &Skip remaining changes to this file'
1038 '$$ &Skip remaining changes to this file'
1039 '$$ Record remaining changes to this &file'
1039 '$$ Record remaining changes to this &file'
1040 '$$ &Done, skip remaining changes and files'
1040 '$$ &Done, skip remaining changes and files'
1041 '$$ Record &all changes to all remaining files'
1041 '$$ Record &all changes to all remaining files'
1042 '$$ &Quit, recording no changes'
1042 '$$ &Quit, recording no changes'
1043 '$$ &? (display help)'),
1043 '$$ &? (display help)'),
1044 }
1044 }
1045 }
1045 }
1046
1046
1047 def filterpatch(ui, headers, operation=None):
1047 def filterpatch(ui, headers, operation=None):
1048 """Interactively filter patch chunks into applied-only chunks"""
1048 """Interactively filter patch chunks into applied-only chunks"""
1049 messages = getmessages()
1049 messages = getmessages()
1050
1050
1051 if operation is None:
1051 if operation is None:
1052 operation = 'record'
1052 operation = 'record'
1053
1053
1054 def prompt(skipfile, skipall, query, chunk):
1054 def prompt(skipfile, skipall, query, chunk):
1055 """prompt query, and process base inputs
1055 """prompt query, and process base inputs
1056
1056
1057 - y/n for the rest of file
1057 - y/n for the rest of file
1058 - y/n for the rest
1058 - y/n for the rest
1059 - ? (help)
1059 - ? (help)
1060 - q (quit)
1060 - q (quit)
1061
1061
1062 Return True/False and possibly updated skipfile and skipall.
1062 Return True/False and possibly updated skipfile and skipall.
1063 """
1063 """
1064 newpatches = None
1064 newpatches = None
1065 if skipall is not None:
1065 if skipall is not None:
1066 return skipall, skipfile, skipall, newpatches
1066 return skipall, skipfile, skipall, newpatches
1067 if skipfile is not None:
1067 if skipfile is not None:
1068 return skipfile, skipfile, skipall, newpatches
1068 return skipfile, skipfile, skipall, newpatches
1069 while True:
1069 while True:
1070 resps = messages['help'][operation]
1070 resps = messages['help'][operation]
1071 r = ui.promptchoice("%s %s" % (query, resps))
1071 r = ui.promptchoice("%s %s" % (query, resps))
1072 ui.write("\n")
1072 ui.write("\n")
1073 if r == 8: # ?
1073 if r == 8: # ?
1074 for c, t in ui.extractchoices(resps)[1]:
1074 for c, t in ui.extractchoices(resps)[1]:
1075 ui.write('%s - %s\n' % (c, encoding.lower(t)))
1075 ui.write('%s - %s\n' % (c, encoding.lower(t)))
1076 continue
1076 continue
1077 elif r == 0: # yes
1077 elif r == 0: # yes
1078 ret = True
1078 ret = True
1079 elif r == 1: # no
1079 elif r == 1: # no
1080 ret = False
1080 ret = False
1081 elif r == 2: # Edit patch
1081 elif r == 2: # Edit patch
1082 if chunk is None:
1082 if chunk is None:
1083 ui.write(_('cannot edit patch for whole file'))
1083 ui.write(_('cannot edit patch for whole file'))
1084 ui.write("\n")
1084 ui.write("\n")
1085 continue
1085 continue
1086 if chunk.header.binary():
1086 if chunk.header.binary():
1087 ui.write(_('cannot edit patch for binary file'))
1087 ui.write(_('cannot edit patch for binary file'))
1088 ui.write("\n")
1088 ui.write("\n")
1089 continue
1089 continue
1090 # Patch comment based on the Git one (based on comment at end of
1090 # Patch comment based on the Git one (based on comment at end of
1091 # https://mercurial-scm.org/wiki/RecordExtension)
1091 # https://mercurial-scm.org/wiki/RecordExtension)
1092 phelp = '---' + _("""
1092 phelp = '---' + _("""
1093 To remove '-' lines, make them ' ' lines (context).
1093 To remove '-' lines, make them ' ' lines (context).
1094 To remove '+' lines, delete them.
1094 To remove '+' lines, delete them.
1095 Lines starting with # will be removed from the patch.
1095 Lines starting with # will be removed from the patch.
1096
1096
1097 If the patch applies cleanly, the edited hunk will immediately be
1097 If the patch applies cleanly, the edited hunk will immediately be
1098 added to the record list. If it does not apply cleanly, a rejects
1098 added to the record list. If it does not apply cleanly, a rejects
1099 file will be generated: you can use that when you try again. If
1099 file will be generated: you can use that when you try again. If
1100 all lines of the hunk are removed, then the edit is aborted and
1100 all lines of the hunk are removed, then the edit is aborted and
1101 the hunk is left unchanged.
1101 the hunk is left unchanged.
1102 """)
1102 """)
1103 (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
1103 (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
1104 suffix=".diff", text=True)
1104 suffix=".diff", text=True)
1105 ncpatchfp = None
1105 ncpatchfp = None
1106 try:
1106 try:
1107 # Write the initial patch
1107 # Write the initial patch
1108 f = os.fdopen(patchfd, pycompat.sysstr("w"))
1108 f = os.fdopen(patchfd, pycompat.sysstr("w"))
1109 chunk.header.write(f)
1109 chunk.header.write(f)
1110 chunk.write(f)
1110 chunk.write(f)
1111 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
1111 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
1112 f.close()
1112 f.close()
1113 # Start the editor and wait for it to complete
1113 # Start the editor and wait for it to complete
1114 editor = ui.geteditor()
1114 editor = ui.geteditor()
1115 ret = ui.system("%s \"%s\"" % (editor, patchfn),
1115 ret = ui.system("%s \"%s\"" % (editor, patchfn),
1116 environ={'HGUSER': ui.username()},
1116 environ={'HGUSER': ui.username()},
1117 blockedtag='filterpatch')
1117 blockedtag='filterpatch')
1118 if ret != 0:
1118 if ret != 0:
1119 ui.warn(_("editor exited with exit code %d\n") % ret)
1119 ui.warn(_("editor exited with exit code %d\n") % ret)
1120 continue
1120 continue
1121 # Remove comment lines
1121 # Remove comment lines
1122 patchfp = open(patchfn)
1122 patchfp = open(patchfn)
1123 ncpatchfp = stringio()
1123 ncpatchfp = stringio()
1124 for line in util.iterfile(patchfp):
1124 for line in util.iterfile(patchfp):
1125 if not line.startswith('#'):
1125 if not line.startswith('#'):
1126 ncpatchfp.write(line)
1126 ncpatchfp.write(line)
1127 patchfp.close()
1127 patchfp.close()
1128 ncpatchfp.seek(0)
1128 ncpatchfp.seek(0)
1129 newpatches = parsepatch(ncpatchfp)
1129 newpatches = parsepatch(ncpatchfp)
1130 finally:
1130 finally:
1131 os.unlink(patchfn)
1131 os.unlink(patchfn)
1132 del ncpatchfp
1132 del ncpatchfp
1133 # Signal that the chunk shouldn't be applied as-is, but
1133 # Signal that the chunk shouldn't be applied as-is, but
1134 # provide the new patch to be used instead.
1134 # provide the new patch to be used instead.
1135 ret = False
1135 ret = False
1136 elif r == 3: # Skip
1136 elif r == 3: # Skip
1137 ret = skipfile = False
1137 ret = skipfile = False
1138 elif r == 4: # file (Record remaining)
1138 elif r == 4: # file (Record remaining)
1139 ret = skipfile = True
1139 ret = skipfile = True
1140 elif r == 5: # done, skip remaining
1140 elif r == 5: # done, skip remaining
1141 ret = skipall = False
1141 ret = skipall = False
1142 elif r == 6: # all
1142 elif r == 6: # all
1143 ret = skipall = True
1143 ret = skipall = True
1144 elif r == 7: # quit
1144 elif r == 7: # quit
1145 raise error.Abort(_('user quit'))
1145 raise error.Abort(_('user quit'))
1146 return ret, skipfile, skipall, newpatches
1146 return ret, skipfile, skipall, newpatches
1147
1147
1148 seen = set()
1148 seen = set()
1149 applied = {} # 'filename' -> [] of chunks
1149 applied = {} # 'filename' -> [] of chunks
1150 skipfile, skipall = None, None
1150 skipfile, skipall = None, None
1151 pos, total = 1, sum(len(h.hunks) for h in headers)
1151 pos, total = 1, sum(len(h.hunks) for h in headers)
1152 for h in headers:
1152 for h in headers:
1153 pos += len(h.hunks)
1153 pos += len(h.hunks)
1154 skipfile = None
1154 skipfile = None
1155 fixoffset = 0
1155 fixoffset = 0
1156 hdr = ''.join(h.header)
1156 hdr = ''.join(h.header)
1157 if hdr in seen:
1157 if hdr in seen:
1158 continue
1158 continue
1159 seen.add(hdr)
1159 seen.add(hdr)
1160 if skipall is None:
1160 if skipall is None:
1161 h.pretty(ui)
1161 h.pretty(ui)
1162 msg = (_('examine changes to %s?') %
1162 msg = (_('examine changes to %s?') %
1163 _(' and ').join("'%s'" % f for f in h.files()))
1163 _(' and ').join("'%s'" % f for f in h.files()))
1164 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
1164 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
1165 if not r:
1165 if not r:
1166 continue
1166 continue
1167 applied[h.filename()] = [h]
1167 applied[h.filename()] = [h]
1168 if h.allhunks():
1168 if h.allhunks():
1169 applied[h.filename()] += h.hunks
1169 applied[h.filename()] += h.hunks
1170 continue
1170 continue
1171 for i, chunk in enumerate(h.hunks):
1171 for i, chunk in enumerate(h.hunks):
1172 if skipfile is None and skipall is None:
1172 if skipfile is None and skipall is None:
1173 chunk.pretty(ui)
1173 chunk.pretty(ui)
1174 if total == 1:
1174 if total == 1:
1175 msg = messages['single'][operation] % chunk.filename()
1175 msg = messages['single'][operation] % chunk.filename()
1176 else:
1176 else:
1177 idx = pos - len(h.hunks) + i
1177 idx = pos - len(h.hunks) + i
1178 msg = messages['multiple'][operation] % (idx, total,
1178 msg = messages['multiple'][operation] % (idx, total,
1179 chunk.filename())
1179 chunk.filename())
1180 r, skipfile, skipall, newpatches = prompt(skipfile,
1180 r, skipfile, skipall, newpatches = prompt(skipfile,
1181 skipall, msg, chunk)
1181 skipall, msg, chunk)
1182 if r:
1182 if r:
1183 if fixoffset:
1183 if fixoffset:
1184 chunk = copy.copy(chunk)
1184 chunk = copy.copy(chunk)
1185 chunk.toline += fixoffset
1185 chunk.toline += fixoffset
1186 applied[chunk.filename()].append(chunk)
1186 applied[chunk.filename()].append(chunk)
1187 elif newpatches is not None:
1187 elif newpatches is not None:
1188 for newpatch in newpatches:
1188 for newpatch in newpatches:
1189 for newhunk in newpatch.hunks:
1189 for newhunk in newpatch.hunks:
1190 if fixoffset:
1190 if fixoffset:
1191 newhunk.toline += fixoffset
1191 newhunk.toline += fixoffset
1192 applied[newhunk.filename()].append(newhunk)
1192 applied[newhunk.filename()].append(newhunk)
1193 else:
1193 else:
1194 fixoffset += chunk.removed - chunk.added
1194 fixoffset += chunk.removed - chunk.added
1195 return (sum([h for h in applied.itervalues()
1195 return (sum([h for h in applied.itervalues()
1196 if h[0].special() or len(h) > 1], []), {})
1196 if h[0].special() or len(h) > 1], []), {})
1197 class hunk(object):
1197 class hunk(object):
1198 def __init__(self, desc, num, lr, context):
1198 def __init__(self, desc, num, lr, context):
1199 self.number = num
1199 self.number = num
1200 self.desc = desc
1200 self.desc = desc
1201 self.hunk = [desc]
1201 self.hunk = [desc]
1202 self.a = []
1202 self.a = []
1203 self.b = []
1203 self.b = []
1204 self.starta = self.lena = None
1204 self.starta = self.lena = None
1205 self.startb = self.lenb = None
1205 self.startb = self.lenb = None
1206 if lr is not None:
1206 if lr is not None:
1207 if context:
1207 if context:
1208 self.read_context_hunk(lr)
1208 self.read_context_hunk(lr)
1209 else:
1209 else:
1210 self.read_unified_hunk(lr)
1210 self.read_unified_hunk(lr)
1211
1211
1212 def getnormalized(self):
1212 def getnormalized(self):
1213 """Return a copy with line endings normalized to LF."""
1213 """Return a copy with line endings normalized to LF."""
1214
1214
1215 def normalize(lines):
1215 def normalize(lines):
1216 nlines = []
1216 nlines = []
1217 for line in lines:
1217 for line in lines:
1218 if line.endswith('\r\n'):
1218 if line.endswith('\r\n'):
1219 line = line[:-2] + '\n'
1219 line = line[:-2] + '\n'
1220 nlines.append(line)
1220 nlines.append(line)
1221 return nlines
1221 return nlines
1222
1222
1223 # Dummy object, it is rebuilt manually
1223 # Dummy object, it is rebuilt manually
1224 nh = hunk(self.desc, self.number, None, None)
1224 nh = hunk(self.desc, self.number, None, None)
1225 nh.number = self.number
1225 nh.number = self.number
1226 nh.desc = self.desc
1226 nh.desc = self.desc
1227 nh.hunk = self.hunk
1227 nh.hunk = self.hunk
1228 nh.a = normalize(self.a)
1228 nh.a = normalize(self.a)
1229 nh.b = normalize(self.b)
1229 nh.b = normalize(self.b)
1230 nh.starta = self.starta
1230 nh.starta = self.starta
1231 nh.startb = self.startb
1231 nh.startb = self.startb
1232 nh.lena = self.lena
1232 nh.lena = self.lena
1233 nh.lenb = self.lenb
1233 nh.lenb = self.lenb
1234 return nh
1234 return nh
1235
1235
1236 def read_unified_hunk(self, lr):
1236 def read_unified_hunk(self, lr):
1237 m = unidesc.match(self.desc)
1237 m = unidesc.match(self.desc)
1238 if not m:
1238 if not m:
1239 raise PatchError(_("bad hunk #%d") % self.number)
1239 raise PatchError(_("bad hunk #%d") % self.number)
1240 self.starta, self.lena, self.startb, self.lenb = m.groups()
1240 self.starta, self.lena, self.startb, self.lenb = m.groups()
1241 if self.lena is None:
1241 if self.lena is None:
1242 self.lena = 1
1242 self.lena = 1
1243 else:
1243 else:
1244 self.lena = int(self.lena)
1244 self.lena = int(self.lena)
1245 if self.lenb is None:
1245 if self.lenb is None:
1246 self.lenb = 1
1246 self.lenb = 1
1247 else:
1247 else:
1248 self.lenb = int(self.lenb)
1248 self.lenb = int(self.lenb)
1249 self.starta = int(self.starta)
1249 self.starta = int(self.starta)
1250 self.startb = int(self.startb)
1250 self.startb = int(self.startb)
1251 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
1251 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
1252 self.b)
1252 self.b)
1253 # if we hit eof before finishing out the hunk, the last line will
1253 # if we hit eof before finishing out the hunk, the last line will
1254 # be zero length. Lets try to fix it up.
1254 # be zero length. Lets try to fix it up.
1255 while len(self.hunk[-1]) == 0:
1255 while len(self.hunk[-1]) == 0:
1256 del self.hunk[-1]
1256 del self.hunk[-1]
1257 del self.a[-1]
1257 del self.a[-1]
1258 del self.b[-1]
1258 del self.b[-1]
1259 self.lena -= 1
1259 self.lena -= 1
1260 self.lenb -= 1
1260 self.lenb -= 1
1261 self._fixnewline(lr)
1261 self._fixnewline(lr)
1262
1262
1263 def read_context_hunk(self, lr):
1263 def read_context_hunk(self, lr):
1264 self.desc = lr.readline()
1264 self.desc = lr.readline()
1265 m = contextdesc.match(self.desc)
1265 m = contextdesc.match(self.desc)
1266 if not m:
1266 if not m:
1267 raise PatchError(_("bad hunk #%d") % self.number)
1267 raise PatchError(_("bad hunk #%d") % self.number)
1268 self.starta, aend = m.groups()
1268 self.starta, aend = m.groups()
1269 self.starta = int(self.starta)
1269 self.starta = int(self.starta)
1270 if aend is None:
1270 if aend is None:
1271 aend = self.starta
1271 aend = self.starta
1272 self.lena = int(aend) - self.starta
1272 self.lena = int(aend) - self.starta
1273 if self.starta:
1273 if self.starta:
1274 self.lena += 1
1274 self.lena += 1
1275 for x in xrange(self.lena):
1275 for x in xrange(self.lena):
1276 l = lr.readline()
1276 l = lr.readline()
1277 if l.startswith('---'):
1277 if l.startswith('---'):
1278 # lines addition, old block is empty
1278 # lines addition, old block is empty
1279 lr.push(l)
1279 lr.push(l)
1280 break
1280 break
1281 s = l[2:]
1281 s = l[2:]
1282 if l.startswith('- ') or l.startswith('! '):
1282 if l.startswith('- ') or l.startswith('! '):
1283 u = '-' + s
1283 u = '-' + s
1284 elif l.startswith(' '):
1284 elif l.startswith(' '):
1285 u = ' ' + s
1285 u = ' ' + s
1286 else:
1286 else:
1287 raise PatchError(_("bad hunk #%d old text line %d") %
1287 raise PatchError(_("bad hunk #%d old text line %d") %
1288 (self.number, x))
1288 (self.number, x))
1289 self.a.append(u)
1289 self.a.append(u)
1290 self.hunk.append(u)
1290 self.hunk.append(u)
1291
1291
1292 l = lr.readline()
1292 l = lr.readline()
1293 if l.startswith('\ '):
1293 if l.startswith('\ '):
1294 s = self.a[-1][:-1]
1294 s = self.a[-1][:-1]
1295 self.a[-1] = s
1295 self.a[-1] = s
1296 self.hunk[-1] = s
1296 self.hunk[-1] = s
1297 l = lr.readline()
1297 l = lr.readline()
1298 m = contextdesc.match(l)
1298 m = contextdesc.match(l)
1299 if not m:
1299 if not m:
1300 raise PatchError(_("bad hunk #%d") % self.number)
1300 raise PatchError(_("bad hunk #%d") % self.number)
1301 self.startb, bend = m.groups()
1301 self.startb, bend = m.groups()
1302 self.startb = int(self.startb)
1302 self.startb = int(self.startb)
1303 if bend is None:
1303 if bend is None:
1304 bend = self.startb
1304 bend = self.startb
1305 self.lenb = int(bend) - self.startb
1305 self.lenb = int(bend) - self.startb
1306 if self.startb:
1306 if self.startb:
1307 self.lenb += 1
1307 self.lenb += 1
1308 hunki = 1
1308 hunki = 1
1309 for x in xrange(self.lenb):
1309 for x in xrange(self.lenb):
1310 l = lr.readline()
1310 l = lr.readline()
1311 if l.startswith('\ '):
1311 if l.startswith('\ '):
1312 # XXX: the only way to hit this is with an invalid line range.
1312 # XXX: the only way to hit this is with an invalid line range.
1313 # The no-eol marker is not counted in the line range, but I
1313 # The no-eol marker is not counted in the line range, but I
1314 # guess there are diff(1) out there which behave differently.
1314 # guess there are diff(1) out there which behave differently.
1315 s = self.b[-1][:-1]
1315 s = self.b[-1][:-1]
1316 self.b[-1] = s
1316 self.b[-1] = s
1317 self.hunk[hunki - 1] = s
1317 self.hunk[hunki - 1] = s
1318 continue
1318 continue
1319 if not l:
1319 if not l:
1320 # line deletions, new block is empty and we hit EOF
1320 # line deletions, new block is empty and we hit EOF
1321 lr.push(l)
1321 lr.push(l)
1322 break
1322 break
1323 s = l[2:]
1323 s = l[2:]
1324 if l.startswith('+ ') or l.startswith('! '):
1324 if l.startswith('+ ') or l.startswith('! '):
1325 u = '+' + s
1325 u = '+' + s
1326 elif l.startswith(' '):
1326 elif l.startswith(' '):
1327 u = ' ' + s
1327 u = ' ' + s
1328 elif len(self.b) == 0:
1328 elif len(self.b) == 0:
1329 # line deletions, new block is empty
1329 # line deletions, new block is empty
1330 lr.push(l)
1330 lr.push(l)
1331 break
1331 break
1332 else:
1332 else:
1333 raise PatchError(_("bad hunk #%d old text line %d") %
1333 raise PatchError(_("bad hunk #%d old text line %d") %
1334 (self.number, x))
1334 (self.number, x))
1335 self.b.append(s)
1335 self.b.append(s)
1336 while True:
1336 while True:
1337 if hunki >= len(self.hunk):
1337 if hunki >= len(self.hunk):
1338 h = ""
1338 h = ""
1339 else:
1339 else:
1340 h = self.hunk[hunki]
1340 h = self.hunk[hunki]
1341 hunki += 1
1341 hunki += 1
1342 if h == u:
1342 if h == u:
1343 break
1343 break
1344 elif h.startswith('-'):
1344 elif h.startswith('-'):
1345 continue
1345 continue
1346 else:
1346 else:
1347 self.hunk.insert(hunki - 1, u)
1347 self.hunk.insert(hunki - 1, u)
1348 break
1348 break
1349
1349
1350 if not self.a:
1350 if not self.a:
1351 # this happens when lines were only added to the hunk
1351 # this happens when lines were only added to the hunk
1352 for x in self.hunk:
1352 for x in self.hunk:
1353 if x.startswith('-') or x.startswith(' '):
1353 if x.startswith('-') or x.startswith(' '):
1354 self.a.append(x)
1354 self.a.append(x)
1355 if not self.b:
1355 if not self.b:
1356 # this happens when lines were only deleted from the hunk
1356 # this happens when lines were only deleted from the hunk
1357 for x in self.hunk:
1357 for x in self.hunk:
1358 if x.startswith('+') or x.startswith(' '):
1358 if x.startswith('+') or x.startswith(' '):
1359 self.b.append(x[1:])
1359 self.b.append(x[1:])
1360 # @@ -start,len +start,len @@
1360 # @@ -start,len +start,len @@
1361 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
1361 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
1362 self.startb, self.lenb)
1362 self.startb, self.lenb)
1363 self.hunk[0] = self.desc
1363 self.hunk[0] = self.desc
1364 self._fixnewline(lr)
1364 self._fixnewline(lr)
1365
1365
def _fixnewline(self, lr):
    """Handle a trailing '\\ No newline at end of file' marker.

    Reads one line from *lr*; if it is the marker, strip the trailing
    newline from the last recorded hunk lines via diffhelpers,
    otherwise push the line back for the caller to consume.
    """
    l = lr.readline()
    if l.startswith('\ '):
        # the marker applies to the hunk line just before it
        diffhelpers.fix_newline(self.hunk, self.a, self.b)
    else:
        lr.push(l)
1372
1372
def complete(self):
    """Return True when both sides of the hunk were read in full."""
    got = (len(self.a), len(self.b))
    want = (self.lena, self.lenb)
    return got == want
1375
1375
def _fuzzit(self, old, new, fuzz, toponly):
    """Trim up to *fuzz* context lines from both ends of the hunk.

    Only pure context lines (prefixed with a space in self.hunk) are
    eligible for removal; *toponly* restricts trimming to the top.
    Returns (trimmed old lines, trimmed new lines, lines removed from
    the top).
    """
    # this removes context lines from the top and bottom of list 'l'. It
    # checks the hunk to make sure only context lines are removed, and then
    # returns a new shortened list of lines.
    fuzz = min(fuzz, len(old))
    if fuzz:
        top = 0
        bot = 0
        hlen = len(self.hunk)
        for x in xrange(hlen - 1):
            # the hunk starts with the @@ line, so use x+1
            if self.hunk[x + 1][0] == ' ':
                top += 1
            else:
                break
        if not toponly:
            # count trailing context lines the same way, from the bottom
            for x in xrange(hlen - 1):
                if self.hunk[hlen - bot - 1][0] == ' ':
                    bot += 1
                else:
                    break

        # never remove more than the requested fuzz from either end
        bot = min(fuzz, bot)
        top = min(fuzz, top)
        return old[top:len(old) - bot], new[top:len(new) - bot], top
    return old, new, 0
1402
1402
def fuzzit(self, fuzz, toponly):
    """Return fuzzed (old, oldstart, new, newstart) for this hunk.

    Delegates the context trimming to _fuzzit and converts the hunk's
    1-based start offsets into 0-based ones, shifted by the number of
    lines trimmed from the top.
    """
    old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
    oldstart = self.starta + top
    newstart = self.startb + top
    # zero length hunk ranges already have their start decremented
    if self.lena and oldstart > 0:
        oldstart -= 1
    if self.lenb and newstart > 0:
        newstart -= 1
    return old, oldstart, new, newstart
1413
1413
class binhunk(object):
    """A 'GIT binary patch' hunk, either 'literal' or 'delta' encoded."""
    def __init__(self, lr, fname):
        # decoded payload; stays None until _read() succeeds
        self.text = None
        # True when the payload is a delta against the previous contents
        self.delta = False
        self.hunk = ['GIT binary patch\n']
        self._fname = fname
        self._read(lr)

    def complete(self):
        return self.text is not None

    def new(self, lines):
        if self.delta:
            # apply the binary delta on top of the old file contents
            return [applybindelta(self.text, ''.join(lines))]
        return [self.text]

    def _read(self, lr):
        def getline(lr, hunk):
            # read one raw line, record it in self.hunk, return it stripped
            l = lr.readline()
            hunk.append(l)
            return l.rstrip('\r\n')

        size = 0
        # scan forward for the 'literal <size>' or 'delta <size>' header
        while True:
            line = getline(lr, self.hunk)
            if not line:
                raise PatchError(_('could not extract "%s" binary data')
                                 % self._fname)
            if line.startswith('literal '):
                size = int(line[8:].rstrip())
                break
            if line.startswith('delta '):
                size = int(line[6:].rstrip())
                self.delta = True
                break
        dec = []
        line = getline(lr, self.hunk)
        while len(line) > 1:
            # git's base85 framing: the first character encodes the decoded
            # length of the line, 'A'..'Z' -> 1..26, 'a'..'z' -> 27..52
            l = line[0:1]
            if l <= 'Z' and l >= 'A':
                l = ord(l) - ord('A') + 1
            else:
                l = ord(l) - ord('a') + 27
            try:
                dec.append(util.b85decode(line[1:])[:l])
            except ValueError as e:
                raise PatchError(_('could not decode "%s" binary patch: %s')
                                 % (self._fname, util.forcebytestr(e)))
            line = getline(lr, self.hunk)
        # payload is zlib-compressed after base85 decoding
        text = zlib.decompress(''.join(dec))
        if len(text) != size:
            raise PatchError(_('"%s" length is %d bytes, should be %d')
                             % (self._fname, len(text), size))
        self.text = text
1469
1469
def parsefilename(str):
    """Extract the file name from a '--- name<TAB|SPACE>junk' line.

    The leading four characters ('--- ' or '+++ ') are skipped and
    everything from the first tab — or, when there is no tab, the
    first space — onwards is discarded. Trailing CR/LF are stripped.
    """
    rest = str[4:].rstrip('\r\n')
    # prefer a tab separator; fall back to a space
    for sep in ('\t', ' '):
        cut = rest.find(sep)
        if cut >= 0:
            return rest[:cut]
    return rest
1479
1479
def reversehunks(hunks):
    """Return *hunks* with the direction of every hunk flipped.

    *hunks* is the flat list produced by patch.filterpatch, of the
    form [header1, hunk1, hunk2, header2, ...]. Every item that can
    reverse itself (it exposes a ``reversehunk`` method, detected via
    util.safehasattr) is replaced by its reversed counterpart; headers
    and any other items are passed through unchanged. A new list is
    returned; the input list is not modified.
    """
    return [c.reversehunk() if util.safehasattr(c, 'reversehunk') else c
            for c in hunks]
1542
1542
def parsepatch(originalchunks, maxcontext=None):
    """patch -> [] of headers -> [] of hunks

    If maxcontext is not None, trim context lines if necessary.

    >>> rawpatch = b'''diff --git a/folder1/g b/folder1/g
    ... --- a/folder1/g
    ... +++ b/folder1/g
    ... @@ -1,8 +1,10 @@
    ... 1
    ... 2
    ... -3
    ... 4
    ... 5
    ... 6
    ... +6.1
    ... +6.2
    ... 7
    ... 8
    ... +9'''
    >>> out = util.stringio()
    >>> headers = parsepatch([rawpatch], maxcontext=1)
    >>> for header in headers:
    ...     header.write(out)
    ...     for hunk in header.hunks:
    ...         hunk.write(out)
    >>> print(pycompat.sysstr(out.getvalue()))
    diff --git a/folder1/g b/folder1/g
    --- a/folder1/g
    +++ b/folder1/g
    @@ -2,3 +2,2 @@
    2
    -3
    4
    @@ -6,2 +5,4 @@
    6
    +6.1
    +6.2
    7
    @@ -8,1 +9,2 @@
    8
    +9
    """
    class parser(object):
        """patch parsing state machine"""
        def __init__(self):
            self.fromline = 0
            self.toline = 0
            self.proc = ''
            self.header = None
            self.context = []
            self.before = []
            self.hunk = []
            self.headers = []

        def addrange(self, limits):
            # start a new hunk range (@@ -a,b +c,d @@ [proc])
            fromstart, fromend, tostart, toend, proc = limits
            self.fromline = int(fromstart)
            self.toline = int(tostart)
            self.proc = proc

        def addcontext(self, context):
            # flush the pending hunk (if any), using *context* as its
            # trailing context, then remember it as leading context for
            # the next hunk
            if self.hunk:
                h = recordhunk(self.header, self.fromline, self.toline,
                        self.proc, self.before, self.hunk, context, maxcontext)
                self.header.hunks.append(h)
                self.fromline += len(self.before) + h.removed
                self.toline += len(self.before) + h.added
                self.before = []
                self.hunk = []
            self.context = context

        def addhunk(self, hunk):
            # the context gathered so far becomes this hunk's 'before'
            if self.context:
                self.before = self.context
                self.context = []
            self.hunk = hunk

        def newfile(self, hdr):
            # close out the previous file before starting a new header
            self.addcontext([])
            h = header(hdr)
            self.headers.append(h)
            self.header = h

        def addother(self, line):
            pass # 'other' lines are ignored

        def finished(self):
            self.addcontext([])
            return self.headers

        # state -> {event -> handler}; values are plain (unbound)
        # functions, so they are invoked below with the parser instance
        # passed explicitly as the first argument
        transitions = {
            'file': {'context': addcontext,
                     'file': newfile,
                     'hunk': addhunk,
                     'range': addrange},
            'context': {'file': newfile,
                        'hunk': addhunk,
                        'range': addrange,
                        'other': addother},
            'hunk': {'context': addcontext,
                     'file': newfile,
                     'range': addrange},
            'range': {'context': addcontext,
                      'hunk': addhunk},
            'other': {'other': addother},
            }

    p = parser()
    fp = stringio()
    fp.write(''.join(originalchunks))
    fp.seek(0)

    state = 'context'
    for newstate, data in scanpatch(fp):
        try:
            # note: p is passed explicitly, see the transitions comment
            p.transitions[state][newstate](p, data)
        except KeyError:
            raise PatchError('unhandled transition: %s -> %s' %
                             (state, newstate))
        state = newstate
    del fp
    return p.finished()
1665 return p.finished()
1666
1666
def pathtransform(path, strip, prefix):
    '''turn a path from a patch into a path suitable for the repository

    prefix, if not empty, is expected to be normalized with a / at the end.

    Returns (stripped components, path in repository).

    >>> pathtransform(b'a/b/c', 0, b'')
    ('', 'a/b/c')
    >>> pathtransform(b' a/b/c ', 0, b'')
    ('', ' a/b/c')
    >>> pathtransform(b' a/b/c ', 2, b'')
    ('a/b/', 'c')
    >>> pathtransform(b'a/b/c', 0, b'd/e/')
    ('', 'd/e/a/b/c')
    >>> pathtransform(b' a//b/c ', 2, b'd/e/')
    ('a//b/', 'd/e/c')
    >>> pathtransform(b'a/b/c', 3, b'')
    Traceback (most recent call last):
    PatchError: unable to strip away 1 of 3 dirs from a/b/c
    '''
    if strip == 0:
        return '', prefix + path.rstrip()
    end = len(path)
    pos = 0
    # advance pos past one path component per strip level
    for done in xrange(strip):
        pos = path.find('/', pos)
        if pos == -1:
            raise PatchError(_("unable to strip away %d of %d dirs from %s") %
                             (strip - done, strip, path))
        pos += 1
        # consume '//' in the path
        while pos < end - 1 and path[pos:pos + 1] == '/':
            pos += 1
    return path[:pos].lstrip(), prefix + path[pos:].rstrip()
1703 return path[:i].lstrip(), prefix + path[i:].rstrip()
1704
1704
def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
    """Build a patchmeta describing what this hunk does to which file.

    Decides, from the '---'/'+++' names, the hunk ranges and what
    actually exists in *backend*, which file is being patched and
    whether the operation is an ADD, a DELETE or a plain modification.
    Raises PatchError when neither source nor destination is usable.
    """
    nulla = afile_orig == "/dev/null"
    nullb = bfile_orig == "/dev/null"
    # a zero-length, zero-start range against /dev/null means creation
    # (resp. removal) of the file
    create = nulla and hunk.starta == 0 and hunk.lena == 0
    remove = nullb and hunk.startb == 0 and hunk.lenb == 0
    abase, afile = pathtransform(afile_orig, strip, prefix)
    gooda = not nulla and backend.exists(afile)
    bbase, bfile = pathtransform(bfile_orig, strip, prefix)
    if afile == bfile:
        goodb = gooda
    else:
        goodb = not nullb and backend.exists(bfile)
    missing = not goodb and not gooda and not create

    # some diff programs apparently produce patches where the afile is
    # not /dev/null, but afile starts with bfile
    abasedir = afile[:afile.rfind('/') + 1]
    bbasedir = bfile[:bfile.rfind('/') + 1]
    if (missing and abasedir == bbasedir and afile.startswith(bfile)
        and hunk.starta == 0 and hunk.lena == 0):
        create = True
        missing = False

    # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
    # diff is between a file and its backup. In this case, the original
    # file should be patched (see original mpatch code).
    isbackup = (abase == bbase and bfile.startswith(afile))
    fname = None
    if not missing:
        if gooda and goodb:
            if isbackup:
                fname = afile
            else:
                fname = bfile
        elif gooda:
            fname = afile

    if not fname:
        # fall back on the patch's own naming when the backend has
        # neither side of the file
        if not nullb:
            if isbackup:
                fname = afile
            else:
                fname = bfile
        elif not nulla:
            fname = afile
        else:
            raise PatchError(_("undefined source and destination files"))

    gp = patchmeta(fname)
    if create:
        gp.op = 'ADD'
    elif remove:
        gp.op = 'DELETE'
    return gp
1759
1759
def scanpatch(fp):
    """like patch.iterhunks, but yield different events

    - ('file', [header_lines + fromfile + tofile])
    - ('context', [context_lines])
    - ('hunk', [hunk_lines])
    - ('range', (-start,len, +start,len, proc))
    """
    lines_re = re.compile(br'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
    lr = linereader(fp)

    def scanwhile(first, p):
        """scan lr while predicate holds"""
        lines = [first]
        for line in iter(lr.readline, ''):
            if p(line):
                lines.append(line)
            else:
                lr.push(line)
                break
        return lines

    for line in iter(lr.readline, ''):
        if line.startswith('diff --git a/') or line.startswith('diff -r '):
            def notheader(line):
                s = line.split(None, 1)
                return not s or s[0] not in ('---', 'diff')
            header = scanwhile(line, notheader)
            fromfile = lr.readline()
            if fromfile.startswith('---'):
                tofile = lr.readline()
                header += [fromfile, tofile]
            else:
                lr.push(fromfile)
            yield 'file', header
        # first-character checks use a [0:1] slice rather than [0]
        # indexing: on Python 3, indexing bytes yields an int, which
        # would never compare equal to (or be a member of) a string
        elif line[0:1] == ' ':
            yield 'context', scanwhile(line, lambda l: l[0:1] in ' \\')
        elif line[0:1] in '-+':
            yield 'hunk', scanwhile(line, lambda l: l[0:1] in '-+\\')
        else:
            m = lines_re.match(line)
            if m:
                yield 'range', m.groups()
            else:
                yield 'other', line
1805
1805
def scangitpatch(lr, firstline):
    """
    Git patches can emit:
    - rename a to b
    - change b
    - copy a to c
    - change c

    We cannot apply this sequence as-is, the renamed 'a' could not be
    found for it would have been renamed already. And we cannot copy
    from 'b' instead because 'b' would have been changed already. So
    we scan the git patch for copy and rename commands so we can
    perform the copies ahead of time.
    """
    pos = 0
    try:
        pos = lr.fp.tell()
        fp = lr.fp
    except IOError:
        # the underlying file is not seekable (e.g. a pipe): buffer the
        # whole remaining input so we can rewind after the scan
        fp = stringio(lr.fp.read())
    gitlr = linereader(fp)
    gitlr.push(firstline)
    gitpatches = readgitpatch(gitlr)
    # rewind so the normal patch parsing re-reads the same data
    fp.seek(pos)
    return gitpatches
1831
1831
1832 def iterhunks(fp):
1832 def iterhunks(fp):
1833 """Read a patch and yield the following events:
1833 """Read a patch and yield the following events:
1834 - ("file", afile, bfile, firsthunk): select a new target file.
1834 - ("file", afile, bfile, firsthunk): select a new target file.
1835 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1835 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1836 "file" event.
1836 "file" event.
1837 - ("git", gitchanges): current diff is in git format, gitchanges
1837 - ("git", gitchanges): current diff is in git format, gitchanges
1838 maps filenames to gitpatch records. Unique event.
1838 maps filenames to gitpatch records. Unique event.
1839 """
1839 """
1840 afile = ""
1840 afile = ""
1841 bfile = ""
1841 bfile = ""
1842 state = None
1842 state = None
1843 hunknum = 0
1843 hunknum = 0
1844 emitfile = newfile = False
1844 emitfile = newfile = False
1845 gitpatches = None
1845 gitpatches = None
1846
1846
1847 # our states
1847 # our states
1848 BFILE = 1
1848 BFILE = 1
1849 context = None
1849 context = None
1850 lr = linereader(fp)
1850 lr = linereader(fp)
1851
1851
1852 for x in iter(lr.readline, ''):
1852 for x in iter(lr.readline, ''):
1853 if state == BFILE and (
1853 if state == BFILE and (
1854 (not context and x[0] == '@')
1854 (not context and x[0] == '@')
1855 or (context is not False and x.startswith('***************'))
1855 or (context is not False and x.startswith('***************'))
1856 or x.startswith('GIT binary patch')):
1856 or x.startswith('GIT binary patch')):
1857 gp = None
1857 gp = None
1858 if (gitpatches and
1858 if (gitpatches and
1859 gitpatches[-1].ispatching(afile, bfile)):
1859 gitpatches[-1].ispatching(afile, bfile)):
1860 gp = gitpatches.pop()
1860 gp = gitpatches.pop()
1861 if x.startswith('GIT binary patch'):
1861 if x.startswith('GIT binary patch'):
1862 h = binhunk(lr, gp.path)
1862 h = binhunk(lr, gp.path)
1863 else:
1863 else:
1864 if context is None and x.startswith('***************'):
1864 if context is None and x.startswith('***************'):
1865 context = True
1865 context = True
1866 h = hunk(x, hunknum + 1, lr, context)
1866 h = hunk(x, hunknum + 1, lr, context)
1867 hunknum += 1
1867 hunknum += 1
1868 if emitfile:
1868 if emitfile:
1869 emitfile = False
1869 emitfile = False
1870 yield 'file', (afile, bfile, h, gp and gp.copy() or None)
1870 yield 'file', (afile, bfile, h, gp and gp.copy() or None)
1871 yield 'hunk', h
1871 yield 'hunk', h
1872 elif x.startswith('diff --git a/'):
1872 elif x.startswith('diff --git a/'):
1873 m = gitre.match(x.rstrip(' \r\n'))
1873 m = gitre.match(x.rstrip(' \r\n'))
1874 if not m:
1874 if not m:
1875 continue
1875 continue
1876 if gitpatches is None:
1876 if gitpatches is None:
1877 # scan whole input for git metadata
1877 # scan whole input for git metadata
1878 gitpatches = scangitpatch(lr, x)
1878 gitpatches = scangitpatch(lr, x)
1879 yield 'git', [g.copy() for g in gitpatches
1879 yield 'git', [g.copy() for g in gitpatches
1880 if g.op in ('COPY', 'RENAME')]
1880 if g.op in ('COPY', 'RENAME')]
1881 gitpatches.reverse()
1881 gitpatches.reverse()
1882 afile = 'a/' + m.group(1)
1882 afile = 'a/' + m.group(1)
1883 bfile = 'b/' + m.group(2)
1883 bfile = 'b/' + m.group(2)
1884 while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
1884 while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
1885 gp = gitpatches.pop()
1885 gp = gitpatches.pop()
1886 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1886 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1887 if not gitpatches:
1887 if not gitpatches:
1888 raise PatchError(_('failed to synchronize metadata for "%s"')
1888 raise PatchError(_('failed to synchronize metadata for "%s"')
1889 % afile[2:])
1889 % afile[2:])
1890 gp = gitpatches[-1]
1890 gp = gitpatches[-1]
1891 newfile = True
1891 newfile = True
1892 elif x.startswith('---'):
1892 elif x.startswith('---'):
1893 # check for a unified diff
1893 # check for a unified diff
1894 l2 = lr.readline()
1894 l2 = lr.readline()
1895 if not l2.startswith('+++'):
1895 if not l2.startswith('+++'):
1896 lr.push(l2)
1896 lr.push(l2)
1897 continue
1897 continue
1898 newfile = True
1898 newfile = True
1899 context = False
1899 context = False
1900 afile = parsefilename(x)
1900 afile = parsefilename(x)
1901 bfile = parsefilename(l2)
1901 bfile = parsefilename(l2)
1902 elif x.startswith('***'):
1902 elif x.startswith('***'):
1903 # check for a context diff
1903 # check for a context diff
1904 l2 = lr.readline()
1904 l2 = lr.readline()
1905 if not l2.startswith('---'):
1905 if not l2.startswith('---'):
1906 lr.push(l2)
1906 lr.push(l2)
1907 continue
1907 continue
1908 l3 = lr.readline()
1908 l3 = lr.readline()
1909 lr.push(l3)
1909 lr.push(l3)
1910 if not l3.startswith("***************"):
1910 if not l3.startswith("***************"):
1911 lr.push(l2)
1911 lr.push(l2)
1912 continue
1912 continue
1913 newfile = True
1913 newfile = True
1914 context = True
1914 context = True
1915 afile = parsefilename(x)
1915 afile = parsefilename(x)
1916 bfile = parsefilename(l2)
1916 bfile = parsefilename(l2)
1917
1917
1918 if newfile:
1918 if newfile:
1919 newfile = False
1919 newfile = False
1920 emitfile = True
1920 emitfile = True
1921 state = BFILE
1921 state = BFILE
1922 hunknum = 0
1922 hunknum = 0
1923
1923
1924 while gitpatches:
1924 while gitpatches:
1925 gp = gitpatches.pop()
1925 gp = gitpatches.pop()
1926 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1926 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1927
1927
def applybindelta(binchunk, data):
    """Apply a binary delta hunk
    The algorithm used is the algorithm from git's patch-delta.c

    binchunk is the raw delta payload, data is the source content the
    delta refers to.  Returns the reconstructed target content.

    All byte accesses use one-byte slices (b[i:i + 1]) rather than
    indexing so the code behaves identically on Python 2 (str) and
    Python 3 (bytes), where indexing a bytes object yields an int.
    """
    def deltahead(binchunk):
        # Skip the variable-length size header at the start of the
        # delta: a byte with the high bit (0x80) set means more header
        # bytes follow.  Returns the number of bytes consumed.
        i = 0
        while i < len(binchunk):
            more = ord(binchunk[i:i + 1]) & 0x80
            i += 1
            if not more:
                return i
        return i
    out = b''
    s = deltahead(binchunk)
    binchunk = binchunk[s:]  # drop the source-size header
    s = deltahead(binchunk)
    binchunk = binchunk[s:]  # drop the target-size header
    i = 0
    while i < len(binchunk):
        cmd = ord(binchunk[i:i + 1])
        i += 1
        if (cmd & 0x80):
            # copy command: the low bits select which offset/size bytes
            # follow (little-endian, only the flagged bytes are present)
            offset = 0
            size = 0
            if (cmd & 0x01):
                offset = ord(binchunk[i:i + 1])
                i += 1
            if (cmd & 0x02):
                offset |= ord(binchunk[i:i + 1]) << 8
                i += 1
            if (cmd & 0x04):
                offset |= ord(binchunk[i:i + 1]) << 16
                i += 1
            if (cmd & 0x08):
                offset |= ord(binchunk[i:i + 1]) << 24
                i += 1
            if (cmd & 0x10):
                size = ord(binchunk[i:i + 1])
                i += 1
            if (cmd & 0x20):
                size |= ord(binchunk[i:i + 1]) << 8
                i += 1
            if (cmd & 0x40):
                size |= ord(binchunk[i:i + 1]) << 16
                i += 1
            if size == 0:
                # per patch-delta.c, an encoded size of zero means 64k
                size = 0x10000
            offset_end = offset + size
            out += data[offset:offset_end]
        elif cmd != 0:
            # literal insert: the next 'cmd' bytes are copied verbatim
            offset_end = i + cmd
            out += binchunk[i:offset_end]
            i += cmd
        else:
            raise PatchError(_('unexpected delta opcode 0'))
    return out
1983
1983
def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
    """Read a patch from fp and attempt to apply it through ``backend``.

    Returns 0 when the patch applied cleanly, -1 if any hunks were
    rejected, and 1 when the patch applied but with fuzz.

    With eolmode='strict' both the patch content and the patched files
    are handled in binary mode; any other mode ignores line endings
    while patching and then normalizes them according to eolmode.
    """
    return _applydiff(ui, fp, patchfile, backend, store,
                      strip=strip, prefix=prefix, eolmode=eolmode)
1996
1996
def _canonprefix(repo, prefix):
    """Canonicalize ``prefix`` against the repository root.

    An empty prefix is returned unchanged; a non-empty one is
    canonicalized relative to the repo root/cwd and, unless it
    canonicalizes to the empty string, given a trailing slash.
    """
    if not prefix:
        return prefix
    canonical = pathutil.canonpath(repo.root, repo.getcwd(), prefix)
    if canonical == '':
        return canonical
    return canonical + '/'
2003
2003
def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
               eolmode='strict'):
    # Shared worker behind applydiff(): consumes the (state, values)
    # events produced by iterhunks(fp) and drives 'backend' (the patch
    # target) and 'store' (file data for copies/renames).  'patcher' is
    # a factory creating a per-file patching object.  Returns 0 for a
    # clean apply, 1 if any hunk applied with fuzz, -1 if any hunk was
    # rejected.
    prefix = _canonprefix(backend.repo, prefix)
    def pstrip(p):
        # drop 'strip - 1' leading path components, then prepend prefix
        return pathtransform(p, strip - 1, prefix)[1]

    rejects = 0
    err = 0
    current_file = None

    for state, values in iterhunks(fp):
        if state == 'hunk':
            if not current_file:
                # the file-level setup failed earlier; skip its hunks
                continue
            ret = current_file.apply(values)
            if ret > 0:
                err = 1
        elif state == 'file':
            # finish the previous file before starting the next one
            if current_file:
                rejects += current_file.close()
                current_file = None
            afile, bfile, first_hunk, gp = values
            if gp:
                # git patch: metadata arrives with the event
                gp.path = pstrip(gp.path)
                if gp.oldpath:
                    gp.oldpath = pstrip(gp.oldpath)
            else:
                # plain patch: derive metadata from the file names
                gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
                                   prefix)
            if gp.op == 'RENAME':
                backend.unlink(gp.oldpath)
            if not first_hunk:
                # metadata-only change: no content hunks follow
                if gp.op == 'DELETE':
                    backend.unlink(gp.path)
                    continue
                data, mode = None, None
                if gp.op in ('RENAME', 'COPY'):
                    data, mode = store.getfile(gp.oldpath)[:2]
                    if data is None:
                        # This means that the old path does not exist
                        raise PatchError(_("source file '%s' does not exist")
                                         % gp.oldpath)
                if gp.mode:
                    mode = gp.mode
                if gp.op == 'ADD':
                    # Added files without content have no hunk and
                    # must be created
                    data = ''
                if data or mode:
                    if (gp.op in ('ADD', 'RENAME', 'COPY')
                        and backend.exists(gp.path)):
                        raise PatchError(_("cannot create %s: destination "
                                           "already exists") % gp.path)
                    backend.setfile(gp.path, data, mode, gp.oldpath)
                continue
            try:
                current_file = patcher(ui, gp, backend, store,
                                       eolmode=eolmode)
            except PatchError as inst:
                # record the rejection but keep patching remaining files
                ui.warn(str(inst) + '\n')
                current_file = None
                rejects += 1
                continue
        elif state == 'git':
            # stash the pre-patch content of copy/rename sources so that
            # later COPY/RENAME events can read it from 'store'
            for gp in values:
                path = pstrip(gp.oldpath)
                data, mode = backend.getfile(path)
                if data is None:
                    # The error ignored here will trigger a getfile()
                    # error in a place more appropriate for error
                    # handling, and will not interrupt the patching
                    # process.
                    pass
                else:
                    store.setfile(path, data, mode)
        else:
            raise error.Abort(_('unsupported parser state: %s') % state)

    # close the last open file, if any
    if current_file:
        rejects += current_file.close()

    if rejects:
        return -1
    return err
2088
2088
def _externalpatch(ui, repo, patcher, patchname, strip, files,
                   similarity):
    """use <patcher> to apply <patchname> to the working directory.

    Adds the names of patched files to ``files`` and marks them as
    touched.  Raises PatchError when the external command exits
    non-zero.  Returns whether the patch was applied with fuzz factor.
    """

    fuzz = False
    args = []
    cwd = repo.root
    if cwd:
        args.append('-d %s' % util.shellquote(cwd))
    fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
                                         util.shellquote(patchname)))
    # Initialize before the loop: the external patcher could report
    # fuzz or a failure before any 'patching file' line, which would
    # otherwise raise UnboundLocalError on 'pf'/'printed_file' below.
    pf = ''
    printed_file = False
    try:
        for line in util.iterfile(fp):
            line = line.rstrip()
            ui.note(line + '\n')
            if line.startswith('patching file '):
                pf = util.parsepatchoutput(line)
                printed_file = False
                files.add(pf)
            elif line.find('with fuzz') >= 0:
                fuzz = True
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
            elif line.find('saving rejects to file') >= 0:
                ui.warn(line + '\n')
            elif line.find('FAILED') >= 0:
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
    finally:
        if files:
            scmutil.marktouched(repo, files, similarity)
    code = fp.close()
    if code:
        raise PatchError(_("patch command failed: %s") %
                         util.explainexit(code)[0])
    return fuzz
2130
2130
def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
                 eolmode='strict'):
    """Apply ``patchobj`` (a path or a file-like object) via ``backend``.

    Returns True when the patch applied with fuzz, False on a clean
    apply; raises PatchError when any hunk fails to apply.  ``files``
    collects the names reported by the backend on close.
    """
    if files is None:
        files = set()
    if eolmode is None:
        eolmode = ui.config('patch', 'eol')
    lowered = eolmode.lower()
    if lowered not in eolmodes:
        raise error.Abort(_('unsupported line endings type: %s') % eolmode)
    eolmode = lowered

    store = filestore()
    try:
        fp = open(patchobj, 'rb')
    except TypeError:
        # not a path name: assume an already-open file-like object
        fp = patchobj
    try:
        ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
                        eolmode=eolmode)
    finally:
        if fp != patchobj:
            fp.close()
        files.update(backend.close())
        store.close()
    if ret < 0:
        raise PatchError(_('patch failed to apply'))
    return ret > 0
2157
2157
def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
                  eolmode='strict', similarity=0):
    """Apply <patchobj> to the working directory using the builtin patcher.

    Returns whether the patch was applied with fuzz factor.
    """
    wb = workingbackend(ui, repo, similarity)
    return patchbackend(ui, wb, patchobj, strip, prefix, files, eolmode)
2164
2164
def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
              eolmode='strict'):
    """Apply <patchobj> through a repobackend built from ctx and store."""
    rb = repobackend(ui, repo, ctx, store)
    return patchbackend(ui, rb, patchobj, strip, prefix, files, eolmode)
2169
2169
def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
          similarity=0):
    """Apply <patchname> to the working directory.

    'eolmode' specifies how end of lines should be handled. It can be:
    - 'strict': inputs are read in binary mode, EOLs are preserved
    - 'crlf': EOLs are ignored when patching and reset to CRLF
    - 'lf': EOLs are ignored when patching and reset to LF
    - None: get it from user settings, default to 'strict'
    'eolmode' is ignored when using an external patcher program.

    Returns whether patch was applied with fuzz factor.
    """
    if files is None:
        files = set()
    externaltool = ui.config('ui', 'patch')
    if not externaltool:
        return internalpatch(ui, repo, patchname, strip, prefix, files,
                             eolmode, similarity)
    return _externalpatch(ui, repo, externaltool, patchname, strip, files,
                          similarity)
2191
2191
def changedfiles(ui, repo, patchpath, strip=1, prefix=''):
    """Return the set of file paths touched by the patch at ``patchpath``.

    The patch is only scanned (via iterhunks()); nothing is applied.
    For renames both the new and the old path are included.
    """
    backend = fsbackend(ui, repo.root)
    prefix = _canonprefix(repo, prefix)
    with open(patchpath, 'rb') as fp:
        changed = set()
        for state, values in iterhunks(fp):
            if state == 'file':
                afile, bfile, first_hunk, gp = values
                if gp:
                    # git patch: metadata is attached; adjust its paths
                    gp.path = pathtransform(gp.path, strip - 1, prefix)[1]
                    if gp.oldpath:
                        gp.oldpath = pathtransform(gp.oldpath, strip - 1,
                                                   prefix)[1]
                else:
                    # plain patch: synthesize metadata from file names
                    gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
                                       prefix)
                changed.add(gp.path)
                if gp.op == 'RENAME':
                    changed.add(gp.oldpath)
            elif state not in ('hunk', 'git'):
                raise error.Abort(_('unsupported parser state: %s') % state)
        return changed
2214
2214
class GitDiffRequired(Exception):
    """Signals that the git patch format is required."""
2217
2217
def diffallopts(ui, opts=None, untrusted=False, section='diff'):
    """Return diffopts with every supported feature enabled and parsed."""
    return difffeatureopts(ui, opts=opts, untrusted=untrusted,
                           section=section, git=True, whitespace=True,
                           formatchanging=True)

# historical alias kept for existing callers
diffopts = diffallopts
2224
2224
def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
                    whitespace=False, formatchanging=False):
    '''return diffopts with only opted-in features parsed

    Features:
    - git: git-style diffs
    - whitespace: whitespace options like ignoreblanklines and ignorews
    - formatchanging: options that will likely break or cause correctness issues
      with most diff parsers
    '''
    def get(key, name=None, getter=ui.configbool, forceplain=None):
        # Resolve a single option: an explicitly-set command line value
        # (from 'opts') wins, then the forceplain override when
        # ui.plain() is in effect, then the config file entry.
        if opts:
            v = opts.get(key)
            # diffopts flags are either None-default (which is passed
            # through unchanged, so we can identify unset values), or
            # some other falsey default (eg --unified, which defaults
            # to an empty string). We only want to override the config
            # entries from hgrc with command line values if they
            # appear to have been set, which is any truthy value,
            # True, or False.
            if v or isinstance(v, bool):
                return v
        if forceplain is not None and ui.plain():
            return forceplain
        return getter(section, name or key, untrusted=untrusted)

    # core options, expected to be understood by every diff parser
    buildopts = {
        'nodates': get('nodates'),
        'showfunc': get('show_function', 'showfunc'),
        'context': get('unified', getter=ui.config),
    }
    buildopts['worddiff'] = ui.configbool('experimental', 'worddiff')

    if git:
        buildopts['git'] = get('git')

        # since this is in the experimental section, we need to call
        # ui.configbool directly
        buildopts['showsimilarity'] = ui.configbool('experimental',
                                                    'extendedheader.similarity')

        # need to inspect the ui object instead of using get() since we want to
        # test for an int
        hconf = ui.config('experimental', 'extendedheader.index')
        if hconf is not None:
            hlen = None
            try:
                # the hash config could be an integer (for length of hash) or a
                # word (e.g. short, full, none)
                hlen = int(hconf)
                if hlen < 0 or hlen > 40:
                    msg = _("invalid length for extendedheader.index: '%d'\n")
                    ui.warn(msg % hlen)
            except ValueError:
                # default value
                if hconf == 'short' or hconf == '':
                    hlen = 12
                elif hconf == 'full':
                    hlen = 40
                elif hconf != 'none':
                    msg = _("invalid value for extendedheader.index: '%s'\n")
                    ui.warn(msg % hconf)
            finally:
                # always record the resolved length (None when invalid)
                buildopts['index'] = hlen

    if whitespace:
        buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
        buildopts['ignorewsamount'] = get('ignore_space_change',
                                          'ignorewsamount')
        buildopts['ignoreblanklines'] = get('ignore_blank_lines',
                                            'ignoreblanklines')
        buildopts['ignorewseol'] = get('ignore_space_at_eol', 'ignorewseol')
    if formatchanging:
        buildopts['text'] = opts and opts.get('text')
        binary = None if opts is None else opts.get('binary')
        buildopts['nobinary'] = (not binary if binary is not None
                                 else get('nobinary', forceplain=False))
        buildopts['noprefix'] = get('noprefix', forceplain=False)

    return mdiff.diffopts(**pycompat.strkwargs(buildopts))
2306
2306
def diff(repo, node1=None, node2=None, match=None, changes=None,
         opts=None, losedatafn=None, prefix='', relroot='', copy=None,
         hunksfilterfn=None):
    '''yields diff of changes to files between two nodes, or node and
    working directory.

    if node1 is None, use first dirstate parent instead.
    if node2 is None, compare node1 with working directory.

    losedatafn(**kwarg) is a callable run when opts.upgrade=True and
    every time some change cannot be represented with the current
    patch format. Return False to upgrade to git patch format, True to
    accept the loss or raise an exception to abort the diff. It is
    called with the name of current file being diffed as 'fn'. If set
    to None, patches will always be upgraded to git format when
    necessary.

    prefix is a filename prefix that is prepended to all filenames on
    display (used for subrepos).

    relroot, if not empty, must be normalized with a trailing /. Any match
    patterns that fall outside it will be ignored.

    copy, if not empty, should contain mappings {dst@y: src@x} of copy
    information.

    hunksfilterfn, if not None, should be a function taking a filectx and
    hunks generator that may yield filtered hunks.
    '''
    for fctx1, fctx2, hdr, hunks in diffhunks(
            repo, node1=node1, node2=node2,
            match=match, changes=changes, opts=opts,
            losedatafn=losedatafn, prefix=prefix, relroot=relroot, copy=copy,
    ):
        if hunksfilterfn is not None:
            # If the file has been removed, fctx2 is None; but this should
            # not occur here since we catch removed files early in
            # logcmdutil.getlinerangerevs() for 'hg log -L'.
            assert fctx2 is not None, \
                'fctx2 unexpectly None in diff hunks filtering'
            hunks = hunksfilterfn(fctx2, hunks)
        # flatten the (hunkrange, hunklines) pairs into a single text blob
        text = ''.join(sum((list(hlines) for hrange, hlines in hunks), []))
        # emit the header even when there is no hunk text, as long as it
        # contains more than a single line
        if hdr and (text or len(hdr) > 1):
            yield '\n'.join(hdr) + '\n'
        if text:
            yield text
2353
2353
def diffhunks(repo, node1=None, node2=None, match=None, changes=None,
              opts=None, losedatafn=None, prefix='', relroot='', copy=None):
    """Yield diff of changes to files in the form of (`header`, `hunks`) tuples
    where `header` is a list of diff headers and `hunks` is an iterable of
    (`hunkrange`, `hunklines`) tuples.

    See diff() for the meaning of parameters.
    """

    if opts is None:
        opts = mdiff.defaultopts

    # with no explicit revisions, diff the working directory against its
    # first parent
    if not node1 and not node2:
        node1 = repo.dirstate.p1()

    def lrugetfilectx():
        # small LRU (21 entries max) of filelogs keyed by file name, so
        # repeated filectx lookups for the same file reuse the filelog
        cache = {}
        order = collections.deque()
        def getfilectx(f, ctx):
            fctx = ctx.filectx(f, filelog=cache.get(f))
            if f not in cache:
                if len(cache) > 20:
                    # evict the least recently used entry
                    del cache[order.popleft()]
                cache[f] = fctx.filelog()
            else:
                # move f to the most-recently-used end
                order.remove(f)
            order.append(f)
            return fctx
        return getfilectx
    getfilectx = lrugetfilectx()

    ctx1 = repo[node1]
    ctx2 = repo[node2]

    relfiltered = False
    if relroot != '' and match.always():
        # as a special case, create a new matcher with just the relroot
        pats = [relroot]
        match = scmutil.match(ctx2, pats, default='path')
        relfiltered = True

    if not changes:
        changes = repo.status(ctx1, ctx2, match=match)
    # only the first three status categories matter for a diff
    modified, added, removed = changes[:3]

    if not modified and not added and not removed:
        return []

    if repo.ui.debugflag:
        hexfunc = hex
    else:
        hexfunc = short
    revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]

    if copy is None:
        copy = {}
    if opts.git or opts.upgrade:
        copy = copies.pathcopies(ctx1, ctx2, match=match)

    if relroot is not None:
        if not relfiltered:
            # XXX this would ideally be done in the matcher, but that is
            # generally meant to 'or' patterns, not 'and' them. In this case we
            # need to 'and' all the patterns from the matcher with relroot.
            def filterrel(l):
                return [f for f in l if f.startswith(relroot)]
            modified = filterrel(modified)
            added = filterrel(added)
            removed = filterrel(removed)
            relfiltered = True
        # filter out copies where either side isn't inside the relative root
        # NOTE(review): iteritems() is Python 2 only
        copy = dict(((dst, src) for (dst, src) in copy.iteritems()
                     if dst.startswith(relroot)
                     and src.startswith(relroot)))

    modifiedset = set(modified)
    addedset = set(added)
    removedset = set(removed)
    for f in modified:
        if f not in ctx1:
            # Fix up added, since merged-in additions appear as
            # modifications during merges
            modifiedset.remove(f)
            addedset.add(f)
    for f in removed:
        if f not in ctx1:
            # Merged-in additions that are then removed are reported as removed.
            # They are not in ctx1, so We don't want to show them in the diff.
            removedset.remove(f)
    modified = sorted(modifiedset)
    added = sorted(addedset)
    removed = sorted(removedset)
    # iterate over a copy of the items since entries may be deleted below
    for dst, src in list(copy.items()):
        if src not in ctx1:
            # Files merged in during a merge and then copied/renamed are
            # reported as copies. We want to show them in the diff as additions.
            del copy[dst]

    def difffn(opts, losedata):
        return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
                       copy, getfilectx, opts, losedata, prefix, relroot)
    if opts.upgrade and not opts.git:
        # plain diff requested but may lose data: retry as git diff if the
        # losedatafn callback vetoes (or is absent)
        try:
            def losedata(fn):
                if not losedatafn or not losedatafn(fn=fn):
                    raise GitDiffRequired
            # Buffer the whole output until we are sure it can be generated
            return list(difffn(opts.copy(git=False), losedata))
        except GitDiffRequired:
            return difffn(opts.copy(git=True), None)
    else:
        return difffn(opts, None)
2466
2466
def difflabel(func, *args, **kw):
    '''yields 2-tuples of (output, label) based on the output of func()'''
    # when word-diff is enabled, deletions/insertions are matched up so
    # intra-line changes can be highlighted via _inlinediff()
    inlinecolor = False
    if kw.get(r'opts'):
        inlinecolor = kw[r'opts'].worddiff
    # (prefix, label) pairs for lines inside a file header
    headprefixes = [('diff', 'diff.diffline'),
                    ('copy', 'diff.extended'),
                    ('rename', 'diff.extended'),
                    ('old', 'diff.extended'),
                    ('new', 'diff.extended'),
                    ('deleted', 'diff.extended'),
                    ('index', 'diff.extended'),
                    ('similarity', 'diff.extended'),
                    ('---', 'diff.file_a'),
                    ('+++', 'diff.file_b')]
    # (prefix, label) pairs for hunk body lines
    textprefixes = [('@', 'diff.hunk'),
                    ('-', 'diff.deleted'),
                    ('+', 'diff.inserted')]
    head = False
    for chunk in func(*args, **kw):
        lines = chunk.split('\n')
        matches = {}
        if inlinecolor:
            matches = _findmatches(lines)
        for i, line in enumerate(lines):
            if i != 0:
                yield ('\n', '')
            # track whether we are in a file header: a '@' line ends it,
            # any line not starting with ' +-@\' re-enters it
            if head:
                if line.startswith('@'):
                    head = False
            else:
                if line and line[0] not in ' +-@\\':
                    head = True
            stripline = line
            diffline = False
            if not head and line and line[0] in '+-':
                # highlight tabs and trailing whitespace, but only in
                # changed lines
                stripline = line.rstrip()
                diffline = True

            prefixes = textprefixes
            if head:
                prefixes = headprefixes
            for prefix, label in prefixes:
                if stripline.startswith(prefix):
                    if diffline:
                        if i in matches:
                            # word-diff: emit finer-grained labels for the
                            # changed portions of this line
                            for t, l in _inlinediff(lines[i].rstrip(),
                                                    lines[matches[i]].rstrip(),
                                                    label):
                                yield (t, l)
                        else:
                            for token in tabsplitter.findall(stripline):
                                if token.startswith('\t'):
                                    yield (token, 'diff.tab')
                                else:
                                    yield (token, label)
                    else:
                        yield (stripline, label)
                    break
            else:
                # no prefix matched: plain, unlabeled output
                yield (line, '')
            if line != stripline:
                # emit the stripped trailing whitespace with its own label
                yield (line[len(stripline):], 'diff.trailingwhitespace')
2532
2532
def _findmatches(slist):
    '''Look for insertion matches to deletion and returns a dict of
    correspondences.

    Each '-' line is paired with the first sufficiently similar '+' line
    that follows it; the returned dict maps both indices to each other.
    '''
    matches = {}
    lastmatch = 0
    for delidx, delline in enumerate(slist):
        if not delline:
            continue
        if delline[0] != '-':
            continue
        lastmatch = max(lastmatch, delidx)
        ingroup = False
        for offset, candidate in enumerate(slist[lastmatch + 1:]):
            if not candidate:
                continue
            first = candidate[0]
            if first == '-' and ingroup:
                # walked into the next deletion group: give up on this line
                break
            if first == '+':
                ingroup = True
                ratio = difflib.SequenceMatcher(None, delline,
                                                candidate).ratio()
                if ratio > 0.7:
                    # record the correspondence in both directions
                    lastmatch = lastmatch + 1 + offset
                    matches[delidx] = lastmatch
                    matches[lastmatch] = delidx
                    break
    return matches
2559
2559
def _inlinediff(s1, s2, operation):
    '''Perform string diff to highlight specific changes.

    Returns a list of (token, label) pairs; changed words get the
    '<operation>.highlight' label, unchanged ones keep `operation`.
    '''
    # ndiff marks lines from the other side with this prefix; skip them
    operation_skip = '+?' if operation == 'diff.deleted' else '-?'
    if operation == 'diff.deleted':
        s2, s1 = s1, s2

    buff = []
    # we never want to highlight the leading +-
    if operation == 'diff.deleted' and s2.startswith('-'):
        label = operation
        token = '-'
        s2 = s2[1:]
        s1 = s1[1:]
    elif operation == 'diff.inserted' and s1.startswith('+'):
        label = operation
        token = '+'
        s2 = s2[1:]
        s1 = s1[1:]
    else:
        raise error.ProgrammingError("Case not expected, operation = %s" %
                                     operation)

    # word-level diff: split on non-word boundaries and compare the pieces
    s = difflib.ndiff(_nonwordre.split(s2), _nonwordre.split(s1))
    for part in s:
        # ndiff output: first two chars are the marker ('+ ', '- ', '  ',
        # '? '); len(part) == 2 means an empty fragment
        if part[0] in operation_skip or len(part) == 2:
            continue
        l = operation + '.highlight'
        if part[0] in ' ':
            l = operation
        if part[2:] == '\t':
            l = 'diff.tab'
        if l == label: # contiguous token with same label
            token += part[2:]
            continue
        else:
            # label changed: flush the accumulated token and start a new one
            buff.append((token, label))
            label = l
            token = part[2:]
    buff.append((token, label))

    return buff
2601
2601
def diffui(*args, **kw):
    '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
    # thin wrapper: run diff() through difflabel() to attach color labels
    return difflabel(diff, *args, **kw)
2605
2605
def _filepairs(modified, added, removed, copy, opts):
    """Yield (f1, f2, copyop) tuples describing each changed file.

    f1 is the name before the change and f2 the name after; added files
    have f1 = None and removed files f2 = None.  copyop may be None,
    'copy' or 'rename' (the latter two only when opts.git is set).
    """
    # rename sources already consumed, so later copies of the same source
    # are reported as plain copies
    renamed_sources = set()

    # reverse mapping: source -> destination
    reversecopy = {src: dst for dst, src in copy.items()}

    addedset = set(added)
    removedset = set(removed)

    for name in sorted(modified + added + removed):
        op = None
        before, after = name, name
        if name in addedset:
            before = None
            if name in copy and opts.git:
                before = copy[name]
                if before in removedset and before not in renamed_sources:
                    renamed_sources.add(before)
                    op = 'rename'
                else:
                    op = 'copy'
        elif name in removedset:
            after = None
            if opts.git:
                # skip removals already reported above as a rename/copy
                # source
                if (name in reversecopy
                    and reversecopy[name] in addedset
                    and copy[reversecopy[name]] == name):
                    continue
        yield before, after, op
2638
2638
def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
            copy, getfilectx, opts, losedatafn, prefix, relroot):
    '''given input data, generate a diff and yield it in blocks

    If generating a diff would lose data like flags or binary data and
    losedatafn is not None, it will be called.

    relroot is removed and prefix is added to every path in the diff output.

    If relroot is not empty, this function expects every path in modified,
    added, removed and copy to start with it.'''

    def gitindex(text):
        # SHA-1 over 'blob <len>\0' + text, i.e. the blob object id scheme
        # used for git-style 'index' lines
        if not text:
            text = ""
        l = len(text)
        s = hashlib.sha1('blob %d\0' % l)
        s.update(text)
        return hex(s.digest())

    if opts.noprefix:
        aprefix = bprefix = ''
    else:
        aprefix = 'a/'
        bprefix = 'b/'

    def diffline(f, revs):
        # plain (non-git) header line: "diff -r REV1 -r REV2 filename"
        revinfo = ' '.join(["-r %s" % rev for rev in revs])
        return 'diff %s %s' % (revinfo, f)

    def isempty(fctx):
        return fctx is None or fctx.size() == 0

    date1 = util.datestr(ctx1.date())
    date2 = util.datestr(ctx2.date())

    # flag character -> git file mode
    gitmode = {'l': '120000', 'x': '100755', '': '100644'}

    # developer sanity check: with relroot set, every path must live under it
    if relroot != '' and (repo.ui.configbool('devel', 'all-warnings')
                          or repo.ui.configbool('devel', 'check-relroot')):
        for f in modified + added + removed + list(copy) + list(copy.values()):
            if f is not None and not f.startswith(relroot):
                raise AssertionError(
                    "file %s doesn't start with relroot %s" % (f, relroot))

    for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
        content1 = None
        content2 = None
        fctx1 = None
        fctx2 = None
        flag1 = None
        flag2 = None
        if f1:
            fctx1 = getfilectx(f1, ctx1)
            if opts.git or losedatafn:
                flag1 = ctx1.flags(f1)
        if f2:
            fctx2 = getfilectx(f2, ctx2)
            if opts.git or losedatafn:
                flag2 = ctx2.flags(f2)
        # if binary is True, output "summary" or "base85", but not "text diff"
        if opts.text:
            binary = False
        else:
            binary = any(f.isbinary() for f in [fctx1, fctx2] if f is not None)

        # a plain (non-git) diff cannot represent these cases; give the
        # caller a chance to veto or upgrade to git format
        if losedatafn and not opts.git:
            if (binary or
                # copy/rename
                f2 in copy or
                # empty file creation
                (not f1 and isempty(fctx2)) or
                # empty file deletion
                (isempty(fctx1) and not f2) or
                # create with flags
                (not f1 and flag2) or
                # change flags
                (f1 and f2 and flag1 != flag2)):
                losedatafn(f2 or f1)

        # strip relroot and prepend prefix to the displayed paths
        path1 = f1 or f2
        path2 = f2 or f1
        path1 = posixpath.join(prefix, path1[len(relroot):])
        path2 = posixpath.join(prefix, path2[len(relroot):])
        header = []
        if opts.git:
            header.append('diff --git %s%s %s%s' %
                          (aprefix, path1, bprefix, path2))
            if not f1: # added
                header.append('new file mode %s' % gitmode[flag2])
            elif not f2: # removed
                header.append('deleted file mode %s' % gitmode[flag1])
            else:  # modified/copied/renamed
                mode1, mode2 = gitmode[flag1], gitmode[flag2]
                if mode1 != mode2:
                    header.append('old mode %s' % mode1)
                    header.append('new mode %s' % mode2)
                if copyop is not None:
                    if opts.showsimilarity:
                        sim = similar.score(ctx1[path1], ctx2[path2]) * 100
                        header.append('similarity index %d%%' % sim)
                    header.append('%s from %s' % (copyop, path1))
                    header.append('%s to %s' % (copyop, path2))
        elif revs and not repo.ui.quiet:
            header.append(diffline(path1, revs))

        #  fctx.is  | diffopts                | what to   | is fctx.data()
        #  binary() | text nobinary git index | output?   | outputted?
        # ------------------------------------|----------------------------
        #  yes      | no   no       no  *     | summary   | no
        #  yes      | no   no       yes *     | base85    | yes
        #  yes      | no   yes      no  *     | summary   | no
        #  yes      | no   yes      yes 0     | summary   | no
        #  yes      | no   yes      yes >0    | summary   | semi [1]
        #  yes      | yes  *        *   *     | text diff | yes
        #  no       | *    *        *   *     | text diff | yes
        # [1]: hash(fctx.data()) is outputted. so fctx.data() cannot be faked
        if binary and (not opts.git or (opts.git and opts.nobinary and not
                                        opts.index)):
            # fast path: no binary content will be displayed, content1 and
            # content2 are only used for equivalent test. cmp() could have a
            # fast path.
            if fctx1 is not None:
                content1 = b'\0'
            if fctx2 is not None:
                if fctx1 is not None and not fctx1.cmp(fctx2):
                    content2 = b'\0' # not different
                else:
                    content2 = b'\0\0'
        else:
            # normal path: load contents
            if fctx1 is not None:
                content1 = fctx1.data()
            if fctx2 is not None:
                content2 = fctx2.data()

        if binary and opts.git and not opts.nobinary:
            text = mdiff.b85diff(content1, content2)
            if text:
                header.append('index %s..%s' %
                              (gitindex(content1), gitindex(content2)))
            hunks = (None, [text]),
        else:
            if opts.git and opts.index > 0:
                # abbreviated index line, truncated to opts.index hex digits
                flag = flag1
                if flag is None:
                    flag = flag2
                header.append('index %s..%s %s' %
                              (gitindex(content1)[0:opts.index],
                               gitindex(content2)[0:opts.index],
                               gitmode[flag]))

            uheaders, hunks = mdiff.unidiff(content1, date1,
                                            content2, date2,
                                            path1, path2,
                                            binary=binary, opts=opts)
            header.extend(uheaders)
        yield fctx1, fctx2, header, hunks
2797
2797
def diffstatsum(stats):
    """Reduce (filename, adds, removes, isbinary) tuples to file-level totals.

    Returns (maxfile, maxtotal, addtotal, removetotal, binary): the widest
    filename (display columns), the largest per-file change count, the
    overall insertion/deletion totals, and whether any file was binary.
    """
    widest = 0
    biggest = 0
    adds = removes = 0
    anybinary = False
    for fname, added, removed, isbinary in stats:
        widest = max(widest, encoding.colwidth(fname))
        biggest = max(biggest, added + removed)
        adds += added
        removes += removed
        anybinary = anybinary or isbinary
    return widest, biggest, adds, removes, anybinary
2808
2808
def diffstatdata(lines):
    """Parse diff output into (filename, adds, removes, isbinary) tuples,
    one per file touched by the diff."""
    # raw string: '\s' in a non-raw literal is an invalid escape sequence
    # (DeprecationWarning on Python 3)
    diffre = re.compile(r'^diff .*-r [a-z0-9]+\s(.*)$')

    results = []
    filename, adds, removes, isbinary = None, 0, 0, False

    def addresult():
        # flush the stats accumulated for the previous file, if any
        if filename:
            results.append((filename, adds, removes, isbinary))

    # inheader is used to track if a line is in the
    # header portion of the diff. This helps properly account
    # for lines that start with '--' or '++'
    inheader = False

    for line in lines:
        if line.startswith('diff'):
            addresult()
            # starting a new file diff
            # set numbers to 0 and reset inheader
            inheader = True
            adds, removes, isbinary = 0, 0, False
            if line.startswith('diff --git a/'):
                filename = gitre.search(line).group(2)
            elif line.startswith('diff -r'):
                # format: "diff -r ... -r ... filename"
                filename = diffre.search(line).group(1)
        elif line.startswith('@@'):
            inheader = False
        elif line.startswith('+') and not inheader:
            adds += 1
        elif line.startswith('-') and not inheader:
            removes += 1
        elif line.startswith(('GIT binary patch', 'Binary file')):
            isbinary = True
    addresult()
    return results
2847
2847
def diffstat(lines, width=80):
    """Render diff output `lines` as a diffstat histogram string, fitting
    each row (name | count +++---) within `width` columns."""
    output = []
    stats = diffstatdata(lines)
    maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)

    countwidth = len(str(maxtotal))
    # reserve room for the 'Bin' marker used by binary files
    if hasbinary and countwidth < 3:
        countwidth = 3
    # columns left for the +/- graph after name, count and separators
    graphwidth = width - countwidth - maxname - 6
    if graphwidth < 10:
        graphwidth = 10

    def scale(i):
        if maxtotal <= graphwidth:
            return i
        # If diffstat runs out of room it doesn't print anything,
        # which isn't very useful, so always print at least one + or -
        # if there were at least some changes.
        return max(i * graphwidth // maxtotal, int(bool(i)))

    for filename, adds, removes, isbinary in stats:
        if isbinary:
            count = 'Bin'
        else:
            count = '%d' % (adds + removes)
        pluses = '+' * scale(adds)
        minuses = '-' * scale(removes)
        # pad the name column with display-width-aware spacing
        output.append(' %s%s | %*s %s%s\n' %
                      (filename, ' ' * (maxname - encoding.colwidth(filename)),
                       countwidth, count, pluses, minuses))

    if stats:
        output.append(_(' %d files changed, %d insertions(+), '
                        '%d deletions(-)\n')
                      % (len(stats), totaladds, totalremoves))

    return ''.join(output)
2885
2885
def diffstatui(*args, **kw):
    '''like diffstat(), but yields 2-tuples of (output, label) for
    ui.write()
    '''

    for line in diffstat(*args, **kw).splitlines():
        if not line or line[-1] not in '+-':
            # Lines without a trailing graph (e.g. the summary) are plain.
            yield (line, '')
            continue
        name, graph = line.rsplit(' ', 1)
        yield (name + ' ', '')
        # Label the '+' run and the '-' run of the graph separately so the
        # UI can color insertions and deletions differently.
        for pattern, label in ((br'\++', 'diffstat.inserted'),
                               (br'-+', 'diffstat.deleted')):
            m = re.search(pattern, graph)
            if m:
                yield (m.group(0), label)
    yield ('\n', '')
General Comments 0
You need to be logged in to leave comments. Login now