##// END OF EJS Templates
py3: slice over bytes or use .startswith() to prevent getting ascii values...
Pulkit Goyal -
r36465:588048a6 default
parent child Browse files
Show More
@@ -1,360 +1,360 b''
1 1 # archival.py - revision archival for mercurial
2 2 #
3 3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import gzip
11 11 import os
12 12 import struct
13 13 import tarfile
14 14 import time
15 15 import zipfile
16 16 import zlib
17 17
18 18 from .i18n import _
19 19
20 20 from . import (
21 21 error,
22 22 formatter,
23 23 match as matchmod,
24 24 scmutil,
25 25 util,
26 26 vfs as vfsmod,
27 27 )
28 28 stringio = util.stringio
29 29
30 30 # from unzip source code:
31 31 _UNX_IFREG = 0x8000
32 32 _UNX_IFLNK = 0xa000
33 33
def tidyprefix(dest, kind, prefix):
    '''choose prefix to use for names in archive. make sure prefix is
    safe for consumers.'''

    if prefix:
        prefix = util.normpath(prefix)
    else:
        # No explicit prefix: derive one from the destination filename,
        # which therefore must be a (byte) string, not a file object.
        if not isinstance(dest, bytes):
            raise ValueError('dest must be string if no prefix')
        prefix = os.path.basename(dest)
        lower = prefix.lower()
        # Strip a known archive suffix for this kind (e.g. '.tar.gz') so
        # the prefix is the bare archive name.
        for sfx in exts.get(kind, []):
            if lower.endswith(sfx):
                prefix = prefix[:-len(sfx)]
                break
    lpfx = os.path.normpath(util.localpath(prefix))
    prefix = util.pconvert(lpfx)
    if not prefix.endswith('/'):
        prefix += '/'
    # Drop the leading '.' path component if present, so Windows can read the
    # zip files (issue4634)
    if prefix.startswith('./'):
        prefix = prefix[2:]
    # Refuse prefixes that could escape the extraction directory.
    if prefix.startswith('../') or os.path.isabs(lpfx) or '/../' in prefix:
        raise error.Abort(_('archive prefix contains illegal components'))
    return prefix
60 60
# archive kind -> filename extensions recognized for that kind; used both
# for guessing the kind from a destination name (guesskind) and for
# stripping the suffix when deriving a member prefix (tidyprefix).
exts = {
    'tar': ['.tar'],
    'tbz2': ['.tbz2', '.tar.bz2'],
    'tgz': ['.tgz', '.tar.gz'],
    'zip': ['.zip'],
    }
67 67
def guesskind(dest):
    '''map an archive destination name to its archive kind, or None if
    no known extension matches'''
    for archkind, suffixes in exts.iteritems():
        # endswith() accepts a tuple of alternative suffixes
        if dest.endswith(tuple(suffixes)):
            return archkind
    return None
73 73
def _rootctx(repo):
    '''return the context of the first visible revision (repo[0] may be
    hidden), or the null context for an empty repo'''
    revs = iter(repo)
    try:
        return repo[next(revs)]
    except StopIteration:
        return repo['null']
79 79
# {tags} on ctx includes local tags and 'tip', with no current way to limit
# that to global tags. Therefore, use {latesttag} as a substitute when
# the distance is 0, since that will be the list of global tags on ctx.
# Default template for .hg_archival.txt; can be overridden through the
# experimental.archivemetatemplate config knob (see buildmetadata()).
_defaultmetatemplate = br'''
repo: {root}
node: {ifcontains(rev, revset("wdir()"), "{p1node}{dirty}", "{node}")}
branch: {branch|utf8}
{ifeq(latesttagdistance, 0, join(latesttag % "tag: {tag}", "\n"),
       separate("\n",
                join(latesttag % "latesttag: {tag}", "\n"),
                "latesttagdistance: {latesttagdistance}",
                "changessincelatesttag: {changessincelatesttag}"))}
'''[1:] # drop leading '\n'
93 93
def buildmetadata(ctx):
    '''build content of .hg_archival.txt as a byte string'''
    repo = ctx.repo()

    opts = {
        'template': repo.ui.config('experimental', 'archivemetatemplate',
                                   _defaultmetatemplate)
    }

    out = util.stringio()

    # render the metadata template through the regular formatter machinery
    fm = formatter.formatter(repo.ui, out, 'archive', opts)
    fm.startitem()
    fm.context(ctx=ctx)
    fm.data(root=_rootctx(repo).hex())

    if ctx.rev() is None:
        # Working-directory archive: expose local modifications as a '+'
        # suffix via the {dirty} template keyword.
        dirty = ''
        if ctx.dirty(missing=True):
            dirty = '+'
        fm.data(dirty=dirty)
    fm.end()

    return out.getvalue()
118 118
class tarit(object):
    '''write archive to tar file or stream. can write uncompressed,
    or compress with gzip or bzip2.'''

    class GzipFileWithTime(gzip.GzipFile):
        # GzipFile variant that writes a caller-supplied timestamp into
        # the gzip header instead of the current time, so archive output
        # is reproducible for a given mtime.

        def __init__(self, *args, **kw):
            timestamp = None
            if 'timestamp' in kw:
                timestamp = kw.pop(r'timestamp')
            if timestamp is None:
                # no timestamp supplied: fall back to "now"
                self.timestamp = time.time()
            else:
                self.timestamp = timestamp
            gzip.GzipFile.__init__(self, *args, **kw)

        def _write_gzip_header(self):
            # Hand-rolled RFC 1952 header so MTIME comes from
            # self.timestamp rather than time.time().
            self.fileobj.write('\037\213') # magic header
            self.fileobj.write('\010') # compression method
            fname = self.name
            if fname and fname.endswith('.gz'):
                fname = fname[:-3]
            flags = 0
            if fname:
                flags = gzip.FNAME # an original file name follows
            self.fileobj.write(chr(flags))
            gzip.write32u(self.fileobj, long(self.timestamp))
            self.fileobj.write('\002') # XFL: slowest/best compression
            self.fileobj.write('\377') # OS: unknown
            if fname:
                self.fileobj.write(fname + '\000') # zero-terminated name

    def __init__(self, dest, mtime, kind=''):
        # mtime is stamped on every member and into the gzip header
        self.mtime = mtime
        self.fileobj = None

        def taropen(mode, name='', fileobj=None):
            if kind == 'gz':
                # slice, not index, so this stays a one-byte string on py3
                mode = mode[0:1]
                if not fileobj:
                    fileobj = open(name, mode + 'b')
                gzfileobj = self.GzipFileWithTime(name, mode + 'b',
                                                 zlib.Z_BEST_COMPRESSION,
                                                 fileobj, timestamp=mtime)
                self.fileobj = gzfileobj
                return tarfile.TarFile.taropen(name, mode, gzfileobj)
            else:
                return tarfile.open(name, mode + kind, fileobj)

        if isinstance(dest, str):
            # named file destination
            self.z = taropen('w:', name=dest)
        else:
            # stream destination (e.g. http response): non-seekable 'w|'
            self.z = taropen('w|', fileobj=dest)

    def addfile(self, name, mode, islink, data):
        i = tarfile.TarInfo(name)
        i.mtime = self.mtime
        i.size = len(data)
        if islink:
            # symlinks carry their target in linkname and no payload
            i.type = tarfile.SYMTYPE
            i.mode = 0o777
            i.linkname = data
            data = None
            i.size = 0
        else:
            i.mode = mode
            data = stringio(data)
        self.z.addfile(i, data)

    def done(self):
        self.z.close()
        if self.fileobj:
            # close the gzip wrapper we created in taropen()
            self.fileobj.close()
192 192
class tellable(object):
    '''provide tell method for zipfile.ZipFile when writing to http
    response file object.'''

    def __init__(self, fp):
        self.fp = fp
        self.offset = 0

    def __getattr__(self, name):
        # everything we do not implement is delegated to the wrapped file
        return getattr(self.fp, name)

    def write(self, s):
        nbytes = len(s)
        self.fp.write(s)
        self.offset = self.offset + nbytes

    def tell(self):
        return self.offset
210 210
class zipit(object):
    '''write archive to zip file or stream. can write uncompressed,
    or compressed with deflate.'''

    def __init__(self, dest, mtime, compress=True):
        if not isinstance(dest, str):
            try:
                dest.tell()
            except (AttributeError, IOError):
                # stream lacks tell() (e.g. http response): wrap it so
                # ZipFile can track the output offset
                dest = tellable(dest)
        self.z = zipfile.ZipFile(dest, r'w',
                                 compress and zipfile.ZIP_DEFLATED or
                                 zipfile.ZIP_STORED)

        # Python's zipfile module emits deprecation warnings if we try
        # to store files with a date before 1980.
        epoch = 315532800 # calendar.timegm((1980, 1, 1, 0, 0, 0, 1, 1, 0))
        if mtime < epoch:
            mtime = epoch

        self.mtime = mtime
        self.date_time = time.gmtime(mtime)[:6]

    def addfile(self, name, mode, islink, data):
        i = zipfile.ZipInfo(name, self.date_time)
        i.compress_type = self.z.compression
        # unzip will not honor unix file modes unless file creator is
        # set to unix (id 3).
        i.create_system = 3
        ftype = _UNX_IFREG
        if islink:
            mode = 0o777
            ftype = _UNX_IFLNK
        # high 16 bits of external_attr carry the unix mode/type bits
        i.external_attr = (mode | ftype) << 16
        # add "extended-timestamp" extra block, because zip archives
        # without this will be extracted with unexpected timestamp,
        # if TZ is not configured as GMT
        i.extra += struct.pack('<hhBl',
                               0x5455,     # block type: "extended-timestamp"
                               1 + 4,      # size of this block
                               1,          # "modification time is present"
                               int(self.mtime)) # last modification (UTC)
        self.z.writestr(i, data)

    def done(self):
        self.z.close()
257 257
class fileit(object):
    '''write archive as files in directory.'''

    def __init__(self, name, mtime):
        self.basedir = name
        self.opener = vfsmod.vfs(self.basedir)
        self.mtime = mtime

    def addfile(self, name, mode, islink, data):
        if islink:
            # symlinks get neither mode nor mtime applied
            self.opener.symlink(data, name)
            return
        f = self.opener(name, "w", atomictemp=True)
        f.write(data)
        f.close()
        destfile = os.path.join(self.basedir, name)
        os.chmod(destfile, mode)
        if self.mtime is not None:
            os.utime(destfile, (self.mtime, self.mtime))

    def done(self):
        # nothing to finalize for plain files
        pass
280 280
# archive kind -> archiver factory taking (dest, mtime)
archivers = {
    'files': fileit,
    'tar': tarit,
    'tbz2': lambda name, mtime: tarit(name, mtime, 'bz2'),
    'tgz': lambda name, mtime: tarit(name, mtime, 'gz'),
    'uzip': lambda name, mtime: zipit(name, mtime, False),
    'zip': zipit,
    }
289 289
def archive(repo, dest, node, kind, decode=True, matchfn=None,
            prefix='', mtime=None, subrepos=False):
    '''create archive of repo as it was at node.

    dest can be name of directory, name of archive file, or file
    object to write archive to.

    kind is type of archive to create.

    decode tells whether to put files through decode filters from
    hgrc.

    matchfn is function to filter names of files to write to archive.

    prefix is name of path to put before every archive member.

    mtime is the modified time, in seconds, or None to use the changeset time.

    subrepos tells whether to include subrepos.

    Returns the number of files archived; raises error.Abort when
    nothing matches or the prefix/kind is invalid.
    '''

    if kind == 'files':
        if prefix:
            raise error.Abort(_('cannot give prefix when archiving to files'))
    else:
        prefix = tidyprefix(dest, kind, prefix)

    def write(name, mode, islink, getdata):
        # getdata is deferred so unmatched files are never read
        data = getdata()
        if decode:
            data = repo.wwritedata(name, data)
        archiver.addfile(prefix + name, mode, islink, data)

    if kind not in archivers:
        raise error.Abort(_("unknown archive type '%s'") % kind)

    ctx = repo[node]
    # member timestamps default to the changeset's commit time
    archiver = archivers[kind](dest, mtime or ctx.date()[0])

    if repo.ui.configbool("ui", "archivemeta"):
        name = '.hg_archival.txt'
        if not matchfn or matchfn(name):
            write(name, 0o644, False, lambda: buildmetadata(ctx))

    if matchfn:
        files = [f for f in ctx.manifest().keys() if matchfn(f)]
    else:
        files = ctx.manifest().keys()
    total = len(files)
    if total:
        files.sort()
        # give extensions a chance to prefetch file contents in bulk
        scmutil.fileprefetchhooks(repo, ctx, files)
        repo.ui.progress(_('archiving'), 0, unit=_('files'), total=total)
        for i, f in enumerate(files):
            ff = ctx.flags(f)
            # 'x' flag -> executable member, 'l' flag -> symlink member
            write(f, 'x' in ff and 0o755 or 0o644, 'l' in ff, ctx[f].data)
            repo.ui.progress(_('archiving'), i + 1, item=f,
                             unit=_('files'), total=total)
        repo.ui.progress(_('archiving'), None)

    if subrepos:
        for subpath in sorted(ctx.substate):
            sub = ctx.workingsub(subpath)
            submatch = matchmod.subdirmatcher(subpath, matchfn)
            total += sub.archive(archiver, prefix, submatch, decode)

    if total == 0:
        raise error.Abort(_('no files match the archive pattern'))

    archiver.done()
    return total
@@ -1,2903 +1,2903 b''
1 1 # patch.py - patch file parsing routines
2 2 #
3 3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import, print_function
10 10
11 11 import collections
12 12 import copy
13 13 import difflib
14 14 import email
15 15 import errno
16 16 import hashlib
17 17 import os
18 18 import posixpath
19 19 import re
20 20 import shutil
21 21 import tempfile
22 22 import zlib
23 23
24 24 from .i18n import _
25 25 from .node import (
26 26 hex,
27 27 short,
28 28 )
29 29 from . import (
30 30 copies,
31 31 encoding,
32 32 error,
33 33 mail,
34 34 mdiff,
35 35 pathutil,
36 36 policy,
37 37 pycompat,
38 38 scmutil,
39 39 similar,
40 40 util,
41 41 vfs as vfsmod,
42 42 )
43 43
44 44 diffhelpers = policy.importmod(r'diffhelpers')
45 45 stringio = util.stringio
46 46
47 47 gitre = re.compile(br'diff --git a/(.*) b/(.*)')
48 48 tabsplitter = re.compile(br'(\t+|[^\t]+)')
49 49 _nonwordre = re.compile(br'([^a-zA-Z0-9_\x80-\xff])')
50 50
51 51 PatchError = error.PatchError
52 52
53 53 # public functions
54 54
def split(stream):
    '''return an iterator of individual patches from a stream'''
    def isheader(line, inheader):
        # heuristic: does this line look like an RFC 822 "Key: value"
        # header (or its continuation)?
        if inheader and line[0] in (' ', '\t'):
            # continuation
            return True
        if line[0] in (' ', '-', '+'):
            # diff line - don't check for header pattern in there
            return False
        l = line.split(': ', 1)
        return len(l) == 2 and ' ' not in l[0]

    def chunk(lines):
        # one patch as a file-like object
        return stringio(''.join(lines))

    def hgsplit(stream, cur):
        # split on '# HG changeset patch' markers (hg export output)
        inheader = True

        for line in stream:
            if not line.strip():
                inheader = False
            if not inheader and line.startswith('# HG changeset patch'):
                yield chunk(cur)
                cur = []
                inheader = True

            cur.append(line)

        if cur:
            yield chunk(cur)

    def mboxsplit(stream, cur):
        # split an mbox on 'From ' separators, recursing into each message
        for line in stream:
            if line.startswith('From '):
                for c in split(chunk(cur[1:])):
                    yield c
                cur = []

            cur.append(line)

        if cur:
            for c in split(chunk(cur[1:])):
                yield c

    def mimesplit(stream, cur):
        # let the email parser split a MIME message into patch-bearing parts
        def msgfp(m):
            fp = stringio()
            g = email.Generator.Generator(fp, mangle_from_=False)
            g.flatten(m)
            fp.seek(0)
            return fp

        for line in stream:
            cur.append(line)
        c = chunk(cur)

        m = pycompat.emailparser().parse(c)
        if not m.is_multipart():
            yield msgfp(m)
        else:
            ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
            for part in m.walk():
                ct = part.get_content_type()
                if ct not in ok_types:
                    continue
                yield msgfp(part)

    def headersplit(stream, cur):
        # split on runs of header-looking lines
        inheader = False

        for line in stream:
            if not inheader and isheader(line, inheader):
                yield chunk(cur)
                cur = []
                inheader = True
            if inheader and not isheader(line, inheader):
                inheader = False

            cur.append(line)

        if cur:
            yield chunk(cur)

    def remainder(cur):
        # everything read so far is one plain patch
        yield chunk(cur)

    class fiter(object):
        # wrap a readline-only object in the (py2) iterator protocol
        def __init__(self, fp):
            self.fp = fp

        def __iter__(self):
            return self

        def next(self):
            l = self.fp.readline()
            if not l:
                raise StopIteration
            return l

        __next__ = next

    inheader = False
    cur = []

    mimeheaders = ['content-type']

    if not util.safehasattr(stream, 'next'):
        # http responses, for example, have readline but not next
        stream = fiter(stream)

    # Read lines until we can decide which splitting strategy applies.
    for line in stream:
        cur.append(line)
        if line.startswith('# HG changeset patch'):
            return hgsplit(stream, cur)
        elif line.startswith('From '):
            return mboxsplit(stream, cur)
        elif isheader(line, inheader):
            inheader = True
            if line.split(':', 1)[0].lower() in mimeheaders:
                # let email parser handle this
                return mimesplit(stream, cur)
        elif line.startswith('--- ') and inheader:
            # No evil headers seen by diff start, split by hand
            return headersplit(stream, cur)
        # Not enough info, keep reading

    # if we are here, we have a very plain patch
    return remainder(cur)
183 183
## Some facility for extensible patch parsing:
# list of pairs ("header to match", "data key"); extract() recognizes
# '# <header> <value>' lines and stores the value under the given key.
patchheadermap = [('Date', 'date'),
                  ('Branch', 'branch'),
                  ('Node ID', 'nodeid'),
                  ]
190 190
def extract(ui, fileobj):
    '''extract patch from data read from fileobj.

    patch can be a normal patch or contained in an email message.

    return a dictionary. Standard keys are:
    - filename,
    - message,
    - user,
    - date,
    - branch,
    - node,
    - p1,
    - p2.
    Any item can be missing from the dictionary. If filename is missing,
    fileobj did not contain a patch. Caller must unlink filename when done.'''

    # attempt to detect the start of a patch
    # (this heuristic is borrowed from quilt)
    diffre = re.compile(br'^(?:Index:[ \t]|diff[ \t]-|RCS file: |'
                        br'retrieving revision [0-9]+(\.[0-9]+)*$|'
                        br'---[ \t].*?^\+\+\+[ \t]|'
                        br'\*\*\*[ \t].*?^---[ \t])',
                        re.MULTILINE | re.DOTALL)

    data = {}
    # the raw diff payload is accumulated into this temp file; on success
    # its name is returned under data['filename'] for the caller to unlink
    fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
    tmpfp = os.fdopen(fd, pycompat.sysstr('wb'))
    try:
        msg = pycompat.emailparser().parse(fileobj)

        subject = msg['Subject'] and mail.headdecode(msg['Subject'])
        data['user'] = msg['From'] and mail.headdecode(msg['From'])
        if not subject and not data['user']:
            # Not an email, restore parsed headers if any
            subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'

        # should try to parse msg['Date']
        parents = []

        if subject:
            # strip a leading '[PATCH ...]' tag from the subject
            if subject.startswith('[PATCH'):
                pend = subject.find(']')
                if pend >= 0:
                    subject = subject[pend + 1:].lstrip()
            subject = re.sub(br'\n[ \t]+', ' ', subject)
            ui.debug('Subject: %s\n' % subject)
        if data['user']:
            ui.debug('From: %s\n' % data['user'])
        diffs_seen = 0
        ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
        message = ''
        for part in msg.walk():
            content_type = pycompat.bytestr(part.get_content_type())
            ui.debug('Content-Type: %s\n' % content_type)
            if content_type not in ok_types:
                continue
            payload = part.get_payload(decode=True)
            m = diffre.search(payload)
            if m:
                hgpatch = False
                hgpatchheader = False
                ignoretext = False

                ui.debug('found patch at byte %d\n' % m.start(0))
                diffs_seen += 1
                cfp = stringio()
                # text before the diff start is the commit message, unless
                # it turns out to be an 'hg export' header block
                for line in payload[:m.start(0)].splitlines():
                    if line.startswith('# HG changeset patch') and not hgpatch:
                        ui.debug('patch generated by hg export\n')
                        hgpatch = True
                        hgpatchheader = True
                        # drop earlier commit message content
                        cfp.seek(0)
                        cfp.truncate()
                        subject = None
                    elif hgpatchheader:
                        if line.startswith('# User '):
                            data['user'] = line[7:]
                            ui.debug('From: %s\n' % data['user'])
                        elif line.startswith("# Parent "):
                            parents.append(line[9:].lstrip())
                        elif line.startswith("# "):
                            for header, key in patchheadermap:
                                prefix = '# %s ' % header
                                if line.startswith(prefix):
                                    data[key] = line[len(prefix):]
                        else:
                            hgpatchheader = False
                    elif line == '---':
                        # '---' separator: everything after is not message
                        ignoretext = True
                    if not hgpatchheader and not ignoretext:
                        cfp.write(line)
                        cfp.write('\n')
                message = cfp.getvalue()
                if tmpfp:
                    tmpfp.write(payload)
                    if not payload.endswith('\n'):
                        tmpfp.write('\n')
            elif not diffs_seen and message and content_type == 'text/plain':
                message += '\n' + payload
    except: # re-raises
        tmpfp.close()
        os.unlink(tmpname)
        raise

    if subject and not message.startswith(subject):
        message = '%s\n%s' % (subject, message)
    data['message'] = message
    tmpfp.close()
    if parents:
        data['p1'] = parents.pop(0)
    if parents:
        data['p2'] = parents.pop(0)

    if diffs_seen:
        data['filename'] = tmpname
    else:
        # no diff found: nothing for the caller to apply or unlink
        os.unlink(tmpname)
    return data
311 311
class patchmeta(object):
    """Patched file metadata

    'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
    or COPY. 'path' is patched file path. 'oldpath' is set to the
    origin file when 'op' is either COPY or RENAME, None otherwise. If
    file mode is changed, 'mode' is a tuple (islink, isexec) where
    'islink' is True if the file is a symlink and 'isexec' is True if
    the file is executable. Otherwise, 'mode' is None.
    """
    def __init__(self, path):
        self.path = path
        self.oldpath = None
        self.mode = None
        self.op = 'MODIFY'
        self.binary = False

    def setmode(self, mode):
        # decompose a unix mode into (islink, isexec) bit flags
        self.mode = (mode & 0o20000, mode & 0o100)

    def copy(self):
        # shallow duplicate with all metadata fields carried over
        clone = patchmeta(self.path)
        clone.oldpath = self.oldpath
        clone.mode = self.mode
        clone.op = self.op
        clone.binary = self.binary
        return clone

    def _ispatchinga(self, afile):
        # additions have /dev/null as their "a" side in git diffs
        if afile == '/dev/null':
            return self.op == 'ADD'
        source = self.oldpath or self.path
        return afile == 'a/' + source

    def _ispatchingb(self, bfile):
        # deletions have /dev/null as their "b" side in git diffs
        if bfile == '/dev/null':
            return self.op == 'DELETE'
        return bfile == 'b/' + self.path

    def ispatching(self, afile, bfile):
        return self._ispatchinga(afile) and self._ispatchingb(bfile)

    def __repr__(self):
        return "<patchmeta %s %r>" % (self.op, self.path)
357 357
def readgitpatch(lr):
    """extract git-style metadata about patches from <patchname>"""

    # Filter patch for git information
    gp = None
    gitpatches = []
    for line in lr:
        line = line.rstrip(' \r\n')
        if line.startswith('diff --git a/'):
            m = gitre.match(line)
            if m:
                # new per-file section: flush the previous one
                if gp:
                    gitpatches.append(gp)
                dst = m.group(2)
                gp = patchmeta(dst)
        elif gp:
            if line.startswith('--- '):
                # hunk header reached: this file's metadata is complete
                gitpatches.append(gp)
                gp = None
                continue
            # slice offsets below match the length of the literal prefixes
            if line.startswith('rename from '):
                gp.op = 'RENAME'
                gp.oldpath = line[12:]
            elif line.startswith('rename to '):
                gp.path = line[10:]
            elif line.startswith('copy from '):
                gp.op = 'COPY'
                gp.oldpath = line[10:]
            elif line.startswith('copy to '):
                gp.path = line[8:]
            elif line.startswith('deleted file'):
                gp.op = 'DELETE'
            elif line.startswith('new file mode '):
                gp.op = 'ADD'
                # trailing 6 octal digits, e.g. '100644'
                gp.setmode(int(line[-6:], 8))
            elif line.startswith('new mode '):
                gp.setmode(int(line[-6:], 8))
            elif line.startswith('GIT binary patch'):
                gp.binary = True
    if gp:
        gitpatches.append(gp)

    return gitpatches
401 401
class linereader(object):
    '''wrap a file object with a pushback buffer so already-read lines
    can be returned to the stream and read again'''

    def __init__(self, fp):
        self.fp = fp
        # pending pushed-back lines, served FIFO before the file
        self.buf = []

    def push(self, line):
        # pushing None is a harmless no-op
        if line is not None:
            self.buf.append(line)

    def readline(self):
        if not self.buf:
            return self.fp.readline()
        return self.buf.pop(0)

    def __iter__(self):
        # iterate until readline() yields the empty (EOF) sentinel
        return iter(self.readline, '')
421 421
class abstractbackend(object):
    '''interface implemented by the patch-application targets below'''
    def __init__(self, ui):
        self.ui = ui

    def getfile(self, fname):
        """Return target file data and flags as a (data, (islink,
        isexec)) tuple. Data is None if file is missing/deleted.
        """
        raise NotImplementedError

    def setfile(self, fname, data, mode, copysource):
        """Write data to target file fname and set its mode. mode is a
        (islink, isexec) tuple. If data is None, the file content should
        be left unchanged. If the file is modified after being copied,
        copysource is set to the original file name.
        """
        raise NotImplementedError

    def unlink(self, fname):
        """Unlink target file."""
        raise NotImplementedError

    def writerej(self, fname, failed, total, lines):
        """Write rejected lines for fname. failed is the number of hunks
        which failed to apply and total is the total number of hunks for
        this file.
        """
        # default: silently discard rejects; filesystem backends override

    def exists(self, fname):
        raise NotImplementedError

    def close(self):
        raise NotImplementedError
455 455
class fsbackend(abstractbackend):
    '''patch backend applying changes directly to the filesystem rooted
    at basedir'''
    def __init__(self, ui, basedir):
        super(fsbackend, self).__init__(ui)
        self.opener = vfsmod.vfs(basedir)

    def getfile(self, fname):
        if self.opener.islink(fname):
            # a symlink's data is its target; symlinks are never "exec"
            return (self.opener.readlink(fname), (True, False))

        isexec = False
        try:
            isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
        try:
            return (self.opener.read(fname), (False, isexec))
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise
            # missing file: signalled to callers as (None, None)
            return None, None

    def setfile(self, fname, data, mode, copysource):
        islink, isexec = mode
        if data is None:
            # content unchanged: only adjust the flags
            self.opener.setflags(fname, islink, isexec)
            return
        if islink:
            self.opener.symlink(data, fname)
        else:
            self.opener.write(fname, data)
            if isexec:
                self.opener.setflags(fname, False, True)

    def unlink(self, fname):
        self.opener.unlinkpath(fname, ignoremissing=True)

    def writerej(self, fname, failed, total, lines):
        # rejects go next to the target file with a '.rej' suffix
        fname = fname + ".rej"
        self.ui.warn(
            _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
            (failed, total, fname))
        fp = self.opener(fname, 'w')
        fp.writelines(lines)
        fp.close()

    def exists(self, fname):
        return self.opener.lexists(fname)
504 504
class workingbackend(fsbackend):
    '''fsbackend over the working directory that also records copies,
    removals and changes for later dirstate bookkeeping in close()'''
    def __init__(self, ui, repo, similarity):
        super(workingbackend, self).__init__(ui, repo.root)
        self.repo = repo
        self.similarity = similarity
        self.removed = set()
        self.changed = set()
        self.copied = []

    def _checkknown(self, fname):
        # refuse to patch files that exist but are not tracked
        if self.repo.dirstate[fname] == '?' and self.exists(fname):
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def setfile(self, fname, data, mode, copysource):
        self._checkknown(fname)
        super(workingbackend, self).setfile(fname, data, mode, copysource)
        if copysource is not None:
            self.copied.append((copysource, fname))
        self.changed.add(fname)

    def unlink(self, fname):
        self._checkknown(fname)
        super(workingbackend, self).unlink(fname)
        self.removed.add(fname)
        self.changed.add(fname)

    def close(self):
        wctx = self.repo[None]
        changed = set(self.changed)
        for src, dst in self.copied:
            scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
        if self.removed:
            wctx.forget(sorted(self.removed))
            for f in self.removed:
                if f not in self.repo.dirstate:
                    # File was deleted and no longer belongs to the
                    # dirstate, it was probably marked added then
                    # deleted, and should not be considered by
                    # marktouched().
                    changed.discard(f)
        if changed:
            scmutil.marktouched(self.repo, changed, self.similarity)
        return sorted(self.changed)
548 548
class filestore(object):
    '''store file versions in memory, spilling entries to a temporary
    directory once maxsize bytes (default 4 MiB) are held in memory'''
    def __init__(self, maxsize=None):
        self.opener = None   # lazily-created vfs over the spill directory
        self.files = {}      # fname -> (tmpname, mode, copied), on disk
        self.created = 0     # counter used to generate spill file names
        self.maxsize = maxsize
        if self.maxsize is None:
            self.maxsize = 4*(2**20)
        self.size = 0        # bytes currently held in self.data
        self.data = {}       # fname -> (data, mode, copied), in memory

    def setfile(self, fname, data, mode, copied=None):
        if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
            self.data[fname] = (data, mode, copied)
            self.size += len(data)
        else:
            # memory budget exceeded: spill this entry to a temp file
            if self.opener is None:
                root = tempfile.mkdtemp(prefix='hg-patch-')
                self.opener = vfsmod.vfs(root)
            # Avoid filename issues with these simple names
            fn = '%d' % self.created
            self.opener.write(fn, data)
            self.created += 1
            self.files[fname] = (fn, mode, copied)

    def getfile(self, fname):
        if fname in self.data:
            return self.data[fname]
        if not self.opener or fname not in self.files:
            # unknown file: signalled as a triple of Nones
            return None, None, None
        fn, mode, copied = self.files[fname]
        return self.opener.read(fn), mode, copied

    def close(self):
        if self.opener:
            # remove the spill directory and everything in it
            shutil.rmtree(self.opener.base)
585 585
class repobackend(abstractbackend):
    '''patch backend that stages changes into a filestore against a
    changectx instead of touching the working directory'''
    def __init__(self, ui, repo, ctx, store):
        super(repobackend, self).__init__(ui)
        self.repo = repo
        self.ctx = ctx
        self.store = store
        self.changed = set()
        self.removed = set()
        self.copied = {}

    def _checkknown(self, fname):
        if fname not in self.ctx:
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def getfile(self, fname):
        try:
            fctx = self.ctx[fname]
        except error.LookupError:
            # missing file: signalled as (None, None)
            return None, None
        flags = fctx.flags()
        return fctx.data(), ('l' in flags, 'x' in flags)

    def setfile(self, fname, data, mode, copysource):
        if copysource:
            self._checkknown(copysource)
        if data is None:
            # flags-only change: keep the current content
            data = self.ctx[fname].data()
        self.store.setfile(fname, data, mode, copysource)
        self.changed.add(fname)
        if copysource:
            self.copied[fname] = copysource

    def unlink(self, fname):
        self._checkknown(fname)
        self.removed.add(fname)

    def exists(self, fname):
        return fname in self.ctx

    def close(self):
        # report the set of files the caller must commit
        return self.changed | self.removed
627 627
# @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
# Raw strings so '\d', '\+' and '\*' are regex escapes, not (invalid)
# string escapes, which emit DeprecationWarning/SyntaxWarning on
# modern Python. The pattern bytes are unchanged.
unidesc = re.compile(r'@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
contextdesc = re.compile(r'(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
# EOL normalization modes accepted by patchfile (see writelines())
eolmodes = ['strict', 'crlf', 'lf', 'auto']
632 632
633 633 class patchfile(object):
    def __init__(self, ui, gp, backend, store, eolmode='strict'):
        # gp is a patchmeta describing the file-level git metadata;
        # backend supplies/receives file contents, store holds copies.
        self.fname = gp.path
        self.eolmode = eolmode
        self.eol = None           # EOL detected from the existing file
        self.backend = backend
        self.ui = ui
        self.lines = []           # current file content, split on newlines
        self.exists = False
        self.missing = True
        self.mode = gp.mode
        self.copysource = gp.oldpath
        self.create = gp.op in ('ADD', 'COPY', 'RENAME')
        self.remove = gp.op == 'DELETE'
        if self.copysource is None:
            data, mode = backend.getfile(self.fname)
        else:
            # copies/renames read their content from the copy source
            data, mode = store.getfile(self.copysource)[:2]
        if data is not None:
            self.exists = self.copysource is None or backend.exists(self.fname)
            self.missing = False
            if data:
                self.lines = mdiff.splitnewlines(data)
            if self.mode is None:
                self.mode = mode
            if self.lines:
                # Normalize line endings
                if self.lines[0].endswith('\r\n'):
                    self.eol = '\r\n'
                elif self.lines[0].endswith('\n'):
                    self.eol = '\n'
                if eolmode != 'strict':
                    # work internally on '\n'; converted back on write
                    nlines = []
                    for l in self.lines:
                        if l.endswith('\r\n'):
                            l = l[:-2] + '\n'
                        nlines.append(l)
                    self.lines = nlines
        else:
            if self.create:
                self.missing = False
            if self.mode is None:
                self.mode = (False, False)
        if self.missing:
            self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
            self.ui.warn(_("(use '--prefix' to apply patch relative to the "
                           "current directory)\n"))

        self.hash = {}
        self.dirty = 0
        self.offset = 0
        self.skew = 0
        self.rej = []             # hunks that failed to apply
        self.fileprinted = False
        self.printfile(False)
        self.hunks = 0
689 689
690 690 def writelines(self, fname, lines, mode):
691 691 if self.eolmode == 'auto':
692 692 eol = self.eol
693 693 elif self.eolmode == 'crlf':
694 694 eol = '\r\n'
695 695 else:
696 696 eol = '\n'
697 697
698 698 if self.eolmode != 'strict' and eol and eol != '\n':
699 699 rawlines = []
700 700 for l in lines:
701 701 if l and l[-1] == '\n':
702 702 l = l[:-1] + eol
703 703 rawlines.append(l)
704 704 lines = rawlines
705 705
706 706 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
707 707
708 708 def printfile(self, warn):
709 709 if self.fileprinted:
710 710 return
711 711 if warn or self.ui.verbose:
712 712 self.fileprinted = True
713 713 s = _("patching file %s\n") % self.fname
714 714 if warn:
715 715 self.ui.warn(s)
716 716 else:
717 717 self.ui.note(s)
718 718
719 719
720 720 def findlines(self, l, linenum):
721 721 # looks through the hash and finds candidate lines. The
722 722 # result is a list of line numbers sorted based on distance
723 723 # from linenum
724 724
725 725 cand = self.hash.get(l, [])
726 726 if len(cand) > 1:
727 727 # resort our list of potentials forward then back.
728 728 cand.sort(key=lambda x: abs(x - linenum))
729 729 return cand
730 730
731 731 def write_rej(self):
732 732 # our rejects are a little different from patch(1). This always
733 733 # creates rejects in the same form as the original patch. A file
734 734 # header is inserted so that you can run the reject through patch again
735 735 # without having to type the filename.
736 736 if not self.rej:
737 737 return
738 738 base = os.path.basename(self.fname)
739 739 lines = ["--- %s\n+++ %s\n" % (base, base)]
740 740 for x in self.rej:
741 741 for l in x.hunk:
742 742 lines.append(l)
743 743 if l[-1:] != '\n':
744 744 lines.append("\n\ No newline at end of file\n")
745 745 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
746 746
747 747 def apply(self, h):
748 748 if not h.complete():
749 749 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
750 750 (h.number, h.desc, len(h.a), h.lena, len(h.b),
751 751 h.lenb))
752 752
753 753 self.hunks += 1
754 754
755 755 if self.missing:
756 756 self.rej.append(h)
757 757 return -1
758 758
759 759 if self.exists and self.create:
760 760 if self.copysource:
761 761 self.ui.warn(_("cannot create %s: destination already "
762 762 "exists\n") % self.fname)
763 763 else:
764 764 self.ui.warn(_("file %s already exists\n") % self.fname)
765 765 self.rej.append(h)
766 766 return -1
767 767
768 768 if isinstance(h, binhunk):
769 769 if self.remove:
770 770 self.backend.unlink(self.fname)
771 771 else:
772 772 l = h.new(self.lines)
773 773 self.lines[:] = l
774 774 self.offset += len(l)
775 775 self.dirty = True
776 776 return 0
777 777
778 778 horig = h
779 779 if (self.eolmode in ('crlf', 'lf')
780 780 or self.eolmode == 'auto' and self.eol):
781 781 # If new eols are going to be normalized, then normalize
782 782 # hunk data before patching. Otherwise, preserve input
783 783 # line-endings.
784 784 h = h.getnormalized()
785 785
786 786 # fast case first, no offsets, no fuzz
787 787 old, oldstart, new, newstart = h.fuzzit(0, False)
788 788 oldstart += self.offset
789 789 orig_start = oldstart
790 790 # if there's skew we want to emit the "(offset %d lines)" even
791 791 # when the hunk cleanly applies at start + skew, so skip the
792 792 # fast case code
793 793 if (self.skew == 0 and
794 794 diffhelpers.testhunk(old, self.lines, oldstart) == 0):
795 795 if self.remove:
796 796 self.backend.unlink(self.fname)
797 797 else:
798 798 self.lines[oldstart:oldstart + len(old)] = new
799 799 self.offset += len(new) - len(old)
800 800 self.dirty = True
801 801 return 0
802 802
803 803 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
804 804 self.hash = {}
805 805 for x, s in enumerate(self.lines):
806 806 self.hash.setdefault(s, []).append(x)
807 807
808 808 for fuzzlen in xrange(self.ui.configint("patch", "fuzz") + 1):
809 809 for toponly in [True, False]:
810 810 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
811 811 oldstart = oldstart + self.offset + self.skew
812 812 oldstart = min(oldstart, len(self.lines))
813 813 if old:
814 814 cand = self.findlines(old[0][1:], oldstart)
815 815 else:
816 816 # Only adding lines with no or fuzzed context, just
817 817 # take the skew in account
818 818 cand = [oldstart]
819 819
820 820 for l in cand:
821 821 if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
822 822 self.lines[l : l + len(old)] = new
823 823 self.offset += len(new) - len(old)
824 824 self.skew = l - orig_start
825 825 self.dirty = True
826 826 offset = l - orig_start - fuzzlen
827 827 if fuzzlen:
828 828 msg = _("Hunk #%d succeeded at %d "
829 829 "with fuzz %d "
830 830 "(offset %d lines).\n")
831 831 self.printfile(True)
832 832 self.ui.warn(msg %
833 833 (h.number, l + 1, fuzzlen, offset))
834 834 else:
835 835 msg = _("Hunk #%d succeeded at %d "
836 836 "(offset %d lines).\n")
837 837 self.ui.note(msg % (h.number, l + 1, offset))
838 838 return fuzzlen
839 839 self.printfile(True)
840 840 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
841 841 self.rej.append(horig)
842 842 return -1
843 843
844 844 def close(self):
845 845 if self.dirty:
846 846 self.writelines(self.fname, self.lines, self.mode)
847 847 self.write_rej()
848 848 return len(self.rej)
849 849
class header(object):
    """A parsed patch header together with the hunks attached to it."""
    diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
    diff_re = re.compile('diff -r .* (.*)$')
    allhunks_re = re.compile('(?:index|deleted file) ')
    pretty_re = re.compile('(?:new file|deleted file) ')
    special_re = re.compile('(?:index|deleted|copy|rename) ')
    newfile_re = re.compile('(?:new file)')

    def __init__(self, header):
        self.header = header
        self.hunks = []

    def binary(self):
        """True when any header line marks binary content ('index ')."""
        return any(line.startswith('index ') for line in self.header)

    def pretty(self, fp):
        """Write a human-oriented rendering of this header to fp."""
        for line in self.header:
            if line.startswith('index '):
                fp.write(_('this modifies a binary file (all or nothing)\n'))
                break
            if self.pretty_re.match(line):
                fp.write(line)
                if self.binary():
                    fp.write(_('this is a binary file\n'))
                break
            if line.startswith('---'):
                fp.write(_('%d hunks, %d lines changed\n')
                         % (len(self.hunks),
                            sum(max(h.added, h.removed) for h in self.hunks)))
                break
            fp.write(line)

    def write(self, fp):
        fp.write(''.join(self.header))

    def allhunks(self):
        """True when hunks cannot be selected individually."""
        return any(self.allhunks_re.match(line) for line in self.header)

    def files(self):
        """Source/target file names named by the first header line."""
        m = self.diffgit_re.match(self.header[0])
        if not m:
            return self.diff_re.match(self.header[0]).groups()
        fromfile, tofile = m.groups()
        if fromfile == tofile:
            return [fromfile]
        return [fromfile, tofile]

    def filename(self):
        return self.files()[-1]

    def __repr__(self):
        return '<header %s>' % (' '.join(map(repr, self.files())))

    def isnewfile(self):
        return any(self.newfile_re.match(line) for line in self.header)

    def special(self):
        # Special files are shown only at the header level and not at the
        # hunk level: e.g. a deleted file must be taken or left wholesale.
        # Newly added files are special only while empty; once they have
        # content the content should remain editable.
        if self.isnewfile() and len(self.header) == 2:
            return True
        return any(self.special_re.match(line) for line in self.header)
921 921
class recordhunk(object):
    """One hunk of a parsed patch, with its surrounding context lines.

    XXX shouldn't we merge this with the other hunk class?
    """

    def __init__(self, header, fromline, toline, proc, before, hunk, after,
                 maxcontext=None):
        def trimcontext(lines, reverse=False):
            # Clamp context to maxcontext lines and report how many were
            # dropped, so line numbers can be adjusted accordingly.
            if maxcontext is None:
                return 0, lines
            surplus = len(lines) - maxcontext
            if surplus <= 0:
                return 0, lines
            if reverse:
                return surplus, lines[surplus:]
            return surplus, lines[:maxcontext]

        self.header = header
        skipped, self.before = trimcontext(before, True)
        self.fromline = fromline + skipped
        self.toline = toline + skipped
        _skippedafter, self.after = trimcontext(after, False)
        self.proc = proc
        self.hunk = hunk
        self.added, self.removed = self.countchanges(self.hunk)

    def __eq__(self, v):
        if not isinstance(v, recordhunk):
            return False
        return (self.hunk == v.hunk
                and self.proc == v.proc
                and self.fromline == v.fromline
                and self.header.files() == v.header.files())

    def __hash__(self):
        return hash((tuple(self.hunk), tuple(self.header.files()),
                     self.fromline, self.proc))

    def countchanges(self, hunk):
        """hunk -> (n+,n-)"""
        add = sum(1 for h in hunk if h.startswith('+'))
        rem = sum(1 for h in hunk if h.startswith('-'))
        return add, rem

    def reversehunk(self):
        """Return another recordhunk which is the reverse of this one.

        If this hunk is diff(A, B), the returned hunk is diff(B, A): the
        fromline/toline values and the +/- signs are swapped, everything
        else is kept unchanged.
        """
        flip = {'+': '-', '-': '+', '\\': '\\'}
        flipped = [flip[l[0:1]] + l[1:] for l in self.hunk]
        return recordhunk(self.header, self.toline, self.fromline, self.proc,
                          self.before, flipped, self.after)

    def write(self, fp):
        delta = len(self.before) + len(self.after)
        # the no-eol marker does not count toward the range lengths
        if self.after and self.after[-1] == '\\ No newline at end of file\n':
            delta -= 1
        fromlen = delta + self.removed
        tolen = delta + self.added
        fp.write('@@ -%d,%d +%d,%d @@%s\n'
                 % (self.fromline, fromlen, self.toline, tolen,
                    self.proc and (' ' + self.proc)))
        fp.write(''.join(self.before + self.hunk + self.after))

    pretty = write

    def filename(self):
        return self.header.filename()

    def __repr__(self):
        return '<hunk %r@%d>' % (self.filename(), self.fromline)
1000 1000
def getmessages():
    """Return the prompt-string table for interactive patch filtering.

    Top-level keys 'multiple'/'single' pick the phrasing depending on
    how many changes are under review; 'help' holds the
    ui.promptchoice() strings ('$$'-separated choices) per operation.
    Wrapping the table in a function means _() runs at call time.
    """
    return {
        'multiple': {
            'apply': _("apply change %d/%d to '%s'?"),
            'discard': _("discard change %d/%d to '%s'?"),
            'record': _("record change %d/%d to '%s'?"),
        },
        'single': {
            'apply': _("apply this change to '%s'?"),
            'discard': _("discard this change to '%s'?"),
            'record': _("record this change to '%s'?"),
        },
        'help': {
            'apply': _('[Ynesfdaq?]'
                       '$$ &Yes, apply this change'
                       '$$ &No, skip this change'
                       '$$ &Edit this change manually'
                       '$$ &Skip remaining changes to this file'
                       '$$ Apply remaining changes to this &file'
                       '$$ &Done, skip remaining changes and files'
                       '$$ Apply &all changes to all remaining files'
                       '$$ &Quit, applying no changes'
                       '$$ &? (display help)'),
            'discard': _('[Ynesfdaq?]'
                         '$$ &Yes, discard this change'
                         '$$ &No, skip this change'
                         '$$ &Edit this change manually'
                         '$$ &Skip remaining changes to this file'
                         '$$ Discard remaining changes to this &file'
                         '$$ &Done, skip remaining changes and files'
                         '$$ Discard &all changes to all remaining files'
                         '$$ &Quit, discarding no changes'
                         '$$ &? (display help)'),
            'record': _('[Ynesfdaq?]'
                        '$$ &Yes, record this change'
                        '$$ &No, skip this change'
                        '$$ &Edit this change manually'
                        '$$ &Skip remaining changes to this file'
                        '$$ Record remaining changes to this &file'
                        '$$ &Done, skip remaining changes and files'
                        '$$ Record &all changes to all remaining files'
                        '$$ &Quit, recording no changes'
                        '$$ &? (display help)'),
        }
    }
1046 1046
def filterpatch(ui, headers, operation=None):
    """Interactively filter patch chunks into applied-only chunks

    Walks every header and hunk, prompting the user per the *operation*
    wording ('record' by default). Returns (chunks, {}) where chunks is
    the flat list of selected headers+hunks.
    """
    messages = getmessages()

    if operation is None:
        operation = 'record'

    def prompt(skipfile, skipall, query, chunk):
        """prompt query, and process base inputs

        - y/n for the rest of file
        - y/n for the rest
        - ? (help)
        - q (quit)

        Return True/False and possibly updated skipfile and skipall.
        """
        newpatches = None
        # sticky answers short-circuit further prompting
        if skipall is not None:
            return skipall, skipfile, skipall, newpatches
        if skipfile is not None:
            return skipfile, skipfile, skipall, newpatches
        while True:
            resps = messages['help'][operation]
            r = ui.promptchoice("%s %s" % (query, resps))
            ui.write("\n")
            if r == 8: # ?
                for c, t in ui.extractchoices(resps)[1]:
                    ui.write('%s - %s\n' % (c, encoding.lower(t)))
                continue
            elif r == 0: # yes
                ret = True
            elif r == 1: # no
                ret = False
            elif r == 2: # Edit patch
                if chunk is None:
                    ui.write(_('cannot edit patch for whole file'))
                    ui.write("\n")
                    continue
                if chunk.header.binary():
                    ui.write(_('cannot edit patch for binary file'))
                    ui.write("\n")
                    continue
                # Patch comment based on the Git one (based on comment at end of
                # https://mercurial-scm.org/wiki/RecordExtension)
                phelp = '---' + _("""
To remove '-' lines, make them ' ' lines (context).
To remove '+' lines, delete them.
Lines starting with # will be removed from the patch.

If the patch applies cleanly, the edited hunk will immediately be
added to the record list. If it does not apply cleanly, a rejects
file will be generated: you can use that when you try again. If
all lines of the hunk are removed, then the edit is aborted and
the hunk is left unchanged.
""")
                (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
                                                      suffix=".diff",
                                                      text=True)
                ncpatchfp = None
                try:
                    # Write the initial patch
                    f = os.fdopen(patchfd, pycompat.sysstr("w"))
                    chunk.header.write(f)
                    chunk.write(f)
                    f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
                    f.close()
                    # Start the editor and wait for it to complete
                    editor = ui.geteditor()
                    ret = ui.system("%s \"%s\"" % (editor, patchfn),
                                    environ={'HGUSER': ui.username()},
                                    blockedtag='filterpatch')
                    if ret != 0:
                        ui.warn(_("editor exited with exit code %d\n") % ret)
                        continue
                    # Remove comment lines
                    patchfp = open(patchfn)
                    ncpatchfp = stringio()
                    for line in util.iterfile(patchfp):
                        if not line.startswith('#'):
                            ncpatchfp.write(line)
                    patchfp.close()
                    ncpatchfp.seek(0)
                    newpatches = parsepatch(ncpatchfp)
                finally:
                    os.unlink(patchfn)
                    del ncpatchfp
                # Signal that the chunk shouldn't be applied as-is, but
                # provide the new patch to be used instead.
                ret = False
            elif r == 3: # Skip
                ret = skipfile = False
            elif r == 4: # file (Record remaining)
                ret = skipfile = True
            elif r == 5: # done, skip remaining
                ret = skipall = False
            elif r == 6: # all
                ret = skipall = True
            elif r == 7: # quit
                raise error.Abort(_('user quit'))
            return ret, skipfile, skipall, newpatches

    seen = set()
    applied = {}        # 'filename' -> [] of chunks
    skipfile, skipall = None, None
    pos, total = 1, sum(len(h.hunks) for h in headers)
    for h in headers:
        pos += len(h.hunks)
        skipfile = None
        fixoffset = 0   # line drift caused by skipped hunks in this file
        hdr = ''.join(h.header)
        if hdr in seen:
            continue
        seen.add(hdr)
        if skipall is None:
            h.pretty(ui)
        msg = (_('examine changes to %s?') %
               _(' and ').join("'%s'" % f for f in h.files()))
        r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
        if not r:
            continue
        applied[h.filename()] = [h]
        if h.allhunks():
            applied[h.filename()] += h.hunks
            continue
        for i, chunk in enumerate(h.hunks):
            if skipfile is None and skipall is None:
                chunk.pretty(ui)
            if total == 1:
                msg = messages['single'][operation] % chunk.filename()
            else:
                idx = pos - len(h.hunks) + i
                msg = messages['multiple'][operation] % (idx, total,
                                                         chunk.filename())
            r, skipfile, skipall, newpatches = prompt(skipfile,
                    skipall, msg, chunk)
            if r:
                if fixoffset:
                    chunk = copy.copy(chunk)
                    chunk.toline += fixoffset
                applied[chunk.filename()].append(chunk)
            elif newpatches is not None:
                for newpatch in newpatches:
                    for newhunk in newpatch.hunks:
                        if fixoffset:
                            newhunk.toline += fixoffset
                        applied[newhunk.filename()].append(newhunk)
            else:
                fixoffset += chunk.removed - chunk.added
    # Keep only files that are special or have at least one selected hunk.
    return (sum([h for h in applied.itervalues()
                 if h[0].special() or len(h) > 1], []), {})
class hunk(object):
    """One text hunk read from a unified or context diff.

    self.a holds the old-side lines, self.b the new-side lines, and
    self.hunk the raw hunk lines starting with the @@ description.
    """
    def __init__(self, desc, num, lr, context):
        self.number = num
        self.desc = desc
        self.hunk = [desc]
        self.a = []
        self.b = []
        self.starta = self.lena = None
        self.startb = self.lenb = None
        if lr is not None:
            if context:
                self.read_context_hunk(lr)
            else:
                self.read_unified_hunk(lr)

    def getnormalized(self):
        """Return a copy with line endings normalized to LF."""

        def normalize(lines):
            nlines = []
            for line in lines:
                if line.endswith('\r\n'):
                    line = line[:-2] + '\n'
                nlines.append(line)
            return nlines

        # Dummy object, it is rebuilt manually
        nh = hunk(self.desc, self.number, None, None)
        nh.number = self.number
        nh.desc = self.desc
        nh.hunk = self.hunk
        nh.a = normalize(self.a)
        nh.b = normalize(self.b)
        nh.starta = self.starta
        nh.startb = self.startb
        nh.lena = self.lena
        nh.lenb = self.lenb
        return nh

    def read_unified_hunk(self, lr):
        """Parse a unified-diff hunk from linereader lr."""
        m = unidesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, self.lena, self.startb, self.lenb = m.groups()
        # a missing length in '@@ -s +s @@' means 1
        if self.lena is None:
            self.lena = 1
        else:
            self.lena = int(self.lena)
        if self.lenb is None:
            self.lenb = 1
        else:
            self.lenb = int(self.lenb)
        self.starta = int(self.starta)
        self.startb = int(self.startb)
        diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
                             self.b)
        # if we hit eof before finishing out the hunk, the last line will
        # be zero length. Lets try to fix it up.
        while len(self.hunk[-1]) == 0:
            del self.hunk[-1]
            del self.a[-1]
            del self.b[-1]
            self.lena -= 1
            self.lenb -= 1
        self._fixnewline(lr)

    def read_context_hunk(self, lr):
        """Parse a context-diff hunk ('***'/'---' style) from lr."""
        self.desc = lr.readline()
        m = contextdesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, aend = m.groups()
        self.starta = int(self.starta)
        if aend is None:
            aend = self.starta
        self.lena = int(aend) - self.starta
        if self.starta:
            self.lena += 1
        # read the old-side block
        for x in xrange(self.lena):
            l = lr.readline()
            if l.startswith('---'):
                # lines addition, old block is empty
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('- ') or l.startswith('! '):
                u = '-' + s
            elif l.startswith(' '):
                u = ' ' + s
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.a.append(u)
            self.hunk.append(u)

        l = lr.readline()
        if l.startswith('\ '):
            # '\ No newline at end of file': strip the trailing newline
            s = self.a[-1][:-1]
            self.a[-1] = s
            self.hunk[-1] = s
            l = lr.readline()
        m = contextdesc.match(l)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.startb, bend = m.groups()
        self.startb = int(self.startb)
        if bend is None:
            bend = self.startb
        self.lenb = int(bend) - self.startb
        if self.startb:
            self.lenb += 1
        hunki = 1
        # read the new-side block, merging it into self.hunk in order
        for x in xrange(self.lenb):
            l = lr.readline()
            if l.startswith('\ '):
                # XXX: the only way to hit this is with an invalid line range.
                # The no-eol marker is not counted in the line range, but I
                # guess there are diff(1) out there which behave differently.
                s = self.b[-1][:-1]
                self.b[-1] = s
                self.hunk[hunki - 1] = s
                continue
            if not l:
                # line deletions, new block is empty and we hit EOF
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('+ ') or l.startswith('! '):
                u = '+' + s
            elif l.startswith(' '):
                u = ' ' + s
            elif len(self.b) == 0:
                # line deletions, new block is empty
                lr.push(l)
                break
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.b.append(s)
            # place u into self.hunk after any '-' lines it follows
            while True:
                if hunki >= len(self.hunk):
                    h = ""
                else:
                    h = self.hunk[hunki]
                hunki += 1
                if h == u:
                    break
                elif h.startswith('-'):
                    continue
                else:
                    self.hunk.insert(hunki - 1, u)
                    break

        if not self.a:
            # this happens when lines were only added to the hunk
            for x in self.hunk:
                if x.startswith('-') or x.startswith(' '):
                    self.a.append(x)
        if not self.b:
            # this happens when lines were only deleted from the hunk
            for x in self.hunk:
                if x.startswith('+') or x.startswith(' '):
                    self.b.append(x[1:])
        # @@ -start,len +start,len @@
        self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
                                               self.startb, self.lenb)
        self.hunk[0] = self.desc
        self._fixnewline(lr)

    def _fixnewline(self, lr):
        # Consume a trailing '\ No newline at end of file' marker, if any.
        l = lr.readline()
        if l.startswith('\ '):
            diffhelpers.fix_newline(self.hunk, self.a, self.b)
        else:
            lr.push(l)

    def complete(self):
        # A hunk is complete when both sides match their declared lengths.
        return len(self.a) == self.lena and len(self.b) == self.lenb

    def _fuzzit(self, old, new, fuzz, toponly):
        # this removes context lines from the top and bottom of list 'l'. It
        # checks the hunk to make sure only context lines are removed, and then
        # returns a new shortened list of lines.
        fuzz = min(fuzz, len(old))
        if fuzz:
            top = 0
            bot = 0
            hlen = len(self.hunk)
            for x in xrange(hlen - 1):
                # the hunk starts with the @@ line, so use x+1
                if self.hunk[x + 1][0] == ' ':
                    top += 1
                else:
                    break
            if not toponly:
                for x in xrange(hlen - 1):
                    if self.hunk[hlen - bot - 1][0] == ' ':
                        bot += 1
                    else:
                        break

            bot = min(fuzz, bot)
            top = min(fuzz, top)
            return old[top:len(old) - bot], new[top:len(new) - bot], top
        return old, new, 0

    def fuzzit(self, fuzz, toponly):
        """Return (old, oldstart, new, newstart) with up to *fuzz* context
        lines trimmed and start positions converted to 0-based indexes."""
        old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
        oldstart = self.starta + top
        newstart = self.startb + top
        # zero length hunk ranges already have their start decremented
        if self.lena and oldstart > 0:
            oldstart -= 1
        if self.lenb and newstart > 0:
            newstart -= 1
        return old, oldstart, new, newstart
1413 1413
class binhunk(object):
    """A binary patch file.

    Parses a 'GIT binary patch' section ('literal' or 'delta') and
    decodes the base85/zlib payload into self.text.
    """
    def __init__(self, lr, fname):
        self.text = None
        self.delta = False
        self.hunk = ['GIT binary patch\n']
        self._fname = fname
        self._read(lr)

    def complete(self):
        return self.text is not None

    def new(self, lines):
        """Return the patched content as a single-element list."""
        if self.delta:
            return [applybindelta(self.text, ''.join(lines))]
        return [self.text]

    def _read(self, lr):
        def getline(lr, hunk):
            # read one raw line, keep it in the hunk, return it sans EOL
            l = lr.readline()
            hunk.append(l)
            return l.rstrip('\r\n')

        size = 0
        # scan forward to the 'literal <size>' or 'delta <size>' header
        while True:
            line = getline(lr, self.hunk)
            if not line:
                raise PatchError(_('could not extract "%s" binary data')
                                 % self._fname)
            if line.startswith('literal '):
                size = int(line[8:].rstrip())
                break
            if line.startswith('delta '):
                size = int(line[6:].rstrip())
                self.delta = True
                break
        dec = []
        line = getline(lr, self.hunk)
        while len(line) > 1:
            # first character encodes the decoded byte count of the line:
            # 'A'-'Z' -> 1-26, anything else (per git: 'a'-'z') -> 27-52
            l = line[0:1]
            if l <= 'Z' and l >= 'A':
                l = ord(l) - ord('A') + 1
            else:
                l = ord(l) - ord('a') + 27
            try:
                dec.append(util.b85decode(line[1:])[:l])
            except ValueError as e:
                raise PatchError(_('could not decode "%s" binary patch: %s')
                                 % (self._fname, util.forcebytestr(e)))
            line = getline(lr, self.hunk)
        text = zlib.decompress(''.join(dec))
        if len(text) != size:
            raise PatchError(_('"%s" length is %d bytes, should be %d')
                             % (self._fname, len(text), size))
        self.text = text
1469 1469
def parsefilename(str):
    # --- filename \t|space stuff
    # Strip the 4-character '--- '/'+++ ' prefix and the EOL, then cut at
    # the first tab if present, else at the first space, else keep all.
    rest = str[4:].rstrip('\r\n')
    for sep in ('\t', ' '):
        idx = rest.find(sep)
        if idx >= 0:
            return rest[:idx]
    return rest
1479 1479
def reversehunks(hunks):
    '''reverse the signs in the hunks given as argument

    This function operates on hunks coming out of patch.filterpatch, that is
    a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:

    >>> rawpatch = b"""diff --git a/folder1/g b/folder1/g
    ... --- a/folder1/g
    ... +++ b/folder1/g
    ... @@ -1,7 +1,7 @@
    ... +firstline
    ...  c
    ...  1
    ...  2
    ... + 3
    ... -4
    ...  5
    ...  d
    ... +lastline"""
    >>> hunks = parsepatch([rawpatch])
    >>> hunkscomingfromfilterpatch = []
    >>> for h in hunks:
    ...     hunkscomingfromfilterpatch.append(h)
    ...     hunkscomingfromfilterpatch.extend(h.hunks)

    >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
    >>> from . import util
    >>> fp = util.stringio()
    >>> for c in reversedhunks:
    ...      c.write(fp)
    >>> fp.seek(0) or None
    >>> reversedpatch = fp.read()
    >>> print(pycompat.sysstr(reversedpatch))
    diff --git a/folder1/g b/folder1/g
    --- a/folder1/g
    +++ b/folder1/g
    @@ -1,4 +1,3 @@
    -firstline
     c
     1
     2
    @@ -2,6 +1,6 @@
     c
     1
     2
    - 3
    +4
     5
     d
    @@ -6,3 +5,2 @@
     5
     d
    -lastline

    '''

    # Headers pass through untouched; anything that knows how to reverse
    # itself (recordhunk) is replaced by its reverse.
    return [c.reversehunk() if util.safehasattr(c, 'reversehunk') else c
            for c in hunks]
1542 1542
def parsepatch(originalchunks, maxcontext=None):
    """patch -> [] of headers -> [] of hunks

    If maxcontext is not None, trim context lines if necessary.

    >>> rawpatch = b'''diff --git a/folder1/g b/folder1/g
    ... --- a/folder1/g
    ... +++ b/folder1/g
    ... @@ -1,8 +1,10 @@
    ... 1
    ... 2
    ... -3
    ... 4
    ... 5
    ... 6
    ... +6.1
    ... +6.2
    ... 7
    ... 8
    ... +9'''
    >>> out = util.stringio()
    >>> headers = parsepatch([rawpatch], maxcontext=1)
    >>> for header in headers:
    ...     header.write(out)
    ...     for hunk in header.hunks:
    ...         hunk.write(out)
    >>> print(pycompat.sysstr(out.getvalue()))
    diff --git a/folder1/g b/folder1/g
    --- a/folder1/g
    +++ b/folder1/g
    @@ -2,3 +2,2 @@
    2
    -3
    4
    @@ -6,2 +5,4 @@
    6
    +6.1
    +6.2
    7
    @@ -8,1 +9,2 @@
    8
    +9
    """
    class parser(object):
        """patch parsing state machine"""
        def __init__(self):
            # current positions in the original ("from") and patched
            # ("to") file, updated as hunks are recorded
            self.fromline = 0
            self.toline = 0
            self.proc = ''
            # header object currently being filled with hunks
            self.header = None
            # context lines seen since the last hunk was flushed
            self.context = []
            # context lines preceding the pending hunk
            self.before = []
            # pending +/- lines not yet turned into a recordhunk
            self.hunk = []
            self.headers = []

        def addrange(self, limits):
            # record the @@ -a,b +c,d @@ coordinates of the next hunk
            fromstart, fromend, tostart, toend, proc = limits
            self.fromline = int(fromstart)
            self.toline = int(tostart)
            self.proc = proc

        def addcontext(self, context):
            # flush the pending hunk (if any): trailing context ends a hunk
            if self.hunk:
                h = recordhunk(self.header, self.fromline, self.toline,
                               self.proc, self.before, self.hunk, context,
                               maxcontext)
                self.header.hunks.append(h)
                # advance line trackers past what the hunk consumed
                self.fromline += len(self.before) + h.removed
                self.toline += len(self.before) + h.added
                self.before = []
                self.hunk = []
            self.context = context

        def addhunk(self, hunk):
            # context seen so far becomes the leading context of this hunk
            if self.context:
                self.before = self.context
                self.context = []
            self.hunk = hunk

        def newfile(self, hdr):
            # starting a new file header flushes any pending hunk first
            self.addcontext([])
            h = header(hdr)
            self.headers.append(h)
            self.header = h

        def addother(self, line):
            pass # 'other' lines are ignored

        def finished(self):
            # flush the last pending hunk before returning
            self.addcontext([])
            return self.headers

        # valid (current state -> event) transitions; the values are
        # unbound methods, invoked below as f(parser_instance, data)
        transitions = {
            'file': {'context': addcontext,
                     'file': newfile,
                     'hunk': addhunk,
                     'range': addrange},
            'context': {'file': newfile,
                        'hunk': addhunk,
                        'range': addrange,
                        'other': addother},
            'hunk': {'context': addcontext,
                     'file': newfile,
                     'range': addrange},
            'range': {'context': addcontext,
                      'hunk': addhunk},
            'other': {'other': addother},
            }

    p = parser()
    fp = stringio()
    fp.write(''.join(originalchunks))
    fp.seek(0)

    # drive the state machine with events produced by scanpatch()
    state = 'context'
    for newstate, data in scanpatch(fp):
        try:
            p.transitions[state][newstate](p, data)
        except KeyError:
            raise PatchError('unhandled transition: %s -> %s' %
                             (state, newstate))
        state = newstate
    del fp
    return p.finished()
1666 1666
def pathtransform(path, strip, prefix):
    '''turn a path from a patch into a path suitable for the repository

    prefix, if not empty, is expected to be normalized with a / at the end.

    Returns (stripped components, path in repository).

    >>> pathtransform(b'a/b/c', 0, b'')
    ('', 'a/b/c')
    >>> pathtransform(b' a/b/c ', 0, b'')
    ('', ' a/b/c')
    >>> pathtransform(b' a/b/c ', 2, b'')
    ('a/b/', 'c')
    >>> pathtransform(b'a/b/c', 0, b'd/e/')
    ('', 'd/e/a/b/c')
    >>> pathtransform(b' a//b/c ', 2, b'd/e/')
    ('a//b/', 'd/e/c')
    >>> pathtransform(b'a/b/c', 3, b'')
    Traceback (most recent call last):
    PatchError: unable to strip away 1 of 3 dirs from a/b/c
    '''
    if strip == 0:
        # nothing to strip; just prepend the prefix
        return '', prefix + path.rstrip()
    end = len(path)
    pos = 0
    remaining = strip
    while remaining:
        pos = path.find('/', pos)
        if pos == -1:
            raise PatchError(_("unable to strip away %d of %d dirs from %s") %
                             (remaining, strip, path))
        pos += 1
        # a run of slashes ('a//b') counts as a single component separator
        while pos < end - 1 and path[pos:pos + 1] == '/':
            pos += 1
        remaining -= 1
    return path[:pos].lstrip(), prefix + path[pos:].rstrip()
1704 1704
def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
    """Build a patchmeta for a plain (non-git) hunk.

    Decides which of afile/bfile names the real patch target, and whether
    the hunk creates ('ADD') or deletes ('DELETE') that file. Raises
    PatchError when neither side names a usable file.
    """
    # '/dev/null' on either side marks a creation or a deletion
    nulla = afile_orig == "/dev/null"
    nullb = bfile_orig == "/dev/null"
    create = nulla and hunk.starta == 0 and hunk.lena == 0
    remove = nullb and hunk.startb == 0 and hunk.lenb == 0
    abase, afile = pathtransform(afile_orig, strip, prefix)
    gooda = not nulla and backend.exists(afile)
    bbase, bfile = pathtransform(bfile_orig, strip, prefix)
    if afile == bfile:
        goodb = gooda
    else:
        goodb = not nullb and backend.exists(bfile)
    missing = not goodb and not gooda and not create

    # some diff programs apparently produce patches where the afile is
    # not /dev/null, but afile starts with bfile
    abasedir = afile[:afile.rfind('/') + 1]
    bbasedir = bfile[:bfile.rfind('/') + 1]
    if (missing and abasedir == bbasedir and afile.startswith(bfile)
        and hunk.starta == 0 and hunk.lena == 0):
        create = True
        missing = False

    # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
    # diff is between a file and its backup. In this case, the original
    # file should be patched (see original mpatch code).
    isbackup = (abase == bbase and bfile.startswith(afile))
    fname = None
    if not missing:
        # prefer a side that actually exists in the backend
        if gooda and goodb:
            if isbackup:
                fname = afile
            else:
                fname = bfile
        elif gooda:
            fname = afile

    if not fname:
        # fall back to whichever side is not /dev/null
        if not nullb:
            if isbackup:
                fname = afile
            else:
                fname = bfile
        elif not nulla:
            fname = afile
        else:
            raise PatchError(_("undefined source and destination files"))

    gp = patchmeta(fname)
    if create:
        gp.op = 'ADD'
    elif remove:
        gp.op = 'DELETE'
    return gp
1759 1759
def scanpatch(fp):
    """like patch.iterhunks, but yield different events

    - ('file', [header_lines + fromfile + tofile])
    - ('context', [context_lines])
    - ('hunk', [hunk_lines])
    - ('range', (-start,len, +start,len, proc))
    """
    lines_re = re.compile(br'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
    lr = linereader(fp)

    def scanwhile(first, p):
        """scan lr while predicate holds"""
        lines = [first]
        for line in iter(lr.readline, ''):
            if p(line):
                lines.append(line)
            else:
                lr.push(line)
                break
        return lines

    for line in iter(lr.readline, ''):
        if line.startswith('diff --git a/') or line.startswith('diff -r '):
            def notheader(line):
                s = line.split(None, 1)
                return not s or s[0] not in ('---', 'diff')
            # collect all header lines up to the '---'/'diff' boundary
            header = scanwhile(line, notheader)
            fromfile = lr.readline()
            if fromfile.startswith('---'):
                tofile = lr.readline()
                header += [fromfile, tofile]
            else:
                lr.push(fromfile)
            yield 'file', header
        # slice ([0:1]) rather than index ([0]): indexing bytes on
        # Python 3 yields an integer, which would never compare equal
        # to a one-character string
        elif line[0:1] == ' ':
            yield 'context', scanwhile(line, lambda l: l[0:1] in ' \\')
        elif line[0:1] in '-+':
            yield 'hunk', scanwhile(line, lambda l: l[0:1] in '-+\\')
        else:
            m = lines_re.match(line)
            if m:
                yield 'range', m.groups()
            else:
                yield 'other', line
1805 1805
def scangitpatch(lr, firstline):
    """Pre-scan a git patch for copy/rename metadata.

    Git patches can emit:
    - rename a to b
    - change b
    - copy a to c
    - change c

    Such a sequence cannot be applied as-is: by the time 'a' is needed it
    has already been renamed, and copying from 'b' would pick up changes.
    So we scan ahead for copy and rename commands, allowing the caller to
    perform the copies before applying hunks. The read position of the
    underlying file is restored before returning.
    """
    startpos = 0
    source = lr.fp
    try:
        startpos = source.tell()
    except IOError:
        # the input is not seekable: buffer it all in memory instead
        source = stringio(lr.fp.read())
    gitlr = linereader(source)
    gitlr.push(firstline)
    patches = readgitpatch(gitlr)
    source.seek(startpos)
    return patches
1831 1831
def iterhunks(fp):
    """Read a patch and yield the following events:
    - ("file", afile, bfile, firsthunk): select a new target file.
    - ("hunk", hunk): a new hunk is ready to be applied, follows a
    "file" event.
    - ("git", gitchanges): current diff is in git format, gitchanges
    maps filenames to gitpatch records. Unique event.
    """
    afile = ""
    bfile = ""
    state = None
    hunknum = 0
    emitfile = newfile = False
    gitpatches = None

    # our states
    BFILE = 1
    # context: None = unknown yet, False = unified diff, True = context diff
    context = None
    lr = linereader(fp)

    for x in iter(lr.readline, ''):
        # use startswith() instead of x[0] == '@' so the comparison also
        # works on Python 3, where indexing bytes yields an integer
        if state == BFILE and (
            (not context and x.startswith('@'))
            or (context is not False and x.startswith('***************'))
            or x.startswith('GIT binary patch')):
            gp = None
            if (gitpatches and
                gitpatches[-1].ispatching(afile, bfile)):
                gp = gitpatches.pop()
            if x.startswith('GIT binary patch'):
                h = binhunk(lr, gp.path)
            else:
                if context is None and x.startswith('***************'):
                    context = True
                h = hunk(x, hunknum + 1, lr, context)
            hunknum += 1
            if emitfile:
                # first hunk of a new file: announce the file first
                emitfile = False
                yield 'file', (afile, bfile, h, gp and gp.copy() or None)
            yield 'hunk', h
        elif x.startswith('diff --git a/'):
            m = gitre.match(x.rstrip(' \r\n'))
            if not m:
                continue
            if gitpatches is None:
                # scan whole input for git metadata
                gitpatches = scangitpatch(lr, x)
                yield 'git', [g.copy() for g in gitpatches
                              if g.op in ('COPY', 'RENAME')]
                gitpatches.reverse()
            afile = 'a/' + m.group(1)
            bfile = 'b/' + m.group(2)
            # emit pending metadata-only entries preceding this file
            while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
                gp = gitpatches.pop()
                yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
            if not gitpatches:
                raise PatchError(_('failed to synchronize metadata for "%s"')
                                 % afile[2:])
            gp = gitpatches[-1]
            newfile = True
        elif x.startswith('---'):
            # check for a unified diff
            l2 = lr.readline()
            if not l2.startswith('+++'):
                lr.push(l2)
                continue
            newfile = True
            context = False
            afile = parsefilename(x)
            bfile = parsefilename(l2)
        elif x.startswith('***'):
            # check for a context diff
            l2 = lr.readline()
            if not l2.startswith('---'):
                lr.push(l2)
                continue
            l3 = lr.readline()
            lr.push(l3)
            if not l3.startswith("***************"):
                lr.push(l2)
                continue
            newfile = True
            context = True
            afile = parsefilename(x)
            bfile = parsefilename(l2)

        if newfile:
            newfile = False
            emitfile = True
            state = BFILE
            hunknum = 0

    # flush any remaining metadata-only git entries (e.g. pure renames)
    while gitpatches:
        gp = gitpatches.pop()
        yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1927 1927
def applybindelta(binchunk, data):
    """Apply a binary delta hunk
    The algorithm used is the algorithm from git's patch-delta.c

    The delta starts with two variable-length size headers (source and
    target size), followed by a stream of copy commands (high bit set:
    copy offset/size from *data*) and literal commands (copy the next
    cmd bytes from the delta itself).
    """
    def deltahead(binchunk):
        # length of the variable-width (base-128) integer at the start:
        # each byte with the high bit set means "more bytes follow"
        i = 0
        while i < len(binchunk):
            # slice (not index) so ord() also works on Python 3 bytes
            c = binchunk[i:i + 1]
            i += 1
            if not (ord(c) & 0x80):
                return i
        return i
    out = ""
    s = deltahead(binchunk)      # skip source size header
    binchunk = binchunk[s:]
    s = deltahead(binchunk)      # skip target size header
    binchunk = binchunk[s:]
    i = 0
    while i < len(binchunk):
        cmd = ord(binchunk[i:i + 1])
        i += 1
        if (cmd & 0x80):
            # copy command: optional offset/size bytes, little-endian,
            # presence of each byte flagged by a bit in cmd
            offset = 0
            size = 0
            if (cmd & 0x01):
                offset = ord(binchunk[i:i + 1])
                i += 1
            if (cmd & 0x02):
                offset |= ord(binchunk[i:i + 1]) << 8
                i += 1
            if (cmd & 0x04):
                offset |= ord(binchunk[i:i + 1]) << 16
                i += 1
            if (cmd & 0x08):
                offset |= ord(binchunk[i:i + 1]) << 24
                i += 1
            if (cmd & 0x10):
                size = ord(binchunk[i:i + 1])
                i += 1
            if (cmd & 0x20):
                size |= ord(binchunk[i:i + 1]) << 8
                i += 1
            if (cmd & 0x40):
                size |= ord(binchunk[i:i + 1]) << 16
                i += 1
            if size == 0:
                # git encodes a 64KiB copy as size 0
                size = 0x10000
            offset_end = offset + size
            out += data[offset:offset_end]
        elif cmd != 0:
            # literal command: copy the next cmd bytes verbatim
            offset_end = i + cmd
            out += binchunk[i:offset_end]
            i += cmd
        else:
            raise PatchError(_('unexpected delta opcode 0'))
    return out
1983 1983
def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
    """Read a patch from fp and try to apply it via the internal patcher.

    Returns 0 when the patch applied cleanly, -1 when any hunks were
    rejected, and 1 when the patch applied with fuzz.

    With eolmode 'strict' the patch content and the patched file are read
    in binary mode; any other mode ignores line endings while patching and
    then normalizes them according to eolmode.
    """
    patcher = patchfile
    return _applydiff(ui, fp, patcher, backend, store, strip=strip,
                      prefix=prefix, eolmode=eolmode)
1996 1996
1997 1997 def _canonprefix(repo, prefix):
1998 1998 if prefix:
1999 1999 prefix = pathutil.canonpath(repo.root, repo.getcwd(), prefix)
2000 2000 if prefix != '':
2001 2001 prefix += '/'
2002 2002 return prefix
2003 2003
def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
               eolmode='strict'):
    """Apply the hunks read from fp to the backend using *patcher*.

    Returns 0 on clean application, -1 if any file was rejected, 1 on
    fuzz. Drives iterhunks() and handles git metadata (renames, copies,
    mode changes, adds/deletes) between hunks.
    """
    prefix = _canonprefix(backend.repo, prefix)
    def pstrip(p):
        # strip - 1: git paths carry an extra 'a/'/'b/' component
        return pathtransform(p, strip - 1, prefix)[1]

    rejects = 0
    err = 0
    current_file = None

    for state, values in iterhunks(fp):
        if state == 'hunk':
            if not current_file:
                # file selection failed earlier; skip its hunks
                continue
            ret = current_file.apply(values)
            if ret > 0:
                err = 1
        elif state == 'file':
            # finish the previous file before switching targets
            if current_file:
                rejects += current_file.close()
                current_file = None
            afile, bfile, first_hunk, gp = values
            if gp:
                gp.path = pstrip(gp.path)
                if gp.oldpath:
                    gp.oldpath = pstrip(gp.oldpath)
            else:
                # plain patch: derive the metadata from the file names
                gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
                                   prefix)
            if gp.op == 'RENAME':
                backend.unlink(gp.oldpath)
            if not first_hunk:
                # metadata-only entry: delete, copy/rename or mode change
                if gp.op == 'DELETE':
                    backend.unlink(gp.path)
                    continue
                data, mode = None, None
                if gp.op in ('RENAME', 'COPY'):
                    data, mode = store.getfile(gp.oldpath)[:2]
                    if data is None:
                        # This means that the old path does not exist
                        raise PatchError(_("source file '%s' does not exist")
                                         % gp.oldpath)
                if gp.mode:
                    mode = gp.mode
                if gp.op == 'ADD':
                    # Added files without content have no hunk and
                    # must be created
                    data = ''
                if data or mode:
                    if (gp.op in ('ADD', 'RENAME', 'COPY')
                        and backend.exists(gp.path)):
                        raise PatchError(_("cannot create %s: destination "
                                           "already exists") % gp.path)
                    backend.setfile(gp.path, data, mode, gp.oldpath)
                continue
            try:
                current_file = patcher(ui, gp, backend, store,
                                       eolmode=eolmode)
            except PatchError as inst:
                # record the rejection and keep going with the next file
                ui.warn(str(inst) + '\n')
                current_file = None
                rejects += 1
                continue
        elif state == 'git':
            # snapshot copy/rename sources before they are modified
            for gp in values:
                path = pstrip(gp.oldpath)
                data, mode = backend.getfile(path)
                if data is None:
                    # The error ignored here will trigger a getfile()
                    # error in a place more appropriate for error
                    # handling, and will not interrupt the patching
                    # process.
                    pass
                else:
                    store.setfile(path, data, mode)
        else:
            raise error.Abort(_('unsupported parser state: %s') % state)

    if current_file:
        rejects += current_file.close()

    if rejects:
        return -1
    return err
2088 2088
def _externalpatch(ui, repo, patcher, patchname, strip, files,
                   similarity):
    """use <patcher> to apply <patchname> to the working directory.
    returns whether patch was applied with fuzz factor.

    Runs the external command via a shell pipe and scrapes its output for
    'patching file', 'with fuzz', 'saving rejects' and 'FAILED' markers.
    Touched file names are added to *files*; raises PatchError when the
    command exits non-zero.
    """

    fuzz = False
    args = []
    cwd = repo.root
    if cwd:
        args.append('-d %s' % util.shellquote(cwd))
    fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
                                         util.shellquote(patchname)))
    try:
        for line in util.iterfile(fp):
            line = line.rstrip()
            ui.note(line + '\n')
            if line.startswith('patching file '):
                pf = util.parsepatchoutput(line)
                printed_file = False
                files.add(pf)
            elif line.find('with fuzz') >= 0:
                fuzz = True
                # NOTE(review): pf/printed_file are only bound by a prior
                # 'patching file' line — presumably the external patch tool
                # always prints one first; verify, else this would raise
                # UnboundLocalError.
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
            elif line.find('saving rejects to file') >= 0:
                ui.warn(line + '\n')
            elif line.find('FAILED') >= 0:
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
    finally:
        if files:
            scmutil.marktouched(repo, files, similarity)
        code = fp.close()
        if code:
            raise PatchError(_("patch command failed: %s") %
                             util.explainexit(code)[0])
    return fuzz
2130 2130
def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
                 eolmode='strict'):
    """Apply *patchobj* (a file name or an open file object) to *backend*.

    Returns True when the patch applied with fuzz, False when it applied
    cleanly; raises PatchError when it failed to apply. Names of touched
    files are added to *files*.
    """
    if files is None:
        files = set()
    if eolmode is None:
        eolmode = ui.config('patch', 'eol')
    if eolmode.lower() not in eolmodes:
        raise error.Abort(_('unsupported line endings type: %s') % eolmode)
    eolmode = eolmode.lower()

    store = filestore()
    try:
        # patchobj may be a path (open it) or an already-open file object
        fp = open(patchobj, 'rb')
    except TypeError:
        fp = patchobj
    try:
        ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
                        eolmode=eolmode)
    finally:
        # only close the file if we opened it ourselves
        if fp != patchobj:
            fp.close()
        files.update(backend.close())
        store.close()
    if ret < 0:
        raise PatchError(_('patch failed to apply'))
    return ret > 0
2157 2157
def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
                  eolmode='strict', similarity=0):
    """Apply <patchobj> to the working directory with the builtin patcher.

    Returns whether the patch was applied with fuzz factor."""
    return patchbackend(ui, workingbackend(ui, repo, similarity), patchobj,
                        strip, prefix, files, eolmode)
2164 2164
def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
              eolmode='strict'):
    """Apply <patchobj> on top of *ctx*, writing results into *store*.

    Returns whether the patch was applied with fuzz factor."""
    return patchbackend(ui, repobackend(ui, repo, ctx, store), patchobj,
                        strip, prefix, files, eolmode)
2169 2169
def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
          similarity=0):
    """Apply <patchname> to the working directory.

    'eolmode' specifies how end of lines should be handled. It can be:
    - 'strict': inputs are read in binary mode, EOLs are preserved
    - 'crlf': EOLs are ignored when patching and reset to CRLF
    - 'lf': EOLs are ignored when patching and reset to LF
    - None: get it from user settings, default to 'strict'
    'eolmode' is ignored when using an external patcher program.

    Returns whether patch was applied with fuzz factor.
    """
    if files is None:
        files = set()
    # a configured ui.patch command takes precedence over the builtin patcher
    externalcmd = ui.config('ui', 'patch')
    if externalcmd:
        return _externalpatch(ui, repo, externalcmd, patchname, strip,
                              files, similarity)
    return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
                         similarity)
2191 2191
def changedfiles(ui, repo, patchpath, strip=1, prefix=''):
    """Return the set of repository paths touched by the patch at
    *patchpath*, without applying it. Rename sources are included."""
    backend = fsbackend(ui, repo.root)
    prefix = _canonprefix(repo, prefix)
    with open(patchpath, 'rb') as fp:
        changed = set()
        for state, values in iterhunks(fp):
            if state == 'file':
                afile, bfile, first_hunk, gp = values
                if gp:
                    # git patch: paths come from the metadata
                    gp.path = pathtransform(gp.path, strip - 1, prefix)[1]
                    if gp.oldpath:
                        gp.oldpath = pathtransform(gp.oldpath, strip - 1,
                                                   prefix)[1]
                else:
                    # plain patch: derive metadata from the file names
                    gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
                                       prefix)
                changed.add(gp.path)
                if gp.op == 'RENAME':
                    changed.add(gp.oldpath)
            elif state not in ('hunk', 'git'):
                raise error.Abort(_('unsupported parser state: %s') % state)
        return changed
2214 2214
class GitDiffRequired(Exception):
    """Raised internally to signal that diff output must be regenerated
    in git format (see diffhunks)."""
2217 2217
def diffallopts(ui, opts=None, untrusted=False, section='diff'):
    '''Return diffopts with every feature group (git, whitespace,
    format-changing) enabled and parsed.'''
    return difffeatureopts(ui, opts=opts, untrusted=untrusted,
                           section=section, git=True, whitespace=True,
                           formatchanging=True)

# historical alias kept for existing callers
diffopts = diffallopts
2224 2224
def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
                    whitespace=False, formatchanging=False):
    '''return diffopts with only opted-in features parsed

    Features:
    - git: git-style diffs
    - whitespace: whitespace options like ignoreblanklines and ignorews
    - formatchanging: options that will likely break or cause correctness issues
      with most diff parsers
    '''
    def get(key, name=None, getter=ui.configbool, forceplain=None):
        # resolve an option: command-line value first, then config
        if opts:
            v = opts.get(key)
            # diffopts flags are either None-default (which is passed
            # through unchanged, so we can identify unset values), or
            # some other falsey default (eg --unified, which defaults
            # to an empty string). We only want to override the config
            # entries from hgrc with command line values if they
            # appear to have been set, which is any truthy value,
            # True, or False.
            if v or isinstance(v, bool):
                return v
        if forceplain is not None and ui.plain():
            return forceplain
        return getter(section, name or key, untrusted=untrusted)

    # core options, expected to be understood by every diff parser
    buildopts = {
        'nodates': get('nodates'),
        'showfunc': get('show_function', 'showfunc'),
        'context': get('unified', getter=ui.config),
    }
    buildopts['worddiff'] = ui.configbool('experimental', 'worddiff')

    if git:
        buildopts['git'] = get('git')

        # since this is in the experimental section, we need to call
        # ui.configbool directly
        buildopts['showsimilarity'] = ui.configbool('experimental',
                                                    'extendedheader.similarity')

        # need to inspect the ui object instead of using get() since we want to
        # test for an int
        hconf = ui.config('experimental', 'extendedheader.index')
        if hconf is not None:
            hlen = None
            try:
                # the hash config could be an integer (for length of hash) or a
                # word (e.g. short, full, none)
                hlen = int(hconf)
                if hlen < 0 or hlen > 40:
                    msg = _("invalid length for extendedheader.index: '%d'\n")
                    ui.warn(msg % hlen)
            except ValueError:
                # default value
                if hconf == 'short' or hconf == '':
                    hlen = 12
                elif hconf == 'full':
                    hlen = 40
                elif hconf != 'none':
                    msg = _("invalid value for extendedheader.index: '%s'\n")
                    ui.warn(msg % hconf)
            finally:
                # always record the result, even after a warning
                buildopts['index'] = hlen

    if whitespace:
        buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
        buildopts['ignorewsamount'] = get('ignore_space_change',
                                          'ignorewsamount')
        buildopts['ignoreblanklines'] = get('ignore_blank_lines',
                                            'ignoreblanklines')
        buildopts['ignorewseol'] = get('ignore_space_at_eol', 'ignorewseol')
    if formatchanging:
        buildopts['text'] = opts and opts.get('text')
        binary = None if opts is None else opts.get('binary')
        buildopts['nobinary'] = (not binary if binary is not None
                                 else get('nobinary', forceplain=False))
        buildopts['noprefix'] = get('noprefix', forceplain=False)

    return mdiff.diffopts(**pycompat.strkwargs(buildopts))
2306 2306
def diff(repo, node1=None, node2=None, match=None, changes=None,
         opts=None, losedatafn=None, prefix='', relroot='', copy=None,
         hunksfilterfn=None):
    '''yields diff of changes to files between two nodes, or node and
    working directory.

    if node1 is None, use first dirstate parent instead.
    if node2 is None, compare node1 with working directory.

    losedatafn(**kwarg) is a callable run when opts.upgrade=True and
    every time some change cannot be represented with the current
    patch format. Return False to upgrade to git patch format, True to
    accept the loss or raise an exception to abort the diff. It is
    called with the name of current file being diffed as 'fn'. If set
    to None, patches will always be upgraded to git format when
    necessary.

    prefix is a filename prefix that is prepended to all filenames on
    display (used for subrepos).

    relroot, if not empty, must be normalized with a trailing /. Any match
    patterns that fall outside it will be ignored.

    copy, if not empty, should contain mappings {dst@y: src@x} of copy
    information.

    hunksfilterfn, if not None, should be a function taking a filectx and
    hunks generator that may yield filtered hunks.
    '''
    for fctx1, fctx2, hdr, hunks in diffhunks(
            repo, node1=node1, node2=node2,
            match=match, changes=changes, opts=opts,
            losedatafn=losedatafn, prefix=prefix, relroot=relroot, copy=copy,
    ):
        if hunksfilterfn is not None:
            # If the file has been removed, fctx2 is None; but this should
            # not occur here since we catch removed files early in
            # logcmdutil.getlinerangerevs() for 'hg log -L'.
            assert fctx2 is not None, \
                'fctx2 unexpectly None in diff hunks filtering'
            hunks = hunksfilterfn(fctx2, hunks)
        # flatten the (hunkrange, hunklines) pairs into one text blob
        text = ''.join(sum((list(hlines) for hrange, hlines in hunks), []))
        # emit the header only if there is content or extended header lines
        if hdr and (text or len(hdr) > 1):
            yield '\n'.join(hdr) + '\n'
        if text:
            yield text
2353 2353
def diffhunks(repo, node1=None, node2=None, match=None, changes=None,
              opts=None, losedatafn=None, prefix='', relroot='', copy=None):
    """Yield diff of changes to files in the form of (`header`, `hunks`) tuples
    where `header` is a list of diff headers and `hunks` is an iterable of
    (`hunkrange`, `hunklines`) tuples.

    See diff() for the meaning of parameters.
    """

    if opts is None:
        opts = mdiff.defaultopts

    if not node1 and not node2:
        node1 = repo.dirstate.p1()

    def lrugetfilectx():
        # small LRU (20 entries) of filelogs to avoid reopening them
        cache = {}
        order = collections.deque()
        def getfilectx(f, ctx):
            fctx = ctx.filectx(f, filelog=cache.get(f))
            if f not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[f] = fctx.filelog()
            else:
                order.remove(f)
            order.append(f)
            return fctx
        return getfilectx
    getfilectx = lrugetfilectx()

    ctx1 = repo[node1]
    ctx2 = repo[node2]

    relfiltered = False
    if relroot != '' and match.always():
        # as a special case, create a new matcher with just the relroot
        pats = [relroot]
        match = scmutil.match(ctx2, pats, default='path')
        relfiltered = True

    if not changes:
        changes = repo.status(ctx1, ctx2, match=match)
    modified, added, removed = changes[:3]

    if not modified and not added and not removed:
        return []

    if repo.ui.debugflag:
        hexfunc = hex
    else:
        hexfunc = short
    revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]

    if copy is None:
        copy = {}
        if opts.git or opts.upgrade:
            copy = copies.pathcopies(ctx1, ctx2, match=match)

    if relroot is not None:
        if not relfiltered:
            # XXX this would ideally be done in the matcher, but that is
            # generally meant to 'or' patterns, not 'and' them. In this case we
            # need to 'and' all the patterns from the matcher with relroot.
            def filterrel(l):
                return [f for f in l if f.startswith(relroot)]
            modified = filterrel(modified)
            added = filterrel(added)
            removed = filterrel(removed)
            relfiltered = True
        # filter out copies where either side isn't inside the relative root
        # (items(), not the py2-only iteritems(), matching the copy.items()
        # call below and keeping this working on Python 3)
        copy = dict(((dst, src) for (dst, src) in copy.items()
                     if dst.startswith(relroot)
                     and src.startswith(relroot)))

    modifiedset = set(modified)
    addedset = set(added)
    removedset = set(removed)
    for f in modified:
        if f not in ctx1:
            # Fix up added, since merged-in additions appear as
            # modifications during merges
            modifiedset.remove(f)
            addedset.add(f)
    for f in removed:
        if f not in ctx1:
            # Merged-in additions that are then removed are reported as removed.
            # They are not in ctx1, so We don't want to show them in the diff.
            removedset.remove(f)
    modified = sorted(modifiedset)
    added = sorted(addedset)
    removed = sorted(removedset)
    for dst, src in list(copy.items()):
        if src not in ctx1:
            # Files merged in during a merge and then copied/renamed are
            # reported as copies. We want to show them in the diff as additions.
            del copy[dst]

    def difffn(opts, losedata):
        return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
                       copy, getfilectx, opts, losedata, prefix, relroot)
    if opts.upgrade and not opts.git:
        try:
            def losedata(fn):
                if not losedatafn or not losedatafn(fn=fn):
                    raise GitDiffRequired
            # Buffer the whole output until we are sure it can be generated
            return list(difffn(opts.copy(git=False), losedata))
        except GitDiffRequired:
            # some change could not be expressed as a plain diff: redo in git
            # format
            return difffn(opts.copy(git=True), None)
    else:
        return difffn(opts, None)
2466 2466
def difflabel(func, *args, **kw):
    '''yields 2-tuples of (output, label) based on the output of func()'''
    # word-level (inline) highlighting is only attempted when the caller's
    # diff options request it
    useinline = False
    if kw.get(r'opts'):
        useinline = kw[r'opts'].worddiff
    headprefixes = [('diff', 'diff.diffline'),
                    ('copy', 'diff.extended'),
                    ('rename', 'diff.extended'),
                    ('old', 'diff.extended'),
                    ('new', 'diff.extended'),
                    ('deleted', 'diff.extended'),
                    ('index', 'diff.extended'),
                    ('similarity', 'diff.extended'),
                    ('---', 'diff.file_a'),
                    ('+++', 'diff.file_b')]
    textprefixes = [('@', 'diff.hunk'),
                    ('-', 'diff.deleted'),
                    ('+', 'diff.inserted')]
    inheader = False
    for chunk in func(*args, **kw):
        lines = chunk.split('\n')
        matches = _findmatches(lines) if useinline else {}
        for i, line in enumerate(lines):
            if i != 0:
                yield ('\n', '')
            # a per-file header ends at the first hunk marker ('@');
            # outside a header, any line that is not hunk content starts one
            if inheader:
                if line.startswith('@'):
                    inheader = False
            elif line and line[0] not in ' +-@\\':
                inheader = True
            stripped = line
            isdiffline = False
            if not inheader and line and line[0] in '+-':
                # highlight tabs and trailing whitespace, but only in
                # changed lines
                stripped = line.rstrip()
                isdiffline = True

            prefixes = headprefixes if inheader else textprefixes
            matched = False
            for pfx, label in prefixes:
                if not stripped.startswith(pfx):
                    continue
                matched = True
                if isdiffline:
                    if i in matches:
                        # paired delete/insert line: emit a word-level diff
                        for t, l in _inlinediff(lines[i].rstrip(),
                                                lines[matches[i]].rstrip(),
                                                label):
                            yield (t, l)
                    else:
                        for token in tabsplitter.findall(stripped):
                            if token.startswith('\t'):
                                yield (token, 'diff.tab')
                            else:
                                yield (token, label)
                else:
                    yield (stripped, label)
                break
            if not matched:
                yield (line, '')
            if line != stripped:
                # re-emit what rstrip() removed, labelled as trailing space
                yield (line[len(stripped):], 'diff.trailingwhitespace')
2532 2532
2533 2533 def _findmatches(slist):
2534 2534 '''Look for insertion matches to deletion and returns a dict of
2535 2535 correspondences.
2536 2536 '''
2537 2537 lastmatch = 0
2538 2538 matches = {}
2539 2539 for i, line in enumerate(slist):
2540 2540 if line == '':
2541 2541 continue
2542 2542 if line[0] == '-':
2543 2543 lastmatch = max(lastmatch, i)
2544 2544 newgroup = False
2545 2545 for j, newline in enumerate(slist[lastmatch + 1:]):
2546 2546 if newline == '':
2547 2547 continue
2548 2548 if newline[0] == '-' and newgroup: # too far, no match
2549 2549 break
2550 2550 if newline[0] == '+': # potential match
2551 2551 newgroup = True
2552 2552 sim = difflib.SequenceMatcher(None, line, newline).ratio()
2553 2553 if sim > 0.7:
2554 2554 lastmatch = lastmatch + 1 + j
2555 2555 matches[i] = lastmatch
2556 2556 matches[lastmatch] = i
2557 2557 break
2558 2558 return matches
2559 2559
def _inlinediff(s1, s2, operation):
    '''Perform string diff to highlight specific changes.'''
    # ndiff markers belonging to the side we are not rendering get skipped
    operation_skip = '+?' if operation == 'diff.deleted' else '-?'
    if operation == 'diff.deleted':
        s2, s1 = s1, s2

    out = []
    # we never want to higlight the leading +-
    if operation == 'diff.deleted' and s2.startswith('-'):
        curlabel = operation
        curtoken = '-'
        s2 = s2[1:]
        s1 = s1[1:]
    elif operation == 'diff.inserted' and s1.startswith('+'):
        curlabel = operation
        curtoken = '+'
        s2 = s2[1:]
        s1 = s1[1:]
    else:
        raise error.ProgrammingError("Case not expected, operation = %s" %
                                     operation)

    for part in difflib.ndiff(_nonwordre.split(s2), _nonwordre.split(s1)):
        marker, text = part[0], part[2:]
        if marker in operation_skip or len(part) == 2:
            continue
        # unchanged words keep the plain label, changed ones are highlighted
        newlabel = operation if marker in ' ' else operation + '.highlight'
        if text == '\t':
            newlabel = 'diff.tab'
        if newlabel == curlabel: # contiguous token with same label
            curtoken += text
            continue
        out.append((curtoken, curlabel))
        curlabel = newlabel
        curtoken = text
    out.append((curtoken, curlabel))

    return out
2601 2601
def diffui(*args, **kw):
    '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
    # delegate to difflabel() so that diff() output gets color labels
    return difflabel(diff, *args, **kw)
2605 2605
2606 2606 def _filepairs(modified, added, removed, copy, opts):
2607 2607 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2608 2608 before and f2 is the the name after. For added files, f1 will be None,
2609 2609 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2610 2610 or 'rename' (the latter two only if opts.git is set).'''
2611 2611 gone = set()
2612 2612
2613 2613 copyto = dict([(v, k) for k, v in copy.items()])
2614 2614
2615 2615 addedset, removedset = set(added), set(removed)
2616 2616
2617 2617 for f in sorted(modified + added + removed):
2618 2618 copyop = None
2619 2619 f1, f2 = f, f
2620 2620 if f in addedset:
2621 2621 f1 = None
2622 2622 if f in copy:
2623 2623 if opts.git:
2624 2624 f1 = copy[f]
2625 2625 if f1 in removedset and f1 not in gone:
2626 2626 copyop = 'rename'
2627 2627 gone.add(f1)
2628 2628 else:
2629 2629 copyop = 'copy'
2630 2630 elif f in removedset:
2631 2631 f2 = None
2632 2632 if opts.git:
2633 2633 # have we already reported a copy above?
2634 2634 if (f in copyto and copyto[f] in addedset
2635 2635 and copy[copyto[f]] == f):
2636 2636 continue
2637 2637 yield f1, f2, copyop
2638 2638
def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
            copy, getfilectx, opts, losedatafn, prefix, relroot):
    '''given input data, generate a diff and yield it in blocks

    If generating a diff would lose data like flags or binary data and
    losedatafn is not None, it will be called.

    relroot is removed and prefix is added to every path in the diff output.

    If relroot is not empty, this function expects every path in modified,
    added, removed and copy to start with it.'''

    def gitindex(text):
        # git-style blob id: sha1 over "blob <size>\0" followed by the data
        if not text:
            text = ""
        l = len(text)
        s = hashlib.sha1('blob %d\0' % l)
        s.update(text)
        return hex(s.digest())

    if opts.noprefix:
        aprefix = bprefix = ''
    else:
        aprefix = 'a/'
        bprefix = 'b/'

    def diffline(f, revs):
        # plain-style "diff -r REV1 -r REV2 file" header line
        revinfo = ' '.join(["-r %s" % rev for rev in revs])
        return 'diff %s %s' % (revinfo, f)

    def isempty(fctx):
        return fctx is None or fctx.size() == 0

    date1 = util.datestr(ctx1.date())
    date2 = util.datestr(ctx2.date())

    gitmode = {'l': '120000', 'x': '100755', '': '100644'}

    # developer sanity check: every path must live under relroot
    if relroot != '' and (repo.ui.configbool('devel', 'all-warnings')
                          or repo.ui.configbool('devel', 'check-relroot')):
        for f in modified + added + removed + list(copy) + list(copy.values()):
            if f is not None and not f.startswith(relroot):
                raise AssertionError(
                    "file %s doesn't start with relroot %s" % (f, relroot))

    for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
        content1 = None
        content2 = None
        fctx1 = None
        fctx2 = None
        flag1 = None
        flag2 = None
        if f1:
            fctx1 = getfilectx(f1, ctx1)
            if opts.git or losedatafn:
                flag1 = ctx1.flags(f1)
        if f2:
            fctx2 = getfilectx(f2, ctx2)
            if opts.git or losedatafn:
                flag2 = ctx2.flags(f2)
        # if binary is True, output "summary" or "base85", but not "text diff"
        if opts.text:
            binary = False
        else:
            binary = any(f.isbinary() for f in [fctx1, fctx2] if f is not None)

        if losedatafn and not opts.git:
            if (binary or
                # copy/rename
                f2 in copy or
                # empty file creation
                (not f1 and isempty(fctx2)) or
                # empty file deletion
                (isempty(fctx1) and not f2) or
                # create with flags
                (not f1 and flag2) or
                # change flags
                (f1 and f2 and flag1 != flag2)):
                losedatafn(f2 or f1)

        path1 = f1 or f2
        path2 = f2 or f1
        path1 = posixpath.join(prefix, path1[len(relroot):])
        path2 = posixpath.join(prefix, path2[len(relroot):])
        header = []
        if opts.git:
            header.append('diff --git %s%s %s%s' %
                          (aprefix, path1, bprefix, path2))
            if not f1: # added
                header.append('new file mode %s' % gitmode[flag2])
            elif not f2: # removed
                header.append('deleted file mode %s' % gitmode[flag1])
            else: # modified/copied/renamed
                mode1, mode2 = gitmode[flag1], gitmode[flag2]
                if mode1 != mode2:
                    header.append('old mode %s' % mode1)
                    header.append('new mode %s' % mode2)
                if copyop is not None:
                    if opts.showsimilarity:
                        sim = similar.score(ctx1[path1], ctx2[path2]) * 100
                        header.append('similarity index %d%%' % sim)
                    header.append('%s from %s' % (copyop, path1))
                    header.append('%s to %s' % (copyop, path2))
        elif revs and not repo.ui.quiet:
            header.append(diffline(path1, revs))

        #  fctx.is  | diffopts                | what to   | is fctx.data()
        #  binary() | text nobinary git index | output?   | outputted?
        # ------------------------------------|----------------------------
        #  yes      | no   no       no  *     | summary   | no
        #  yes      | no   no       yes *     | base85    | yes
        #  yes      | no   yes      no  *     | summary   | no
        #  yes      | no   yes      yes 0     | summary   | no
        #  yes      | no   yes      yes >0    | summary   | semi [1]
        #  yes      | yes  *        *   *     | text diff | yes
        #  no       | *    *        *   *     | text diff | yes
        # [1]: hash(fctx.data()) is outputted. so fctx.data() cannot be faked
        if binary and (not opts.git or (opts.git and opts.nobinary and not
                                        opts.index)):
            # fast path: no binary content will be displayed, content1 and
            # content2 are only used for equivalent test. cmp() could have a
            # fast path.
            if fctx1 is not None:
                content1 = b'\0'
            if fctx2 is not None:
                if fctx1 is not None and not fctx1.cmp(fctx2):
                    content2 = b'\0' # not different
                else:
                    content2 = b'\0\0'
        else:
            # normal path: load contents
            if fctx1 is not None:
                content1 = fctx1.data()
            if fctx2 is not None:
                content2 = fctx2.data()

        if binary and opts.git and not opts.nobinary:
            text = mdiff.b85diff(content1, content2)
            if text:
                header.append('index %s..%s' %
                              (gitindex(content1), gitindex(content2)))
            hunks = (None, [text]),
        else:
            if opts.git and opts.index > 0:
                flag = flag1
                if flag is None:
                    flag = flag2
                header.append('index %s..%s %s' %
                              (gitindex(content1)[0:opts.index],
                               gitindex(content2)[0:opts.index],
                               gitmode[flag]))

            uheaders, hunks = mdiff.unidiff(content1, date1,
                                            content2, date2,
                                            path1, path2,
                                            binary=binary, opts=opts)
            header.extend(uheaders)
        yield fctx1, fctx2, header, hunks
2797 2797
def diffstatsum(stats):
    '''Aggregate per-file diffstat tuples.

    stats yields (filename, adds, removes, isbinary) tuples; returns
    (max display width of a filename, max adds+removes for a single file,
    total adds, total removes, whether any file was binary).
    '''
    maxfile = maxtotal = addtotal = removetotal = 0
    binary = False
    for f, a, r, b in stats:
        maxfile = max(maxfile, encoding.colwidth(f))
        maxtotal = max(maxtotal, a + r)
        addtotal += a
        removetotal += r
        binary = binary or b

    return maxfile, maxtotal, addtotal, removetotal, binary
2808 2808
def diffstatdata(lines):
    '''Parse unified diff lines into (filename, adds, removes, isbinary)
    tuples, one per file.'''
    # raw string: '\s' inside a non-raw literal is an invalid escape
    # sequence (deprecated since Python 3.6, a future SyntaxError)
    diffre = re.compile(r'^diff .*-r [a-z0-9]+\s(.*)$')

    results = []
    filename, adds, removes, isbinary = None, 0, 0, False

    def addresult():
        # flush the stats accumulated for the file currently being scanned
        if filename:
            results.append((filename, adds, removes, isbinary))

    # inheader is used to track if a line is in the
    # header portion of the diff. This helps properly account
    # for lines that start with '--' or '++'
    inheader = False

    for line in lines:
        if line.startswith('diff'):
            addresult()
            # starting a new file diff
            # set numbers to 0 and reset inheader
            inheader = True
            adds, removes, isbinary = 0, 0, False
            if line.startswith('diff --git a/'):
                filename = gitre.search(line).group(2)
            elif line.startswith('diff -r'):
                # format: "diff -r ... -r ... filename"
                filename = diffre.search(line).group(1)
        elif line.startswith('@@'):
            inheader = False
        elif line.startswith('+') and not inheader:
            adds += 1
        elif line.startswith('-') and not inheader:
            removes += 1
        elif line.startswith(('GIT binary patch', 'Binary file')):
            isbinary = True
    addresult()
    return results
2847 2847
def diffstat(lines, width=80):
    '''Render a diffstat summary of the given diff lines as a single
    string, fitting the histogram within the given total width.'''
    stats = diffstatdata(lines)
    maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)

    countwidth = len(str(maxtotal))
    if hasbinary:
        # the 'Bin' marker needs at least three columns
        countwidth = max(countwidth, 3)
    graphwidth = max(width - countwidth - maxname - 6, 10)

    def scale(i):
        if maxtotal <= graphwidth:
            return i
        # If diffstat runs out of room it doesn't print anything,
        # which isn't very useful, so always print at least one + or -
        # if there were at least some changes.
        return max(i * graphwidth // maxtotal, int(bool(i)))

    output = []
    for filename, adds, removes, isbinary in stats:
        count = 'Bin' if isbinary else '%d' % (adds + removes)
        pluses = '+' * scale(adds)
        minuses = '-' * scale(removes)
        output.append(' %s%s | %*s %s%s\n'
                      % (filename,
                         ' ' * (maxname - encoding.colwidth(filename)),
                         countwidth, count, pluses, minuses))

    if stats:
        output.append(_(' %d files changed, %d insertions(+), '
                        '%d deletions(-)\n')
                      % (len(stats), totaladds, totalremoves))

    return ''.join(output)
2885 2885
def diffstatui(*args, **kw):
    '''like diffstat(), but yields 2-tuples of (output, label) for
    ui.write()
    '''

    for line in diffstat(*args, **kw).splitlines():
        # slice rather than index so this also works on Python 3 bytes,
        # where line[-1] would be an int instead of a one-element string
        if line and line[-1:] in '+-':
            name, graph = line.rsplit(' ', 1)
            yield (name + ' ', '')
            m = re.search(br'\++', graph)
            if m:
                yield (m.group(0), 'diffstat.inserted')
            m = re.search(br'-+', graph)
            if m:
                yield (m.group(0), 'diffstat.deleted')
        else:
            yield (line, '')
        yield ('\n', '')
General Comments 0
You need to be logged in to leave comments. Login now