patch: disable nobinary when HGPLAIN=1...
Mateusz Kwapich
r27401:186f2afe default
@@ -1,2557 +1,2557 @@
# patch.py - patch file parsing routines
#
# Copyright 2006 Brendan Cully <brendan@kublai.com>
# Copyright 2007 Chris Mason <chris.mason@oracle.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

import collections
import cStringIO, email, os, errno, re, posixpath, copy
import tempfile, zlib, shutil

from i18n import _
from node import hex, short
import base85, mdiff, scmutil, util, diffhelpers, copies, encoding, error
import pathutil

gitre = re.compile('diff --git a/(.*) b/(.*)')
tabsplitter = re.compile(r'(\t+|[^\t]+)')

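# For example, gitre matches 'diff --git a/foo.c b/foo.c' and captures the
# two paths ('foo.c', 'foo.c'); tabsplitter tokenizes a line into alternating
# runs of tabs and non-tab text.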
class PatchError(Exception):
    pass


# public functions

def split(stream):
    '''return an iterator of individual patches from a stream'''
    def isheader(line, inheader):
        if inheader and line[0] in (' ', '\t'):
            # continuation
            return True
        if line[0] in (' ', '-', '+'):
            # diff line - don't check for header pattern in there
            return False
        l = line.split(': ', 1)
        return len(l) == 2 and ' ' not in l[0]

    def chunk(lines):
        return cStringIO.StringIO(''.join(lines))

    def hgsplit(stream, cur):
        inheader = True

        for line in stream:
            if not line.strip():
                inheader = False
            if not inheader and line.startswith('# HG changeset patch'):
                yield chunk(cur)
                cur = []
                inheader = True

            cur.append(line)

        if cur:
            yield chunk(cur)

    def mboxsplit(stream, cur):
        for line in stream:
            if line.startswith('From '):
                for c in split(chunk(cur[1:])):
                    yield c
                cur = []

            cur.append(line)

        if cur:
            for c in split(chunk(cur[1:])):
                yield c

    def mimesplit(stream, cur):
        def msgfp(m):
            fp = cStringIO.StringIO()
            g = email.Generator.Generator(fp, mangle_from_=False)
            g.flatten(m)
            fp.seek(0)
            return fp

        for line in stream:
            cur.append(line)
        c = chunk(cur)

        m = email.Parser.Parser().parse(c)
        if not m.is_multipart():
            yield msgfp(m)
        else:
            ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
            for part in m.walk():
                ct = part.get_content_type()
                if ct not in ok_types:
                    continue
                yield msgfp(part)

    def headersplit(stream, cur):
        inheader = False

        for line in stream:
            if not inheader and isheader(line, inheader):
                yield chunk(cur)
                cur = []
                inheader = True
            if inheader and not isheader(line, inheader):
                inheader = False

            cur.append(line)

        if cur:
            yield chunk(cur)

    def remainder(cur):
        yield chunk(cur)

    class fiter(object):
        def __init__(self, fp):
            self.fp = fp

        def __iter__(self):
            return self

        def next(self):
            l = self.fp.readline()
            if not l:
                raise StopIteration
            return l

    inheader = False
    cur = []

    mimeheaders = ['content-type']

    if not util.safehasattr(stream, 'next'):
        # http responses, for example, have readline but not next
        stream = fiter(stream)

    for line in stream:
        cur.append(line)
        if line.startswith('# HG changeset patch'):
            return hgsplit(stream, cur)
        elif line.startswith('From '):
            return mboxsplit(stream, cur)
        elif isheader(line, inheader):
            inheader = True
            if line.split(':', 1)[0].lower() in mimeheaders:
                # let email parser handle this
                return mimesplit(stream, cur)
        elif line.startswith('--- ') and inheader:
            # No evil headers seen by diff start, split by hand
            return headersplit(stream, cur)
        # Not enough info, keep reading

    # if we are here, we have a very plain patch
    return remainder(cur)

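# Illustrative use of split(): each yielded chunk is a file-like StringIO
# holding one patch, so a caller can iterate over a stream and hand each
# chunk to extract() (defined below), e.g.
#   for chunk in split(fileobj):
#       data = extract(ui, chunk)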
## Some facility for extensible patch parsing:
# list of pairs ("header to match", "data key")
patchheadermap = [('Date', 'date'),
                  ('Branch', 'branch'),
                  ('Node ID', 'nodeid'),
                  ]

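# For instance, an 'hg export' header line such as '# Date 1420070400 0' is
# mapped by extract() below to data['date'] = '1420070400 0', and
# '# Branch stable' to data['branch'] = 'stable'.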
def extract(ui, fileobj):
    '''extract patch from data read from fileobj.

    patch can be a normal patch or contained in an email message.

    return a dictionary. Standard keys are:
      - filename,
      - message,
      - user,
      - date,
      - branch,
      - node,
      - p1,
      - p2.
    Any item can be missing from the dictionary. If filename is missing,
    fileobj did not contain a patch. Caller must unlink filename when done.'''

    # attempt to detect the start of a patch
    # (this heuristic is borrowed from quilt)
    diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
                        r'retrieving revision [0-9]+(\.[0-9]+)*$|'
                        r'---[ \t].*?^\+\+\+[ \t]|'
                        r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)

    data = {}
    fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
    tmpfp = os.fdopen(fd, 'w')
    try:
        msg = email.Parser.Parser().parse(fileobj)

        subject = msg['Subject']
        data['user'] = msg['From']
        if not subject and not data['user']:
            # Not an email, restore parsed headers if any
            subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'

        # should try to parse msg['Date']
        parents = []

        if subject:
            if subject.startswith('[PATCH'):
                pend = subject.find(']')
                if pend >= 0:
                    subject = subject[pend + 1:].lstrip()
            subject = re.sub(r'\n[ \t]+', ' ', subject)
            ui.debug('Subject: %s\n' % subject)
        if data['user']:
            ui.debug('From: %s\n' % data['user'])
        diffs_seen = 0
        ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
        message = ''
        for part in msg.walk():
            content_type = part.get_content_type()
            ui.debug('Content-Type: %s\n' % content_type)
            if content_type not in ok_types:
                continue
            payload = part.get_payload(decode=True)
            m = diffre.search(payload)
            if m:
                hgpatch = False
                hgpatchheader = False
                ignoretext = False

                ui.debug('found patch at byte %d\n' % m.start(0))
                diffs_seen += 1
                cfp = cStringIO.StringIO()
                for line in payload[:m.start(0)].splitlines():
                    if line.startswith('# HG changeset patch') and not hgpatch:
                        ui.debug('patch generated by hg export\n')
                        hgpatch = True
                        hgpatchheader = True
                        # drop earlier commit message content
                        cfp.seek(0)
                        cfp.truncate()
                        subject = None
                    elif hgpatchheader:
                        if line.startswith('# User '):
                            data['user'] = line[7:]
                            ui.debug('From: %s\n' % data['user'])
                        elif line.startswith("# Parent "):
                            parents.append(line[9:].lstrip())
                        elif line.startswith("# "):
                            for header, key in patchheadermap:
                                prefix = '# %s ' % header
                                if line.startswith(prefix):
                                    data[key] = line[len(prefix):]
                        else:
                            hgpatchheader = False
                    elif line == '---':
                        ignoretext = True
                    if not hgpatchheader and not ignoretext:
                        cfp.write(line)
                        cfp.write('\n')
                message = cfp.getvalue()
                if tmpfp:
                    tmpfp.write(payload)
                    if not payload.endswith('\n'):
                        tmpfp.write('\n')
            elif not diffs_seen and message and content_type == 'text/plain':
                message += '\n' + payload
    except: # re-raises
        tmpfp.close()
        os.unlink(tmpname)
        raise

    if subject and not message.startswith(subject):
        message = '%s\n%s' % (subject, message)
    data['message'] = message
    tmpfp.close()
    if parents:
        data['p1'] = parents.pop(0)
        if parents:
            data['p2'] = parents.pop(0)

    if diffs_seen:
        data['filename'] = tmpname
    else:
        os.unlink(tmpname)
    return data

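# Sketch of a typical caller (illustrative): read one chunk, extract it, apply
# the diff left in data['filename'], and unlink that temporary file when done,
# as the docstring above requires, e.g.
#   data = extract(ui, fileobj)
#   try:
#       if 'filename' in data:
#           pass  # apply the patch stored in data['filename']
#   finally:
#       if 'filename' in data:
#           os.unlink(data['filename'])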
class patchmeta(object):
    """Patched file metadata

    'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
    or COPY. 'path' is patched file path. 'oldpath' is set to the
    origin file when 'op' is either COPY or RENAME, None otherwise. If
    file mode is changed, 'mode' is a tuple (islink, isexec) where
    'islink' is True if the file is a symlink and 'isexec' is True if
    the file is executable. Otherwise, 'mode' is None.
    """
    def __init__(self, path):
        self.path = path
        self.oldpath = None
        self.mode = None
        self.op = 'MODIFY'
        self.binary = False

    def setmode(self, mode):
        islink = mode & 0o20000
        isexec = mode & 0o100
        self.mode = (islink, isexec)

    def copy(self):
        other = patchmeta(self.path)
        other.oldpath = self.oldpath
        other.mode = self.mode
        other.op = self.op
        other.binary = self.binary
        return other

    def _ispatchinga(self, afile):
        if afile == '/dev/null':
            return self.op == 'ADD'
        return afile == 'a/' + (self.oldpath or self.path)

    def _ispatchingb(self, bfile):
        if bfile == '/dev/null':
            return self.op == 'DELETE'
        return bfile == 'b/' + self.path

    def ispatching(self, afile, bfile):
        return self._ispatchinga(afile) and self._ispatchingb(bfile)

    def __repr__(self):
        return "<patchmeta %s %r>" % (self.op, self.path)

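# Note on setmode(): the argument is a git file mode such as 0o100755 or
# 0o120000; 0o20000 is the symlink bit and 0o100 the owner-execute bit, so
# setmode(0o100755) stores a falsy islink and a truthy isexec (an executable
# regular file), while setmode(0o120000) marks a symlink.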
def readgitpatch(lr):
    """extract git-style metadata about patches from <patchname>"""

    # Filter patch for git information
    gp = None
    gitpatches = []
    for line in lr:
        line = line.rstrip(' \r\n')
        if line.startswith('diff --git a/'):
            m = gitre.match(line)
            if m:
                if gp:
                    gitpatches.append(gp)
                dst = m.group(2)
                gp = patchmeta(dst)
        elif gp:
            if line.startswith('--- '):
                gitpatches.append(gp)
                gp = None
                continue
            if line.startswith('rename from '):
                gp.op = 'RENAME'
                gp.oldpath = line[12:]
            elif line.startswith('rename to '):
                gp.path = line[10:]
            elif line.startswith('copy from '):
                gp.op = 'COPY'
                gp.oldpath = line[10:]
            elif line.startswith('copy to '):
                gp.path = line[8:]
            elif line.startswith('deleted file'):
                gp.op = 'DELETE'
            elif line.startswith('new file mode '):
                gp.op = 'ADD'
                gp.setmode(int(line[-6:], 8))
            elif line.startswith('new mode '):
                gp.setmode(int(line[-6:], 8))
            elif line.startswith('GIT binary patch'):
                gp.binary = True
    if gp:
        gitpatches.append(gp)

    return gitpatches

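# Example (illustrative): the extended header
#   diff --git a/old.txt b/new.txt
#   rename from old.txt
#   rename to new.txt
# produces a single patchmeta with op='RENAME', oldpath='old.txt' and
# path='new.txt'; a 'GIT binary patch' line simply flips gp.binary to True.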
class linereader(object):
    # simple class to allow pushing lines back into the input stream
    def __init__(self, fp):
        self.fp = fp
        self.buf = []

    def push(self, line):
        if line is not None:
            self.buf.append(line)

    def readline(self):
        if self.buf:
            l = self.buf[0]
            del self.buf[0]
            return l
        return self.fp.readline()

    def __iter__(self):
        while True:
            l = self.readline()
            if not l:
                break
            yield l

class abstractbackend(object):
    def __init__(self, ui):
        self.ui = ui

    def getfile(self, fname):
        """Return target file data and flags as a (data, (islink,
        isexec)) tuple. Data is None if file is missing/deleted.
        """
        raise NotImplementedError

    def setfile(self, fname, data, mode, copysource):
        """Write data to target file fname and set its mode. mode is a
        (islink, isexec) tuple. If data is None, the file content should
        be left unchanged. If the file is modified after being copied,
        copysource is set to the original file name.
        """
        raise NotImplementedError

    def unlink(self, fname):
        """Unlink target file."""
        raise NotImplementedError

    def writerej(self, fname, failed, total, lines):
        """Write rejected lines for fname. failed is the number of hunks
        which failed to apply and total the total number of hunks for this
        file.
        """
        pass

    def exists(self, fname):
        raise NotImplementedError

class fsbackend(abstractbackend):
    def __init__(self, ui, basedir):
        super(fsbackend, self).__init__(ui)
        self.opener = scmutil.opener(basedir)

    def _join(self, f):
        return os.path.join(self.opener.base, f)

    def getfile(self, fname):
        if self.opener.islink(fname):
            return (self.opener.readlink(fname), (True, False))

        isexec = False
        try:
            isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
        try:
            return (self.opener.read(fname), (False, isexec))
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise
            return None, None

    def setfile(self, fname, data, mode, copysource):
        islink, isexec = mode
        if data is None:
            self.opener.setflags(fname, islink, isexec)
            return
        if islink:
            self.opener.symlink(data, fname)
        else:
            self.opener.write(fname, data)
            if isexec:
                self.opener.setflags(fname, False, True)

    def unlink(self, fname):
        self.opener.unlinkpath(fname, ignoremissing=True)

    def writerej(self, fname, failed, total, lines):
        fname = fname + ".rej"
        self.ui.warn(
            _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
            (failed, total, fname))
        fp = self.opener(fname, 'w')
        fp.writelines(lines)
        fp.close()

    def exists(self, fname):
        return self.opener.lexists(fname)

class workingbackend(fsbackend):
    def __init__(self, ui, repo, similarity):
        super(workingbackend, self).__init__(ui, repo.root)
        self.repo = repo
        self.similarity = similarity
        self.removed = set()
        self.changed = set()
        self.copied = []

    def _checkknown(self, fname):
        if self.repo.dirstate[fname] == '?' and self.exists(fname):
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def setfile(self, fname, data, mode, copysource):
        self._checkknown(fname)
        super(workingbackend, self).setfile(fname, data, mode, copysource)
        if copysource is not None:
            self.copied.append((copysource, fname))
        self.changed.add(fname)

    def unlink(self, fname):
        self._checkknown(fname)
        super(workingbackend, self).unlink(fname)
        self.removed.add(fname)
        self.changed.add(fname)

    def close(self):
        wctx = self.repo[None]
        changed = set(self.changed)
        for src, dst in self.copied:
            scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
        if self.removed:
            wctx.forget(sorted(self.removed))
            for f in self.removed:
                if f not in self.repo.dirstate:
                    # File was deleted and no longer belongs to the
                    # dirstate, it was probably marked added then
                    # deleted, and should not be considered by
                    # marktouched().
                    changed.discard(f)
        if changed:
            scmutil.marktouched(self.repo, changed, self.similarity)
        return sorted(self.changed)

class filestore(object):
    def __init__(self, maxsize=None):
        self.opener = None
        self.files = {}
        self.created = 0
        self.maxsize = maxsize
        if self.maxsize is None:
            self.maxsize = 4*(2**20)
        self.size = 0
        self.data = {}

    def setfile(self, fname, data, mode, copied=None):
        if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
            self.data[fname] = (data, mode, copied)
            self.size += len(data)
        else:
            if self.opener is None:
                root = tempfile.mkdtemp(prefix='hg-patch-')
                self.opener = scmutil.opener(root)
            # Avoid filename issues with these simple names
            fn = str(self.created)
            self.opener.write(fn, data)
            self.created += 1
            self.files[fname] = (fn, mode, copied)

    def getfile(self, fname):
        if fname in self.data:
            return self.data[fname]
        if not self.opener or fname not in self.files:
            return None, None, None
        fn, mode, copied = self.files[fname]
        return self.opener.read(fn), mode, copied

    def close(self):
        if self.opener:
            shutil.rmtree(self.opener.base)

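# filestore keeps patched file contents in memory and spills to a temporary
# directory once the in-memory total would exceed maxsize (4 MiB by default);
# getfile() returns (data, mode, copied) from either location, and close()
# removes the temporary directory.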
class repobackend(abstractbackend):
    def __init__(self, ui, repo, ctx, store):
        super(repobackend, self).__init__(ui)
        self.repo = repo
        self.ctx = ctx
        self.store = store
        self.changed = set()
        self.removed = set()
        self.copied = {}

    def _checkknown(self, fname):
        if fname not in self.ctx:
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def getfile(self, fname):
        try:
            fctx = self.ctx[fname]
        except error.LookupError:
            return None, None
        flags = fctx.flags()
        return fctx.data(), ('l' in flags, 'x' in flags)

    def setfile(self, fname, data, mode, copysource):
        if copysource:
            self._checkknown(copysource)
        if data is None:
            data = self.ctx[fname].data()
        self.store.setfile(fname, data, mode, copysource)
        self.changed.add(fname)
        if copysource:
            self.copied[fname] = copysource

    def unlink(self, fname):
        self._checkknown(fname)
        self.removed.add(fname)

    def exists(self, fname):
        return fname in self.ctx

    def close(self):
        return self.changed | self.removed

# @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
eolmodes = ['strict', 'crlf', 'lf', 'auto']

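# For example, unidesc matches a unified hunk header like '@@ -1,5 +1,6 @@'
# (groups '1', '5', '1', '6'), while contextdesc matches context-diff range
# lines such as '*** 1,5 ****' or '--- 1,6 ----'.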
class patchfile(object):
    def __init__(self, ui, gp, backend, store, eolmode='strict'):
        self.fname = gp.path
        self.eolmode = eolmode
        self.eol = None
        self.backend = backend
        self.ui = ui
        self.lines = []
        self.exists = False
        self.missing = True
        self.mode = gp.mode
        self.copysource = gp.oldpath
        self.create = gp.op in ('ADD', 'COPY', 'RENAME')
        self.remove = gp.op == 'DELETE'
        if self.copysource is None:
            data, mode = backend.getfile(self.fname)
        else:
            data, mode = store.getfile(self.copysource)[:2]
        if data is not None:
            self.exists = self.copysource is None or backend.exists(self.fname)
            self.missing = False
            if data:
                self.lines = mdiff.splitnewlines(data)
            if self.mode is None:
                self.mode = mode
            if self.lines:
                # Normalize line endings
                if self.lines[0].endswith('\r\n'):
                    self.eol = '\r\n'
                elif self.lines[0].endswith('\n'):
                    self.eol = '\n'
                if eolmode != 'strict':
                    nlines = []
                    for l in self.lines:
                        if l.endswith('\r\n'):
                            l = l[:-2] + '\n'
                        nlines.append(l)
                    self.lines = nlines
        else:
            if self.create:
                self.missing = False
            if self.mode is None:
                self.mode = (False, False)
        if self.missing:
            self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)

        self.hash = {}
        self.dirty = 0
        self.offset = 0
        self.skew = 0
        self.rej = []
        self.fileprinted = False
        self.printfile(False)
        self.hunks = 0

    def writelines(self, fname, lines, mode):
        if self.eolmode == 'auto':
            eol = self.eol
        elif self.eolmode == 'crlf':
            eol = '\r\n'
        else:
            eol = '\n'

        if self.eolmode != 'strict' and eol and eol != '\n':
            rawlines = []
            for l in lines:
                if l and l[-1] == '\n':
                    l = l[:-1] + eol
                rawlines.append(l)
            lines = rawlines

        self.backend.setfile(fname, ''.join(lines), mode, self.copysource)

    def printfile(self, warn):
        if self.fileprinted:
            return
        if warn or self.ui.verbose:
            self.fileprinted = True
        s = _("patching file %s\n") % self.fname
        if warn:
            self.ui.warn(s)
        else:
            self.ui.note(s)


    def findlines(self, l, linenum):
        # looks through the hash and finds candidate lines. The
        # result is a list of line numbers sorted based on distance
        # from linenum

        cand = self.hash.get(l, [])
        if len(cand) > 1:
            # resort our list of potentials forward then back.
            cand.sort(key=lambda x: abs(x - linenum))
        return cand

    def write_rej(self):
        # our rejects are a little different from patch(1). This always
        # creates rejects in the same form as the original patch. A file
        # header is inserted so that you can run the reject through patch again
        # without having to type the filename.
        if not self.rej:
            return
        base = os.path.basename(self.fname)
        lines = ["--- %s\n+++ %s\n" % (base, base)]
        for x in self.rej:
            for l in x.hunk:
                lines.append(l)
                if l[-1] != '\n':
                    lines.append("\n\ No newline at end of file\n")
        self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)

    def apply(self, h):
        if not h.complete():
            raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
                             (h.number, h.desc, len(h.a), h.lena, len(h.b),
                              h.lenb))

        self.hunks += 1

        if self.missing:
            self.rej.append(h)
            return -1

        if self.exists and self.create:
            if self.copysource:
                self.ui.warn(_("cannot create %s: destination already "
                               "exists\n") % self.fname)
            else:
                self.ui.warn(_("file %s already exists\n") % self.fname)
            self.rej.append(h)
            return -1

        if isinstance(h, binhunk):
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                l = h.new(self.lines)
                self.lines[:] = l
                self.offset += len(l)
                self.dirty = True
            return 0

        horig = h
        if (self.eolmode in ('crlf', 'lf')
            or self.eolmode == 'auto' and self.eol):
            # If new eols are going to be normalized, then normalize
            # hunk data before patching. Otherwise, preserve input
            # line-endings.
            h = h.getnormalized()

        # fast case first, no offsets, no fuzz
        old, oldstart, new, newstart = h.fuzzit(0, False)
        oldstart += self.offset
        orig_start = oldstart
        # if there's skew we want to emit the "(offset %d lines)" even
        # when the hunk cleanly applies at start + skew, so skip the
        # fast case code
        if (self.skew == 0 and
            diffhelpers.testhunk(old, self.lines, oldstart) == 0):
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                self.lines[oldstart:oldstart + len(old)] = new
                self.offset += len(new) - len(old)
                self.dirty = True
            return 0

        # ok, we couldn't match the hunk. Let's look for offsets and fuzz it
        self.hash = {}
        for x, s in enumerate(self.lines):
            self.hash.setdefault(s, []).append(x)

        for fuzzlen in xrange(self.ui.configint("patch", "fuzz", 2) + 1):
            for toponly in [True, False]:
                old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
                oldstart = oldstart + self.offset + self.skew
                oldstart = min(oldstart, len(self.lines))
                if old:
                    cand = self.findlines(old[0][1:], oldstart)
                else:
                    # Only adding lines with no or fuzzed context, just
                    # take the skew into account
                    cand = [oldstart]

                for l in cand:
                    if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
                        self.lines[l : l + len(old)] = new
                        self.offset += len(new) - len(old)
                        self.skew = l - orig_start
                        self.dirty = True
                        offset = l - orig_start - fuzzlen
                        if fuzzlen:
                            msg = _("Hunk #%d succeeded at %d "
                                    "with fuzz %d "
                                    "(offset %d lines).\n")
                            self.printfile(True)
                            self.ui.warn(msg %
                                         (h.number, l + 1, fuzzlen, offset))
                        else:
                            msg = _("Hunk #%d succeeded at %d "
                                    "(offset %d lines).\n")
                            self.ui.note(msg % (h.number, l + 1, offset))
                        return fuzzlen
        self.printfile(True)
        self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
        self.rej.append(horig)
        return -1

    def close(self):
        if self.dirty:
            self.writelines(self.fname, self.lines, self.mode)
        self.write_rej()
        return len(self.rej)

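# Return convention of patchfile.apply(): it returns the amount of fuzz used
# (0 for a clean, binary, or merely offset application) and -1 when the hunk
# is rejected and queued for the .rej file written by write_rej().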
class header(object):
    """patch header
    """
    diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
    diff_re = re.compile('diff -r .* (.*)$')
    allhunks_re = re.compile('(?:index|deleted file) ')
    pretty_re = re.compile('(?:new file|deleted file) ')
    special_re = re.compile('(?:index|deleted|copy|rename) ')
    newfile_re = re.compile('(?:new file)')

    def __init__(self, header):
        self.header = header
        self.hunks = []

    def binary(self):
        return any(h.startswith('index ') for h in self.header)

    def pretty(self, fp):
        for h in self.header:
            if h.startswith('index '):
                fp.write(_('this modifies a binary file (all or nothing)\n'))
                break
            if self.pretty_re.match(h):
                fp.write(h)
                if self.binary():
                    fp.write(_('this is a binary file\n'))
                break
            if h.startswith('---'):
                fp.write(_('%d hunks, %d lines changed\n') %
                         (len(self.hunks),
                          sum([max(h.added, h.removed) for h in self.hunks])))
                break
            fp.write(h)

    def write(self, fp):
        fp.write(''.join(self.header))

    def allhunks(self):
        return any(self.allhunks_re.match(h) for h in self.header)

    def files(self):
        match = self.diffgit_re.match(self.header[0])
        if match:
            fromfile, tofile = match.groups()
            if fromfile == tofile:
                return [fromfile]
            return [fromfile, tofile]
        else:
            return self.diff_re.match(self.header[0]).groups()

    def filename(self):
        return self.files()[-1]

    def __repr__(self):
        return '<header %s>' % (' '.join(map(repr, self.files())))

    def isnewfile(self):
        return any(self.newfile_re.match(h) for h in self.header)

    def special(self):
        # Special files are shown only at the header level and not at the hunk
        # level; for example, a file that has been deleted is a special file.
        # The user cannot change the content of the operation: in the case of
        # a deleted file he has to take the deletion or not take it, he
        # cannot take only some of it.
        # Newly added files are special if they are empty; they are not special
        # if they have some content, as we want to be able to change it.
        nocontent = len(self.header) == 2
        emptynewfile = self.isnewfile() and nocontent
        return emptynewfile or \
                any(self.special_re.match(h) for h in self.header)

class recordhunk(object):
    """patch hunk

    XXX shouldn't we merge this with the other hunk class?
    """
    maxcontext = 3

    def __init__(self, header, fromline, toline, proc, before, hunk, after):
        def trimcontext(number, lines):
            delta = len(lines) - self.maxcontext
            if False and delta > 0:
                return number + delta, lines[:self.maxcontext]
            return number, lines

        self.header = header
        self.fromline, self.before = trimcontext(fromline, before)
        self.toline, self.after = trimcontext(toline, after)
        self.proc = proc
        self.hunk = hunk
        self.added, self.removed = self.countchanges(self.hunk)

    def __eq__(self, v):
        if not isinstance(v, recordhunk):
            return False

        return ((v.hunk == self.hunk) and
                (v.proc == self.proc) and
                (self.fromline == v.fromline) and
                (self.header.files() == v.header.files()))

    def __hash__(self):
        return hash((tuple(self.hunk),
                     tuple(self.header.files()),
                     self.fromline,
                     self.proc))

    def countchanges(self, hunk):
        """hunk -> (n+,n-)"""
        add = len([h for h in hunk if h[0] == '+'])
        rem = len([h for h in hunk if h[0] == '-'])
        return add, rem

    def write(self, fp):
        delta = len(self.before) + len(self.after)
        if self.after and self.after[-1] == '\\ No newline at end of file\n':
            delta -= 1
        fromlen = delta + self.removed
        tolen = delta + self.added
        fp.write('@@ -%d,%d +%d,%d @@%s\n' %
                 (self.fromline, fromlen, self.toline, tolen,
                  self.proc and (' ' + self.proc)))
        fp.write(''.join(self.before + self.hunk + self.after))

    pretty = write

    def filename(self):
        return self.header.filename()

    def __repr__(self):
        return '<hunk %r@%d>' % (self.filename(), self.fromline)

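# In recordhunk.write(), the @@ header lengths are recomputed from the kept
# context plus the counted changes: fromlen = context + removed lines and
# tolen = context + added lines, so a filtered hunk is written with a
# consistent unified-diff header.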
def filterpatch(ui, headers, operation=None):
    """Interactively filter patch chunks into applied-only chunks"""
    if operation is None:
        operation = _('record')

    def prompt(skipfile, skipall, query, chunk):
        """prompt query, and process base inputs

        - y/n for the rest of file
        - y/n for the rest
        - ? (help)
        - q (quit)

        Return True/False and possibly updated skipfile and skipall.
        """
        newpatches = None
        if skipall is not None:
            return skipall, skipfile, skipall, newpatches
        if skipfile is not None:
            return skipfile, skipfile, skipall, newpatches
        while True:
            resps = _('[Ynesfdaq?]'
                      '$$ &Yes, record this change'
                      '$$ &No, skip this change'
                      '$$ &Edit this change manually'
                      '$$ &Skip remaining changes to this file'
                      '$$ Record remaining changes to this &file'
                      '$$ &Done, skip remaining changes and files'
                      '$$ Record &all changes to all remaining files'
                      '$$ &Quit, recording no changes'
                      '$$ &? (display help)')
            r = ui.promptchoice("%s %s" % (query, resps))
            ui.write("\n")
            if r == 8: # ?
                for c, t in ui.extractchoices(resps)[1]:
                    ui.write('%s - %s\n' % (c, t.lower()))
                continue
            elif r == 0: # yes
                ret = True
            elif r == 1: # no
                ret = False
            elif r == 2: # Edit patch
                if chunk is None:
                    ui.write(_('cannot edit patch for whole file'))
                    ui.write("\n")
                    continue
                if chunk.header.binary():
                    ui.write(_('cannot edit patch for binary file'))
                    ui.write("\n")
                    continue
                # Patch comment based on the Git one (based on comment at end of
                # https://mercurial-scm.org/wiki/RecordExtension)
                phelp = '---' + _("""
To remove '-' lines, make them ' ' lines (context).
To remove '+' lines, delete them.
Lines starting with # will be removed from the patch.

If the patch applies cleanly, the edited hunk will immediately be
added to the record list. If it does not apply cleanly, a rejects
file will be generated: you can use that when you try again. If
all lines of the hunk are removed, then the edit is aborted and
the hunk is left unchanged.
""")
                (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
                                                      suffix=".diff", text=True)
                ncpatchfp = None
                try:
                    # Write the initial patch
                    f = os.fdopen(patchfd, "w")
                    chunk.header.write(f)
                    chunk.write(f)
                    f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
                    f.close()
                    # Start the editor and wait for it to complete
                    editor = ui.geteditor()
                    ret = ui.system("%s \"%s\"" % (editor, patchfn),
                                    environ={'HGUSER': ui.username()})
                    if ret != 0:
                        ui.warn(_("editor exited with exit code %d\n") % ret)
                        continue
                    # Remove comment lines
                    patchfp = open(patchfn)
                    ncpatchfp = cStringIO.StringIO()
                    for line in patchfp:
                        if not line.startswith('#'):
                            ncpatchfp.write(line)
                    patchfp.close()
                    ncpatchfp.seek(0)
1043 newpatches = parsepatch(ncpatchfp)
1043 newpatches = parsepatch(ncpatchfp)
1044 finally:
1044 finally:
1045 os.unlink(patchfn)
1045 os.unlink(patchfn)
1046 del ncpatchfp
1046 del ncpatchfp
1047 # Signal that the chunk shouldn't be applied as-is, but
1047 # Signal that the chunk shouldn't be applied as-is, but
1048 # provide the new patch to be used instead.
1048 # provide the new patch to be used instead.
1049 ret = False
1049 ret = False
1050 elif r == 3: # Skip
1050 elif r == 3: # Skip
1051 ret = skipfile = False
1051 ret = skipfile = False
1052 elif r == 4: # file (Record remaining)
1052 elif r == 4: # file (Record remaining)
1053 ret = skipfile = True
1053 ret = skipfile = True
1054 elif r == 5: # done, skip remaining
1054 elif r == 5: # done, skip remaining
1055 ret = skipall = False
1055 ret = skipall = False
1056 elif r == 6: # all
1056 elif r == 6: # all
1057 ret = skipall = True
1057 ret = skipall = True
1058 elif r == 7: # quit
1058 elif r == 7: # quit
1059 raise error.Abort(_('user quit'))
1059 raise error.Abort(_('user quit'))
1060 return ret, skipfile, skipall, newpatches
1060 return ret, skipfile, skipall, newpatches
1061
1061
1062 seen = set()
1062 seen = set()
1063 applied = {} # 'filename' -> [] of chunks
1063 applied = {} # 'filename' -> [] of chunks
1064 skipfile, skipall = None, None
1064 skipfile, skipall = None, None
1065 pos, total = 1, sum(len(h.hunks) for h in headers)
1065 pos, total = 1, sum(len(h.hunks) for h in headers)
1066 for h in headers:
1066 for h in headers:
1067 pos += len(h.hunks)
1067 pos += len(h.hunks)
1068 skipfile = None
1068 skipfile = None
1069 fixoffset = 0
1069 fixoffset = 0
1070 hdr = ''.join(h.header)
1070 hdr = ''.join(h.header)
1071 if hdr in seen:
1071 if hdr in seen:
1072 continue
1072 continue
1073 seen.add(hdr)
1073 seen.add(hdr)
1074 if skipall is None:
1074 if skipall is None:
1075 h.pretty(ui)
1075 h.pretty(ui)
1076 msg = (_('examine changes to %s?') %
1076 msg = (_('examine changes to %s?') %
1077 _(' and ').join("'%s'" % f for f in h.files()))
1077 _(' and ').join("'%s'" % f for f in h.files()))
1078 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
1078 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
1079 if not r:
1079 if not r:
1080 continue
1080 continue
1081 applied[h.filename()] = [h]
1081 applied[h.filename()] = [h]
1082 if h.allhunks():
1082 if h.allhunks():
1083 applied[h.filename()] += h.hunks
1083 applied[h.filename()] += h.hunks
1084 continue
1084 continue
1085 for i, chunk in enumerate(h.hunks):
1085 for i, chunk in enumerate(h.hunks):
1086 if skipfile is None and skipall is None:
1086 if skipfile is None and skipall is None:
1087 chunk.pretty(ui)
1087 chunk.pretty(ui)
1088 if total == 1:
1088 if total == 1:
1089 msg = _("record this change to '%s'?") % chunk.filename()
1089 msg = _("record this change to '%s'?") % chunk.filename()
1090 else:
1090 else:
1091 idx = pos - len(h.hunks) + i
1091 idx = pos - len(h.hunks) + i
1092 msg = _("record change %d/%d to '%s'?") % (idx, total,
1092 msg = _("record change %d/%d to '%s'?") % (idx, total,
1093 chunk.filename())
1093 chunk.filename())
1094 r, skipfile, skipall, newpatches = prompt(skipfile,
1094 r, skipfile, skipall, newpatches = prompt(skipfile,
1095 skipall, msg, chunk)
1095 skipall, msg, chunk)
1096 if r:
1096 if r:
1097 if fixoffset:
1097 if fixoffset:
1098 chunk = copy.copy(chunk)
1098 chunk = copy.copy(chunk)
1099 chunk.toline += fixoffset
1099 chunk.toline += fixoffset
1100 applied[chunk.filename()].append(chunk)
1100 applied[chunk.filename()].append(chunk)
1101 elif newpatches is not None:
1101 elif newpatches is not None:
1102 for newpatch in newpatches:
1102 for newpatch in newpatches:
1103 for newhunk in newpatch.hunks:
1103 for newhunk in newpatch.hunks:
1104 if fixoffset:
1104 if fixoffset:
1105 newhunk.toline += fixoffset
1105 newhunk.toline += fixoffset
1106 applied[newhunk.filename()].append(newhunk)
1106 applied[newhunk.filename()].append(newhunk)
1107 else:
1107 else:
1108 fixoffset += chunk.removed - chunk.added
1108 fixoffset += chunk.removed - chunk.added
1109 return (sum([h for h in applied.itervalues()
1109 return (sum([h for h in applied.itervalues()
1110 if h[0].special() or len(h) > 1], []), {})
1110 if h[0].special() or len(h) > 1], []), {})
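
# Illustrative sketch, not part of patch.py: the 'fixoffset' bookkeeping in
# filterpatch() above.  When the user skips a hunk, later hunks of the same
# file start (removed - added) lines away from where the original patch
# says, so their 'toline' is shifted before being recorded.  The helper
# name and the sample numbers are hypothetical.
def _demo_fixoffset():
    skipped_removed, skipped_added = 3, 1        # the hunk the user said 'n' to
    fixoffset = skipped_removed - skipped_added  # +2 lines
    next_hunk_toline = 40                        # target start in the raw patch
    return next_hunk_toline + fixoffset          # 42, as filterpatch records it
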
1111 class hunk(object):
1111 class hunk(object):
1112 def __init__(self, desc, num, lr, context):
1112 def __init__(self, desc, num, lr, context):
1113 self.number = num
1113 self.number = num
1114 self.desc = desc
1114 self.desc = desc
1115 self.hunk = [desc]
1115 self.hunk = [desc]
1116 self.a = []
1116 self.a = []
1117 self.b = []
1117 self.b = []
1118 self.starta = self.lena = None
1118 self.starta = self.lena = None
1119 self.startb = self.lenb = None
1119 self.startb = self.lenb = None
1120 if lr is not None:
1120 if lr is not None:
1121 if context:
1121 if context:
1122 self.read_context_hunk(lr)
1122 self.read_context_hunk(lr)
1123 else:
1123 else:
1124 self.read_unified_hunk(lr)
1124 self.read_unified_hunk(lr)
1125
1125
1126 def getnormalized(self):
1126 def getnormalized(self):
1127 """Return a copy with line endings normalized to LF."""
1127 """Return a copy with line endings normalized to LF."""
1128
1128
1129 def normalize(lines):
1129 def normalize(lines):
1130 nlines = []
1130 nlines = []
1131 for line in lines:
1131 for line in lines:
1132 if line.endswith('\r\n'):
1132 if line.endswith('\r\n'):
1133 line = line[:-2] + '\n'
1133 line = line[:-2] + '\n'
1134 nlines.append(line)
1134 nlines.append(line)
1135 return nlines
1135 return nlines
1136
1136
1137 # Dummy object, it is rebuilt manually
1137 # Dummy object, it is rebuilt manually
1138 nh = hunk(self.desc, self.number, None, None)
1138 nh = hunk(self.desc, self.number, None, None)
1139 nh.number = self.number
1139 nh.number = self.number
1140 nh.desc = self.desc
1140 nh.desc = self.desc
1141 nh.hunk = self.hunk
1141 nh.hunk = self.hunk
1142 nh.a = normalize(self.a)
1142 nh.a = normalize(self.a)
1143 nh.b = normalize(self.b)
1143 nh.b = normalize(self.b)
1144 nh.starta = self.starta
1144 nh.starta = self.starta
1145 nh.startb = self.startb
1145 nh.startb = self.startb
1146 nh.lena = self.lena
1146 nh.lena = self.lena
1147 nh.lenb = self.lenb
1147 nh.lenb = self.lenb
1148 return nh
1148 return nh
1149
1149
1150 def read_unified_hunk(self, lr):
1150 def read_unified_hunk(self, lr):
1151 m = unidesc.match(self.desc)
1151 m = unidesc.match(self.desc)
1152 if not m:
1152 if not m:
1153 raise PatchError(_("bad hunk #%d") % self.number)
1153 raise PatchError(_("bad hunk #%d") % self.number)
1154 self.starta, self.lena, self.startb, self.lenb = m.groups()
1154 self.starta, self.lena, self.startb, self.lenb = m.groups()
1155 if self.lena is None:
1155 if self.lena is None:
1156 self.lena = 1
1156 self.lena = 1
1157 else:
1157 else:
1158 self.lena = int(self.lena)
1158 self.lena = int(self.lena)
1159 if self.lenb is None:
1159 if self.lenb is None:
1160 self.lenb = 1
1160 self.lenb = 1
1161 else:
1161 else:
1162 self.lenb = int(self.lenb)
1162 self.lenb = int(self.lenb)
1163 self.starta = int(self.starta)
1163 self.starta = int(self.starta)
1164 self.startb = int(self.startb)
1164 self.startb = int(self.startb)
1165 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
1165 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
1166 self.b)
1166 self.b)
1167 # if we hit EOF before finishing out the hunk, the last line will
1167 # if we hit EOF before finishing out the hunk, the last line will
1168 # be zero length. Let's try to fix it up.
1168 # be zero length. Let's try to fix it up.
1169 while len(self.hunk[-1]) == 0:
1169 while len(self.hunk[-1]) == 0:
1170 del self.hunk[-1]
1170 del self.hunk[-1]
1171 del self.a[-1]
1171 del self.a[-1]
1172 del self.b[-1]
1172 del self.b[-1]
1173 self.lena -= 1
1173 self.lena -= 1
1174 self.lenb -= 1
1174 self.lenb -= 1
1175 self._fixnewline(lr)
1175 self._fixnewline(lr)
1176
1176
1177 def read_context_hunk(self, lr):
1177 def read_context_hunk(self, lr):
1178 self.desc = lr.readline()
1178 self.desc = lr.readline()
1179 m = contextdesc.match(self.desc)
1179 m = contextdesc.match(self.desc)
1180 if not m:
1180 if not m:
1181 raise PatchError(_("bad hunk #%d") % self.number)
1181 raise PatchError(_("bad hunk #%d") % self.number)
1182 self.starta, aend = m.groups()
1182 self.starta, aend = m.groups()
1183 self.starta = int(self.starta)
1183 self.starta = int(self.starta)
1184 if aend is None:
1184 if aend is None:
1185 aend = self.starta
1185 aend = self.starta
1186 self.lena = int(aend) - self.starta
1186 self.lena = int(aend) - self.starta
1187 if self.starta:
1187 if self.starta:
1188 self.lena += 1
1188 self.lena += 1
1189 for x in xrange(self.lena):
1189 for x in xrange(self.lena):
1190 l = lr.readline()
1190 l = lr.readline()
1191 if l.startswith('---'):
1191 if l.startswith('---'):
1192 # lines addition, old block is empty
1192 # lines addition, old block is empty
1193 lr.push(l)
1193 lr.push(l)
1194 break
1194 break
1195 s = l[2:]
1195 s = l[2:]
1196 if l.startswith('- ') or l.startswith('! '):
1196 if l.startswith('- ') or l.startswith('! '):
1197 u = '-' + s
1197 u = '-' + s
1198 elif l.startswith(' '):
1198 elif l.startswith(' '):
1199 u = ' ' + s
1199 u = ' ' + s
1200 else:
1200 else:
1201 raise PatchError(_("bad hunk #%d old text line %d") %
1201 raise PatchError(_("bad hunk #%d old text line %d") %
1202 (self.number, x))
1202 (self.number, x))
1203 self.a.append(u)
1203 self.a.append(u)
1204 self.hunk.append(u)
1204 self.hunk.append(u)
1205
1205
1206 l = lr.readline()
1206 l = lr.readline()
1207 if l.startswith('\ '):
1207 if l.startswith('\ '):
1208 s = self.a[-1][:-1]
1208 s = self.a[-1][:-1]
1209 self.a[-1] = s
1209 self.a[-1] = s
1210 self.hunk[-1] = s
1210 self.hunk[-1] = s
1211 l = lr.readline()
1211 l = lr.readline()
1212 m = contextdesc.match(l)
1212 m = contextdesc.match(l)
1213 if not m:
1213 if not m:
1214 raise PatchError(_("bad hunk #%d") % self.number)
1214 raise PatchError(_("bad hunk #%d") % self.number)
1215 self.startb, bend = m.groups()
1215 self.startb, bend = m.groups()
1216 self.startb = int(self.startb)
1216 self.startb = int(self.startb)
1217 if bend is None:
1217 if bend is None:
1218 bend = self.startb
1218 bend = self.startb
1219 self.lenb = int(bend) - self.startb
1219 self.lenb = int(bend) - self.startb
1220 if self.startb:
1220 if self.startb:
1221 self.lenb += 1
1221 self.lenb += 1
1222 hunki = 1
1222 hunki = 1
1223 for x in xrange(self.lenb):
1223 for x in xrange(self.lenb):
1224 l = lr.readline()
1224 l = lr.readline()
1225 if l.startswith('\ '):
1225 if l.startswith('\ '):
1226 # XXX: the only way to hit this is with an invalid line range.
1226 # XXX: the only way to hit this is with an invalid line range.
1227 # The no-eol marker is not counted in the line range, but I
1227 # The no-eol marker is not counted in the line range, but I
1228 # guess there are diff(1) implementations out there which behave differently.
1228 # guess there are diff(1) implementations out there which behave differently.
1229 s = self.b[-1][:-1]
1229 s = self.b[-1][:-1]
1230 self.b[-1] = s
1230 self.b[-1] = s
1231 self.hunk[hunki - 1] = s
1231 self.hunk[hunki - 1] = s
1232 continue
1232 continue
1233 if not l:
1233 if not l:
1234 # line deletions, new block is empty and we hit EOF
1234 # line deletions, new block is empty and we hit EOF
1235 lr.push(l)
1235 lr.push(l)
1236 break
1236 break
1237 s = l[2:]
1237 s = l[2:]
1238 if l.startswith('+ ') or l.startswith('! '):
1238 if l.startswith('+ ') or l.startswith('! '):
1239 u = '+' + s
1239 u = '+' + s
1240 elif l.startswith(' '):
1240 elif l.startswith(' '):
1241 u = ' ' + s
1241 u = ' ' + s
1242 elif len(self.b) == 0:
1242 elif len(self.b) == 0:
1243 # line deletions, new block is empty
1243 # line deletions, new block is empty
1244 lr.push(l)
1244 lr.push(l)
1245 break
1245 break
1246 else:
1246 else:
1247 raise PatchError(_("bad hunk #%d new text line %d") %
1247 raise PatchError(_("bad hunk #%d new text line %d") %
1248 (self.number, x))
1248 (self.number, x))
1249 self.b.append(s)
1249 self.b.append(s)
1250 while True:
1250 while True:
1251 if hunki >= len(self.hunk):
1251 if hunki >= len(self.hunk):
1252 h = ""
1252 h = ""
1253 else:
1253 else:
1254 h = self.hunk[hunki]
1254 h = self.hunk[hunki]
1255 hunki += 1
1255 hunki += 1
1256 if h == u:
1256 if h == u:
1257 break
1257 break
1258 elif h.startswith('-'):
1258 elif h.startswith('-'):
1259 continue
1259 continue
1260 else:
1260 else:
1261 self.hunk.insert(hunki - 1, u)
1261 self.hunk.insert(hunki - 1, u)
1262 break
1262 break
1263
1263
1264 if not self.a:
1264 if not self.a:
1265 # this happens when lines were only added to the hunk
1265 # this happens when lines were only added to the hunk
1266 for x in self.hunk:
1266 for x in self.hunk:
1267 if x.startswith('-') or x.startswith(' '):
1267 if x.startswith('-') or x.startswith(' '):
1268 self.a.append(x)
1268 self.a.append(x)
1269 if not self.b:
1269 if not self.b:
1270 # this happens when lines were only deleted from the hunk
1270 # this happens when lines were only deleted from the hunk
1271 for x in self.hunk:
1271 for x in self.hunk:
1272 if x.startswith('+') or x.startswith(' '):
1272 if x.startswith('+') or x.startswith(' '):
1273 self.b.append(x[1:])
1273 self.b.append(x[1:])
1274 # @@ -start,len +start,len @@
1274 # @@ -start,len +start,len @@
1275 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
1275 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
1276 self.startb, self.lenb)
1276 self.startb, self.lenb)
1277 self.hunk[0] = self.desc
1277 self.hunk[0] = self.desc
1278 self._fixnewline(lr)
1278 self._fixnewline(lr)
1279
1279
1280 def _fixnewline(self, lr):
1280 def _fixnewline(self, lr):
1281 l = lr.readline()
1281 l = lr.readline()
1282 if l.startswith('\ '):
1282 if l.startswith('\ '):
1283 diffhelpers.fix_newline(self.hunk, self.a, self.b)
1283 diffhelpers.fix_newline(self.hunk, self.a, self.b)
1284 else:
1284 else:
1285 lr.push(l)
1285 lr.push(l)
1286
1286
1287 def complete(self):
1287 def complete(self):
1288 return len(self.a) == self.lena and len(self.b) == self.lenb
1288 return len(self.a) == self.lena and len(self.b) == self.lenb
1289
1289
1290 def _fuzzit(self, old, new, fuzz, toponly):
1290 def _fuzzit(self, old, new, fuzz, toponly):
1291 # this removes context lines from the top and bottom of the 'old' and
1291 # this removes context lines from the top and bottom of the 'old' and
1292 # 'new' lists. It checks the hunk to make sure only context lines are
1292 # 'new' lists. It checks the hunk to make sure only context lines are
1293 # removed, and then returns the shortened lists of lines.
1293 # removed, and then returns the shortened lists of lines.
1294 fuzz = min(fuzz, len(old))
1294 fuzz = min(fuzz, len(old))
1295 if fuzz:
1295 if fuzz:
1296 top = 0
1296 top = 0
1297 bot = 0
1297 bot = 0
1298 hlen = len(self.hunk)
1298 hlen = len(self.hunk)
1299 for x in xrange(hlen - 1):
1299 for x in xrange(hlen - 1):
1300 # the hunk starts with the @@ line, so use x+1
1300 # the hunk starts with the @@ line, so use x+1
1301 if self.hunk[x + 1][0] == ' ':
1301 if self.hunk[x + 1][0] == ' ':
1302 top += 1
1302 top += 1
1303 else:
1303 else:
1304 break
1304 break
1305 if not toponly:
1305 if not toponly:
1306 for x in xrange(hlen - 1):
1306 for x in xrange(hlen - 1):
1307 if self.hunk[hlen - bot - 1][0] == ' ':
1307 if self.hunk[hlen - bot - 1][0] == ' ':
1308 bot += 1
1308 bot += 1
1309 else:
1309 else:
1310 break
1310 break
1311
1311
1312 bot = min(fuzz, bot)
1312 bot = min(fuzz, bot)
1313 top = min(fuzz, top)
1313 top = min(fuzz, top)
1314 return old[top:len(old) - bot], new[top:len(new) - bot], top
1314 return old[top:len(old) - bot], new[top:len(new) - bot], top
1315 return old, new, 0
1315 return old, new, 0
1316
1316
1317 def fuzzit(self, fuzz, toponly):
1317 def fuzzit(self, fuzz, toponly):
1318 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1318 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1319 oldstart = self.starta + top
1319 oldstart = self.starta + top
1320 newstart = self.startb + top
1320 newstart = self.startb + top
1321 # zero length hunk ranges already have their start decremented
1321 # zero length hunk ranges already have their start decremented
1322 if self.lena and oldstart > 0:
1322 if self.lena and oldstart > 0:
1323 oldstart -= 1
1323 oldstart -= 1
1324 if self.lenb and newstart > 0:
1324 if self.lenb and newstart > 0:
1325 newstart -= 1
1325 newstart -= 1
1326 return old, oldstart, new, newstart
1326 return old, oldstart, new, newstart
1327
1327
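# Illustrative sketch, not part of patch.py: read_unified_hunk() above relies
# on the module-level 'unidesc' regex (defined earlier in this file) and
# defaults an omitted range length to 1.  The pattern below only approximates
# unidesc; the helper name and the sample header are hypothetical.
def _demo_unifiedrange(desc='@@ -5 +5,2 @@\n'):
    m = re.match(r'@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@', desc)
    starta, lena, startb, lenb = m.groups()
    lena = 1 if lena is None else int(lena)
    lenb = 1 if lenb is None else int(lenb)
    return int(starta), lena, int(startb), lenb   # -> (5, 1, 5, 2)
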
1328 class binhunk(object):
1328 class binhunk(object):
1329 'A binary patch file.'
1329 'A binary patch file.'
1330 def __init__(self, lr, fname):
1330 def __init__(self, lr, fname):
1331 self.text = None
1331 self.text = None
1332 self.delta = False
1332 self.delta = False
1333 self.hunk = ['GIT binary patch\n']
1333 self.hunk = ['GIT binary patch\n']
1334 self._fname = fname
1334 self._fname = fname
1335 self._read(lr)
1335 self._read(lr)
1336
1336
1337 def complete(self):
1337 def complete(self):
1338 return self.text is not None
1338 return self.text is not None
1339
1339
1340 def new(self, lines):
1340 def new(self, lines):
1341 if self.delta:
1341 if self.delta:
1342 return [applybindelta(self.text, ''.join(lines))]
1342 return [applybindelta(self.text, ''.join(lines))]
1343 return [self.text]
1343 return [self.text]
1344
1344
1345 def _read(self, lr):
1345 def _read(self, lr):
1346 def getline(lr, hunk):
1346 def getline(lr, hunk):
1347 l = lr.readline()
1347 l = lr.readline()
1348 hunk.append(l)
1348 hunk.append(l)
1349 return l.rstrip('\r\n')
1349 return l.rstrip('\r\n')
1350
1350
1351 size = 0
1351 size = 0
1352 while True:
1352 while True:
1353 line = getline(lr, self.hunk)
1353 line = getline(lr, self.hunk)
1354 if not line:
1354 if not line:
1355 raise PatchError(_('could not extract "%s" binary data')
1355 raise PatchError(_('could not extract "%s" binary data')
1356 % self._fname)
1356 % self._fname)
1357 if line.startswith('literal '):
1357 if line.startswith('literal '):
1358 size = int(line[8:].rstrip())
1358 size = int(line[8:].rstrip())
1359 break
1359 break
1360 if line.startswith('delta '):
1360 if line.startswith('delta '):
1361 size = int(line[6:].rstrip())
1361 size = int(line[6:].rstrip())
1362 self.delta = True
1362 self.delta = True
1363 break
1363 break
1364 dec = []
1364 dec = []
1365 line = getline(lr, self.hunk)
1365 line = getline(lr, self.hunk)
1366 while len(line) > 1:
1366 while len(line) > 1:
1367 l = line[0]
1367 l = line[0]
1368 if l <= 'Z' and l >= 'A':
1368 if l <= 'Z' and l >= 'A':
1369 l = ord(l) - ord('A') + 1
1369 l = ord(l) - ord('A') + 1
1370 else:
1370 else:
1371 l = ord(l) - ord('a') + 27
1371 l = ord(l) - ord('a') + 27
1372 try:
1372 try:
1373 dec.append(base85.b85decode(line[1:])[:l])
1373 dec.append(base85.b85decode(line[1:])[:l])
1374 except ValueError as e:
1374 except ValueError as e:
1375 raise PatchError(_('could not decode "%s" binary patch: %s')
1375 raise PatchError(_('could not decode "%s" binary patch: %s')
1376 % (self._fname, str(e)))
1376 % (self._fname, str(e)))
1377 line = getline(lr, self.hunk)
1377 line = getline(lr, self.hunk)
1378 text = zlib.decompress(''.join(dec))
1378 text = zlib.decompress(''.join(dec))
1379 if len(text) != size:
1379 if len(text) != size:
1380 raise PatchError(_('"%s" length is %d bytes, should be %d')
1380 raise PatchError(_('"%s" length is %d bytes, should be %d')
1381 % (self._fname, len(text), size))
1381 % (self._fname, len(text), size))
1382 self.text = text
1382 self.text = text
1383
1383
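# Illustrative sketch, not part of patch.py: each base85 line of a
# 'GIT binary patch' hunk starts with one character giving the number of
# decoded bytes on that line, which binhunk._read() above maps back to an
# integer before truncating the decoded data.  The helper name is
# hypothetical.
def _demo_binlinelen(c):
    # 'A'..'Z' encode 1..26 bytes, 'a'..'z' encode 27..52 bytes
    if 'A' <= c <= 'Z':
        return ord(c) - ord('A') + 1
    return ord(c) - ord('a') + 27   # e.g. a full 52-byte line starts with 'z'
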
1384 def parsefilename(str):
1384 def parsefilename(str):
1385 # --- filename \t|space stuff
1385 # --- filename \t|space stuff
1386 s = str[4:].rstrip('\r\n')
1386 s = str[4:].rstrip('\r\n')
1387 i = s.find('\t')
1387 i = s.find('\t')
1388 if i < 0:
1388 if i < 0:
1389 i = s.find(' ')
1389 i = s.find(' ')
1390 if i < 0:
1390 if i < 0:
1391 return s
1391 return s
1392 return s[:i]
1392 return s[:i]
1393
1393
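# Illustrative sketch, not part of patch.py: parsefilename() above keeps
# everything between the '--- '/'+++ ' marker and the first tab or space,
# which drops the timestamp some diff tools append.  The helper name and
# sample lines are hypothetical.
def _demo_parsefilename():
    return (parsefilename('--- a/foo.c\t2015-12-01 10:00:00\n'),
            parsefilename('+++ b/foo.c\n'))
    # -> ('a/foo.c', 'b/foo.c')
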
1394 def reversehunks(hunks):
1394 def reversehunks(hunks):
1395 '''reverse the signs in the hunks given as argument
1395 '''reverse the signs in the hunks given as argument
1396
1396
1397 This function operates on hunks coming out of patch.filterpatch, that is
1397 This function operates on hunks coming out of patch.filterpatch, that is
1398 a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:
1398 a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:
1399
1399
1400 >>> rawpatch = """diff --git a/folder1/g b/folder1/g
1400 >>> rawpatch = """diff --git a/folder1/g b/folder1/g
1401 ... --- a/folder1/g
1401 ... --- a/folder1/g
1402 ... +++ b/folder1/g
1402 ... +++ b/folder1/g
1403 ... @@ -1,7 +1,7 @@
1403 ... @@ -1,7 +1,7 @@
1404 ... +firstline
1404 ... +firstline
1405 ... c
1405 ... c
1406 ... 1
1406 ... 1
1407 ... 2
1407 ... 2
1408 ... + 3
1408 ... + 3
1409 ... -4
1409 ... -4
1410 ... 5
1410 ... 5
1411 ... d
1411 ... d
1412 ... +lastline"""
1412 ... +lastline"""
1413 >>> hunks = parsepatch(rawpatch)
1413 >>> hunks = parsepatch(rawpatch)
1414 >>> hunkscomingfromfilterpatch = []
1414 >>> hunkscomingfromfilterpatch = []
1415 >>> for h in hunks:
1415 >>> for h in hunks:
1416 ... hunkscomingfromfilterpatch.append(h)
1416 ... hunkscomingfromfilterpatch.append(h)
1417 ... hunkscomingfromfilterpatch.extend(h.hunks)
1417 ... hunkscomingfromfilterpatch.extend(h.hunks)
1418
1418
1419 >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
1419 >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
1420 >>> fp = cStringIO.StringIO()
1420 >>> fp = cStringIO.StringIO()
1421 >>> for c in reversedhunks:
1421 >>> for c in reversedhunks:
1422 ... c.write(fp)
1422 ... c.write(fp)
1423 >>> fp.seek(0)
1423 >>> fp.seek(0)
1424 >>> reversedpatch = fp.read()
1424 >>> reversedpatch = fp.read()
1425 >>> print reversedpatch
1425 >>> print reversedpatch
1426 diff --git a/folder1/g b/folder1/g
1426 diff --git a/folder1/g b/folder1/g
1427 --- a/folder1/g
1427 --- a/folder1/g
1428 +++ b/folder1/g
1428 +++ b/folder1/g
1429 @@ -1,4 +1,3 @@
1429 @@ -1,4 +1,3 @@
1430 -firstline
1430 -firstline
1431 c
1431 c
1432 1
1432 1
1433 2
1433 2
1434 @@ -1,6 +2,6 @@
1434 @@ -1,6 +2,6 @@
1435 c
1435 c
1436 1
1436 1
1437 2
1437 2
1438 - 3
1438 - 3
1439 +4
1439 +4
1440 5
1440 5
1441 d
1441 d
1442 @@ -5,3 +6,2 @@
1442 @@ -5,3 +6,2 @@
1443 5
1443 5
1444 d
1444 d
1445 -lastline
1445 -lastline
1446
1446
1447 '''
1447 '''
1448
1448
1449 import crecord as crecordmod
1449 import crecord as crecordmod
1450 newhunks = []
1450 newhunks = []
1451 for c in hunks:
1451 for c in hunks:
1452 if isinstance(c, crecordmod.uihunk):
1452 if isinstance(c, crecordmod.uihunk):
1453 # curses hunks encapsulate the record hunk in _hunk
1453 # curses hunks encapsulate the record hunk in _hunk
1454 c = c._hunk
1454 c = c._hunk
1455 if isinstance(c, recordhunk):
1455 if isinstance(c, recordhunk):
1456 for j, line in enumerate(c.hunk):
1456 for j, line in enumerate(c.hunk):
1457 if line.startswith("-"):
1457 if line.startswith("-"):
1458 c.hunk[j] = "+" + c.hunk[j][1:]
1458 c.hunk[j] = "+" + c.hunk[j][1:]
1459 elif line.startswith("+"):
1459 elif line.startswith("+"):
1460 c.hunk[j] = "-" + c.hunk[j][1:]
1460 c.hunk[j] = "-" + c.hunk[j][1:]
1461 c.added, c.removed = c.removed, c.added
1461 c.added, c.removed = c.removed, c.added
1462 newhunks.append(c)
1462 newhunks.append(c)
1463 return newhunks
1463 return newhunks
1464
1464
1465 def parsepatch(originalchunks):
1465 def parsepatch(originalchunks):
1466 """patch -> [] of headers -> [] of hunks """
1466 """patch -> [] of headers -> [] of hunks """
1467 class parser(object):
1467 class parser(object):
1468 """patch parsing state machine"""
1468 """patch parsing state machine"""
1469 def __init__(self):
1469 def __init__(self):
1470 self.fromline = 0
1470 self.fromline = 0
1471 self.toline = 0
1471 self.toline = 0
1472 self.proc = ''
1472 self.proc = ''
1473 self.header = None
1473 self.header = None
1474 self.context = []
1474 self.context = []
1475 self.before = []
1475 self.before = []
1476 self.hunk = []
1476 self.hunk = []
1477 self.headers = []
1477 self.headers = []
1478
1478
1479 def addrange(self, limits):
1479 def addrange(self, limits):
1480 fromstart, fromend, tostart, toend, proc = limits
1480 fromstart, fromend, tostart, toend, proc = limits
1481 self.fromline = int(fromstart)
1481 self.fromline = int(fromstart)
1482 self.toline = int(tostart)
1482 self.toline = int(tostart)
1483 self.proc = proc
1483 self.proc = proc
1484
1484
1485 def addcontext(self, context):
1485 def addcontext(self, context):
1486 if self.hunk:
1486 if self.hunk:
1487 h = recordhunk(self.header, self.fromline, self.toline,
1487 h = recordhunk(self.header, self.fromline, self.toline,
1488 self.proc, self.before, self.hunk, context)
1488 self.proc, self.before, self.hunk, context)
1489 self.header.hunks.append(h)
1489 self.header.hunks.append(h)
1490 self.fromline += len(self.before) + h.removed
1490 self.fromline += len(self.before) + h.removed
1491 self.toline += len(self.before) + h.added
1491 self.toline += len(self.before) + h.added
1492 self.before = []
1492 self.before = []
1493 self.hunk = []
1493 self.hunk = []
1494 self.proc = ''
1494 self.proc = ''
1495 self.context = context
1495 self.context = context
1496
1496
1497 def addhunk(self, hunk):
1497 def addhunk(self, hunk):
1498 if self.context:
1498 if self.context:
1499 self.before = self.context
1499 self.before = self.context
1500 self.context = []
1500 self.context = []
1501 self.hunk = hunk
1501 self.hunk = hunk
1502
1502
1503 def newfile(self, hdr):
1503 def newfile(self, hdr):
1504 self.addcontext([])
1504 self.addcontext([])
1505 h = header(hdr)
1505 h = header(hdr)
1506 self.headers.append(h)
1506 self.headers.append(h)
1507 self.header = h
1507 self.header = h
1508
1508
1509 def addother(self, line):
1509 def addother(self, line):
1510 pass # 'other' lines are ignored
1510 pass # 'other' lines are ignored
1511
1511
1512 def finished(self):
1512 def finished(self):
1513 self.addcontext([])
1513 self.addcontext([])
1514 return self.headers
1514 return self.headers
1515
1515
1516 transitions = {
1516 transitions = {
1517 'file': {'context': addcontext,
1517 'file': {'context': addcontext,
1518 'file': newfile,
1518 'file': newfile,
1519 'hunk': addhunk,
1519 'hunk': addhunk,
1520 'range': addrange},
1520 'range': addrange},
1521 'context': {'file': newfile,
1521 'context': {'file': newfile,
1522 'hunk': addhunk,
1522 'hunk': addhunk,
1523 'range': addrange,
1523 'range': addrange,
1524 'other': addother},
1524 'other': addother},
1525 'hunk': {'context': addcontext,
1525 'hunk': {'context': addcontext,
1526 'file': newfile,
1526 'file': newfile,
1527 'range': addrange},
1527 'range': addrange},
1528 'range': {'context': addcontext,
1528 'range': {'context': addcontext,
1529 'hunk': addhunk},
1529 'hunk': addhunk},
1530 'other': {'other': addother},
1530 'other': {'other': addother},
1531 }
1531 }
1532
1532
1533 p = parser()
1533 p = parser()
1534 fp = cStringIO.StringIO()
1534 fp = cStringIO.StringIO()
1535 fp.write(''.join(originalchunks))
1535 fp.write(''.join(originalchunks))
1536 fp.seek(0)
1536 fp.seek(0)
1537
1537
1538 state = 'context'
1538 state = 'context'
1539 for newstate, data in scanpatch(fp):
1539 for newstate, data in scanpatch(fp):
1540 try:
1540 try:
1541 p.transitions[state][newstate](p, data)
1541 p.transitions[state][newstate](p, data)
1542 except KeyError:
1542 except KeyError:
1543 raise PatchError('unhandled transition: %s -> %s' %
1543 raise PatchError('unhandled transition: %s -> %s' %
1544 (state, newstate))
1544 (state, newstate))
1545 state = newstate
1545 state = newstate
1546 del fp
1546 del fp
1547 return p.finished()
1547 return p.finished()
1548
1548
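# Illustrative sketch, not part of patch.py: driving the parser state machine
# in parsepatch() above with a minimal one-hunk patch.  The helper name and
# the sample patch are hypothetical.
def _demo_parsepatch():
    rawpatch = [
        'diff --git a/a b/a\n',
        '--- a/a\n',
        '+++ b/a\n',
        '@@ -1,1 +1,2 @@\n',
        ' context\n',
        '+added\n',
    ]
    headers = parsepatch(rawpatch)
    # one header carrying one recordhunk
    return len(headers), len(headers[0].hunks)   # -> (1, 1)
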
1549 def pathtransform(path, strip, prefix):
1549 def pathtransform(path, strip, prefix):
1550 '''turn a path from a patch into a path suitable for the repository
1550 '''turn a path from a patch into a path suitable for the repository
1551
1551
1552 prefix, if not empty, is expected to be normalized with a / at the end.
1552 prefix, if not empty, is expected to be normalized with a / at the end.
1553
1553
1554 Returns (stripped components, path in repository).
1554 Returns (stripped components, path in repository).
1555
1555
1556 >>> pathtransform('a/b/c', 0, '')
1556 >>> pathtransform('a/b/c', 0, '')
1557 ('', 'a/b/c')
1557 ('', 'a/b/c')
1558 >>> pathtransform(' a/b/c ', 0, '')
1558 >>> pathtransform(' a/b/c ', 0, '')
1559 ('', ' a/b/c')
1559 ('', ' a/b/c')
1560 >>> pathtransform(' a/b/c ', 2, '')
1560 >>> pathtransform(' a/b/c ', 2, '')
1561 ('a/b/', 'c')
1561 ('a/b/', 'c')
1562 >>> pathtransform('a/b/c', 0, 'd/e/')
1562 >>> pathtransform('a/b/c', 0, 'd/e/')
1563 ('', 'd/e/a/b/c')
1563 ('', 'd/e/a/b/c')
1564 >>> pathtransform(' a//b/c ', 2, 'd/e/')
1564 >>> pathtransform(' a//b/c ', 2, 'd/e/')
1565 ('a//b/', 'd/e/c')
1565 ('a//b/', 'd/e/c')
1566 >>> pathtransform('a/b/c', 3, '')
1566 >>> pathtransform('a/b/c', 3, '')
1567 Traceback (most recent call last):
1567 Traceback (most recent call last):
1568 PatchError: unable to strip away 1 of 3 dirs from a/b/c
1568 PatchError: unable to strip away 1 of 3 dirs from a/b/c
1569 '''
1569 '''
1570 pathlen = len(path)
1570 pathlen = len(path)
1571 i = 0
1571 i = 0
1572 if strip == 0:
1572 if strip == 0:
1573 return '', prefix + path.rstrip()
1573 return '', prefix + path.rstrip()
1574 count = strip
1574 count = strip
1575 while count > 0:
1575 while count > 0:
1576 i = path.find('/', i)
1576 i = path.find('/', i)
1577 if i == -1:
1577 if i == -1:
1578 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1578 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1579 (count, strip, path))
1579 (count, strip, path))
1580 i += 1
1580 i += 1
1581 # consume '//' in the path
1581 # consume '//' in the path
1582 while i < pathlen - 1 and path[i] == '/':
1582 while i < pathlen - 1 and path[i] == '/':
1583 i += 1
1583 i += 1
1584 count -= 1
1584 count -= 1
1585 return path[:i].lstrip(), prefix + path[i:].rstrip()
1585 return path[:i].lstrip(), prefix + path[i:].rstrip()
1586
1586
1587 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
1587 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
1588 nulla = afile_orig == "/dev/null"
1588 nulla = afile_orig == "/dev/null"
1589 nullb = bfile_orig == "/dev/null"
1589 nullb = bfile_orig == "/dev/null"
1590 create = nulla and hunk.starta == 0 and hunk.lena == 0
1590 create = nulla and hunk.starta == 0 and hunk.lena == 0
1591 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1591 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1592 abase, afile = pathtransform(afile_orig, strip, prefix)
1592 abase, afile = pathtransform(afile_orig, strip, prefix)
1593 gooda = not nulla and backend.exists(afile)
1593 gooda = not nulla and backend.exists(afile)
1594 bbase, bfile = pathtransform(bfile_orig, strip, prefix)
1594 bbase, bfile = pathtransform(bfile_orig, strip, prefix)
1595 if afile == bfile:
1595 if afile == bfile:
1596 goodb = gooda
1596 goodb = gooda
1597 else:
1597 else:
1598 goodb = not nullb and backend.exists(bfile)
1598 goodb = not nullb and backend.exists(bfile)
1599 missing = not goodb and not gooda and not create
1599 missing = not goodb and not gooda and not create
1600
1600
1601 # some diff programs apparently produce patches where the afile is
1601 # some diff programs apparently produce patches where the afile is
1602 # not /dev/null, but afile starts with bfile
1602 # not /dev/null, but afile starts with bfile
1603 abasedir = afile[:afile.rfind('/') + 1]
1603 abasedir = afile[:afile.rfind('/') + 1]
1604 bbasedir = bfile[:bfile.rfind('/') + 1]
1604 bbasedir = bfile[:bfile.rfind('/') + 1]
1605 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1605 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1606 and hunk.starta == 0 and hunk.lena == 0):
1606 and hunk.starta == 0 and hunk.lena == 0):
1607 create = True
1607 create = True
1608 missing = False
1608 missing = False
1609
1609
1610 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1610 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1611 # diff is between a file and its backup. In this case, the original
1611 # diff is between a file and its backup. In this case, the original
1612 # file should be patched (see original mpatch code).
1612 # file should be patched (see original mpatch code).
1613 isbackup = (abase == bbase and bfile.startswith(afile))
1613 isbackup = (abase == bbase and bfile.startswith(afile))
1614 fname = None
1614 fname = None
1615 if not missing:
1615 if not missing:
1616 if gooda and goodb:
1616 if gooda and goodb:
1617 if isbackup:
1617 if isbackup:
1618 fname = afile
1618 fname = afile
1619 else:
1619 else:
1620 fname = bfile
1620 fname = bfile
1621 elif gooda:
1621 elif gooda:
1622 fname = afile
1622 fname = afile
1623
1623
1624 if not fname:
1624 if not fname:
1625 if not nullb:
1625 if not nullb:
1626 if isbackup:
1626 if isbackup:
1627 fname = afile
1627 fname = afile
1628 else:
1628 else:
1629 fname = bfile
1629 fname = bfile
1630 elif not nulla:
1630 elif not nulla:
1631 fname = afile
1631 fname = afile
1632 else:
1632 else:
1633 raise PatchError(_("undefined source and destination files"))
1633 raise PatchError(_("undefined source and destination files"))
1634
1634
1635 gp = patchmeta(fname)
1635 gp = patchmeta(fname)
1636 if create:
1636 if create:
1637 gp.op = 'ADD'
1637 gp.op = 'ADD'
1638 elif remove:
1638 elif remove:
1639 gp.op = 'DELETE'
1639 gp.op = 'DELETE'
1640 return gp
1640 return gp
1641
1641
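# Illustrative sketch, not part of patch.py: restatement of how makepatchmeta()
# above classifies a plain (non-git) patch as a creation or a deletion -- a
# '/dev/null' side together with a zero '0,0' range.  The helper name is
# hypothetical.
def _demo_patchop(afile, bfile, starta, lena, startb, lenb):
    if afile == '/dev/null' and starta == 0 and lena == 0:
        return 'ADD'      # new file: no old content at all
    if bfile == '/dev/null' and startb == 0 and lenb == 0:
        return 'DELETE'   # removed file: no new content at all
    return None           # ordinary modification
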
1642 def scanpatch(fp):
1642 def scanpatch(fp):
1643 """like patch.iterhunks, but yield different events
1643 """like patch.iterhunks, but yield different events
1644
1644
1645 - ('file', [header_lines + fromfile + tofile])
1645 - ('file', [header_lines + fromfile + tofile])
1646 - ('context', [context_lines])
1646 - ('context', [context_lines])
1647 - ('hunk', [hunk_lines])
1647 - ('hunk', [hunk_lines])
1648 - ('range', (-start,len, +start,len, proc))
1648 - ('range', (-start,len, +start,len, proc))
1649 """
1649 """
1650 lines_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
1650 lines_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
1651 lr = linereader(fp)
1651 lr = linereader(fp)
1652
1652
1653 def scanwhile(first, p):
1653 def scanwhile(first, p):
1654 """scan lr while predicate holds"""
1654 """scan lr while predicate holds"""
1655 lines = [first]
1655 lines = [first]
1656 while True:
1656 while True:
1657 line = lr.readline()
1657 line = lr.readline()
1658 if not line:
1658 if not line:
1659 break
1659 break
1660 if p(line):
1660 if p(line):
1661 lines.append(line)
1661 lines.append(line)
1662 else:
1662 else:
1663 lr.push(line)
1663 lr.push(line)
1664 break
1664 break
1665 return lines
1665 return lines
1666
1666
1667 while True:
1667 while True:
1668 line = lr.readline()
1668 line = lr.readline()
1669 if not line:
1669 if not line:
1670 break
1670 break
1671 if line.startswith('diff --git a/') or line.startswith('diff -r '):
1671 if line.startswith('diff --git a/') or line.startswith('diff -r '):
1672 def notheader(line):
1672 def notheader(line):
1673 s = line.split(None, 1)
1673 s = line.split(None, 1)
1674 return not s or s[0] not in ('---', 'diff')
1674 return not s or s[0] not in ('---', 'diff')
1675 header = scanwhile(line, notheader)
1675 header = scanwhile(line, notheader)
1676 fromfile = lr.readline()
1676 fromfile = lr.readline()
1677 if fromfile.startswith('---'):
1677 if fromfile.startswith('---'):
1678 tofile = lr.readline()
1678 tofile = lr.readline()
1679 header += [fromfile, tofile]
1679 header += [fromfile, tofile]
1680 else:
1680 else:
1681 lr.push(fromfile)
1681 lr.push(fromfile)
1682 yield 'file', header
1682 yield 'file', header
1683 elif line[0] == ' ':
1683 elif line[0] == ' ':
1684 yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
1684 yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
1685 elif line[0] in '-+':
1685 elif line[0] in '-+':
1686 yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
1686 yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
1687 else:
1687 else:
1688 m = lines_re.match(line)
1688 m = lines_re.match(line)
1689 if m:
1689 if m:
1690 yield 'range', m.groups()
1690 yield 'range', m.groups()
1691 else:
1691 else:
1692 yield 'other', line
1692 yield 'other', line
1693
1693
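# Illustrative sketch, not part of patch.py: the event stream scanpatch()
# above yields for a minimal patch.  The helper name and sample patch are
# hypothetical.
def _demo_scanpatch():
    fp = cStringIO.StringIO(
        'diff --git a/a b/a\n'
        '--- a/a\n'
        '+++ b/a\n'
        '@@ -1,1 +1,2 @@\n'
        ' context\n'
        '+added\n')
    return [event for event, data in scanpatch(fp)]
    # -> ['file', 'range', 'context', 'hunk']
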
1694 def scangitpatch(lr, firstline):
1694 def scangitpatch(lr, firstline):
1695 """
1695 """
1696 Git patches can emit:
1696 Git patches can emit:
1697 - rename a to b
1697 - rename a to b
1698 - change b
1698 - change b
1699 - copy a to c
1699 - copy a to c
1700 - change c
1700 - change c
1701
1701
1702 We cannot apply this sequence as-is: the renamed 'a' could not be
1702 We cannot apply this sequence as-is: the renamed 'a' could not be
1703 found because it would have been renamed already. And we cannot copy
1703 found because it would have been renamed already. And we cannot copy
1704 from 'b' instead because 'b' would have been changed already. So
1704 from 'b' instead because 'b' would have been changed already. So
1705 we scan the git patch for copy and rename commands so we can
1705 we scan the git patch for copy and rename commands so we can
1706 perform the copies ahead of time.
1706 perform the copies ahead of time.
1707 """
1707 """
1708 pos = 0
1708 pos = 0
1709 try:
1709 try:
1710 pos = lr.fp.tell()
1710 pos = lr.fp.tell()
1711 fp = lr.fp
1711 fp = lr.fp
1712 except IOError:
1712 except IOError:
1713 fp = cStringIO.StringIO(lr.fp.read())
1713 fp = cStringIO.StringIO(lr.fp.read())
1714 gitlr = linereader(fp)
1714 gitlr = linereader(fp)
1715 gitlr.push(firstline)
1715 gitlr.push(firstline)
1716 gitpatches = readgitpatch(gitlr)
1716 gitpatches = readgitpatch(gitlr)
1717 fp.seek(pos)
1717 fp.seek(pos)
1718 return gitpatches
1718 return gitpatches
1719
1719
1720 def iterhunks(fp):
1720 def iterhunks(fp):
1721 """Read a patch and yield the following events:
1721 """Read a patch and yield the following events:
1722 - ("file", afile, bfile, firsthunk): select a new target file.
1722 - ("file", afile, bfile, firsthunk): select a new target file.
1723 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1723 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1724 "file" event.
1724 "file" event.
1725 - ("git", gitchanges): current diff is in git format, gitchanges
1725 - ("git", gitchanges): current diff is in git format, gitchanges
1726 maps filenames to gitpatch records. Unique event.
1726 maps filenames to gitpatch records. Unique event.
1727 """
1727 """
1728 afile = ""
1728 afile = ""
1729 bfile = ""
1729 bfile = ""
1730 state = None
1730 state = None
1731 hunknum = 0
1731 hunknum = 0
1732 emitfile = newfile = False
1732 emitfile = newfile = False
1733 gitpatches = None
1733 gitpatches = None
1734
1734
1735 # our states
1735 # our states
1736 BFILE = 1
1736 BFILE = 1
1737 context = None
1737 context = None
1738 lr = linereader(fp)
1738 lr = linereader(fp)
1739
1739
1740 while True:
1740 while True:
1741 x = lr.readline()
1741 x = lr.readline()
1742 if not x:
1742 if not x:
1743 break
1743 break
1744 if state == BFILE and (
1744 if state == BFILE and (
1745 (not context and x[0] == '@')
1745 (not context and x[0] == '@')
1746 or (context is not False and x.startswith('***************'))
1746 or (context is not False and x.startswith('***************'))
1747 or x.startswith('GIT binary patch')):
1747 or x.startswith('GIT binary patch')):
1748 gp = None
1748 gp = None
1749 if (gitpatches and
1749 if (gitpatches and
1750 gitpatches[-1].ispatching(afile, bfile)):
1750 gitpatches[-1].ispatching(afile, bfile)):
1751 gp = gitpatches.pop()
1751 gp = gitpatches.pop()
1752 if x.startswith('GIT binary patch'):
1752 if x.startswith('GIT binary patch'):
1753 h = binhunk(lr, gp.path)
1753 h = binhunk(lr, gp.path)
1754 else:
1754 else:
1755 if context is None and x.startswith('***************'):
1755 if context is None and x.startswith('***************'):
1756 context = True
1756 context = True
1757 h = hunk(x, hunknum + 1, lr, context)
1757 h = hunk(x, hunknum + 1, lr, context)
1758 hunknum += 1
1758 hunknum += 1
1759 if emitfile:
1759 if emitfile:
1760 emitfile = False
1760 emitfile = False
1761 yield 'file', (afile, bfile, h, gp and gp.copy() or None)
1761 yield 'file', (afile, bfile, h, gp and gp.copy() or None)
1762 yield 'hunk', h
1762 yield 'hunk', h
1763 elif x.startswith('diff --git a/'):
1763 elif x.startswith('diff --git a/'):
1764 m = gitre.match(x.rstrip(' \r\n'))
1764 m = gitre.match(x.rstrip(' \r\n'))
1765 if not m:
1765 if not m:
1766 continue
1766 continue
1767 if gitpatches is None:
1767 if gitpatches is None:
1768 # scan whole input for git metadata
1768 # scan whole input for git metadata
1769 gitpatches = scangitpatch(lr, x)
1769 gitpatches = scangitpatch(lr, x)
1770 yield 'git', [g.copy() for g in gitpatches
1770 yield 'git', [g.copy() for g in gitpatches
1771 if g.op in ('COPY', 'RENAME')]
1771 if g.op in ('COPY', 'RENAME')]
1772 gitpatches.reverse()
1772 gitpatches.reverse()
1773 afile = 'a/' + m.group(1)
1773 afile = 'a/' + m.group(1)
1774 bfile = 'b/' + m.group(2)
1774 bfile = 'b/' + m.group(2)
1775 while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
1775 while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
1776 gp = gitpatches.pop()
1776 gp = gitpatches.pop()
1777 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1777 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1778 if not gitpatches:
1778 if not gitpatches:
1779 raise PatchError(_('failed to synchronize metadata for "%s"')
1779 raise PatchError(_('failed to synchronize metadata for "%s"')
1780 % afile[2:])
1780 % afile[2:])
1781 gp = gitpatches[-1]
1781 gp = gitpatches[-1]
1782 newfile = True
1782 newfile = True
1783 elif x.startswith('---'):
1783 elif x.startswith('---'):
1784 # check for a unified diff
1784 # check for a unified diff
1785 l2 = lr.readline()
1785 l2 = lr.readline()
1786 if not l2.startswith('+++'):
1786 if not l2.startswith('+++'):
1787 lr.push(l2)
1787 lr.push(l2)
1788 continue
1788 continue
1789 newfile = True
1789 newfile = True
1790 context = False
1790 context = False
1791 afile = parsefilename(x)
1791 afile = parsefilename(x)
1792 bfile = parsefilename(l2)
1792 bfile = parsefilename(l2)
1793 elif x.startswith('***'):
1793 elif x.startswith('***'):
1794 # check for a context diff
1794 # check for a context diff
1795 l2 = lr.readline()
1795 l2 = lr.readline()
1796 if not l2.startswith('---'):
1796 if not l2.startswith('---'):
1797 lr.push(l2)
1797 lr.push(l2)
1798 continue
1798 continue
1799 l3 = lr.readline()
1799 l3 = lr.readline()
1800 lr.push(l3)
1800 lr.push(l3)
1801 if not l3.startswith("***************"):
1801 if not l3.startswith("***************"):
1802 lr.push(l2)
1802 lr.push(l2)
1803 continue
1803 continue
1804 newfile = True
1804 newfile = True
1805 context = True
1805 context = True
1806 afile = parsefilename(x)
1806 afile = parsefilename(x)
1807 bfile = parsefilename(l2)
1807 bfile = parsefilename(l2)
1808
1808
1809 if newfile:
1809 if newfile:
1810 newfile = False
1810 newfile = False
1811 emitfile = True
1811 emitfile = True
1812 state = BFILE
1812 state = BFILE
1813 hunknum = 0
1813 hunknum = 0
1814
1814
1815 while gitpatches:
1815 while gitpatches:
1816 gp = gitpatches.pop()
1816 gp = gitpatches.pop()
1817 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1817 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1818
1818
1819 def applybindelta(binchunk, data):
1819 def applybindelta(binchunk, data):
1820 """Apply a binary delta hunk
1820 """Apply a binary delta hunk
1821 The algorithm used is the one from git's patch-delta.c
1821 The algorithm used is the one from git's patch-delta.c
1822 """
1822 """
1823 def deltahead(binchunk):
1823 def deltahead(binchunk):
1824 i = 0
1824 i = 0
1825 for c in binchunk:
1825 for c in binchunk:
1826 i += 1
1826 i += 1
1827 if not (ord(c) & 0x80):
1827 if not (ord(c) & 0x80):
1828 return i
1828 return i
1829 return i
1829 return i
1830 out = ""
1830 out = ""
1831 s = deltahead(binchunk)
1831 s = deltahead(binchunk)
1832 binchunk = binchunk[s:]
1832 binchunk = binchunk[s:]
1833 s = deltahead(binchunk)
1833 s = deltahead(binchunk)
1834 binchunk = binchunk[s:]
1834 binchunk = binchunk[s:]
1835 i = 0
1835 i = 0
1836 while i < len(binchunk):
1836 while i < len(binchunk):
1837 cmd = ord(binchunk[i])
1837 cmd = ord(binchunk[i])
1838 i += 1
1838 i += 1
1839 if (cmd & 0x80):
1839 if (cmd & 0x80):
1840 offset = 0
1840 offset = 0
1841 size = 0
1841 size = 0
1842 if (cmd & 0x01):
1842 if (cmd & 0x01):
1843 offset = ord(binchunk[i])
1843 offset = ord(binchunk[i])
1844 i += 1
1844 i += 1
1845 if (cmd & 0x02):
1845 if (cmd & 0x02):
1846 offset |= ord(binchunk[i]) << 8
1846 offset |= ord(binchunk[i]) << 8
1847 i += 1
1847 i += 1
1848 if (cmd & 0x04):
1848 if (cmd & 0x04):
1849 offset |= ord(binchunk[i]) << 16
1849 offset |= ord(binchunk[i]) << 16
1850 i += 1
1850 i += 1
1851 if (cmd & 0x08):
1851 if (cmd & 0x08):
1852 offset |= ord(binchunk[i]) << 24
1852 offset |= ord(binchunk[i]) << 24
1853 i += 1
1853 i += 1
1854 if (cmd & 0x10):
1854 if (cmd & 0x10):
1855 size = ord(binchunk[i])
1855 size = ord(binchunk[i])
1856 i += 1
1856 i += 1
1857 if (cmd & 0x20):
1857 if (cmd & 0x20):
1858 size |= ord(binchunk[i]) << 8
1858 size |= ord(binchunk[i]) << 8
1859 i += 1
1859 i += 1
1860 if (cmd & 0x40):
1860 if (cmd & 0x40):
1861 size |= ord(binchunk[i]) << 16
1861 size |= ord(binchunk[i]) << 16
1862 i += 1
1862 i += 1
1863 if size == 0:
1863 if size == 0:
1864 size = 0x10000
1864 size = 0x10000
1865 offset_end = offset + size
1865 offset_end = offset + size
1866 out += data[offset:offset_end]
1866 out += data[offset:offset_end]
1867 elif cmd != 0:
1867 elif cmd != 0:
1868 offset_end = i + cmd
1868 offset_end = i + cmd
1869 out += binchunk[i:offset_end]
1869 out += binchunk[i:offset_end]
1870 i += cmd
1870 i += cmd
1871 else:
1871 else:
1872 raise PatchError(_('unexpected delta opcode 0'))
1872 raise PatchError(_('unexpected delta opcode 0'))
1873 return out
1873 return out
1874
1874
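# Illustrative sketch, not part of patch.py: a hand-assembled delta for
# applybindelta() above.  The byte values follow git's patch-delta format as
# implemented by the function: two varint size headers, a literal-insert
# opcode, then a copy-from-source opcode.  The helper name is hypothetical.
def _demo_applybindelta():
    delta = ('\x05'          # varint: source length (5)
             '\x05'          # varint: target length (5)
             '\x03' 'abc'    # insert 3 literal bytes
             '\x91\x03\x02') # 0x80|0x10|0x01: copy 2 bytes from offset 3
    return applybindelta(delta, 'hello')   # -> 'abclo'
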
1875 def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
1875 def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
1876 """Reads a patch from fp and tries to apply it.
1876 """Reads a patch from fp and tries to apply it.
1877
1877
1878 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
1878 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
1879 there was any fuzz.
1879 there was any fuzz.
1880
1880
1881 If 'eolmode' is 'strict', the patch content and patched file are
1881 If 'eolmode' is 'strict', the patch content and patched file are
1882 read in binary mode. Otherwise, line endings are ignored when
1882 read in binary mode. Otherwise, line endings are ignored when
1883 patching then normalized according to 'eolmode'.
1883 patching then normalized according to 'eolmode'.
1884 """
1884 """
1885 return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
1885 return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
1886 prefix=prefix, eolmode=eolmode)
1886 prefix=prefix, eolmode=eolmode)
1887
1887
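# Illustrative sketch, not part of patch.py: how callers such as patchbackend()
# (further below) interpret the value returned by applydiff()/_applydiff().
# The helper name is hypothetical.
def _demo_applydiffresult(ret):
    if ret < 0:
        return 'rejects were written; the patch did not fully apply'
    if ret > 0:
        return 'applied, but at least one hunk needed fuzz'
    return 'applied cleanly'
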
1888 def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
1888 def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
1889 eolmode='strict'):
1889 eolmode='strict'):
1890
1890
1891 if prefix:
1891 if prefix:
1892 prefix = pathutil.canonpath(backend.repo.root, backend.repo.getcwd(),
1892 prefix = pathutil.canonpath(backend.repo.root, backend.repo.getcwd(),
1893 prefix)
1893 prefix)
1894 if prefix != '':
1894 if prefix != '':
1895 prefix += '/'
1895 prefix += '/'
1896 def pstrip(p):
1896 def pstrip(p):
1897 return pathtransform(p, strip - 1, prefix)[1]
1897 return pathtransform(p, strip - 1, prefix)[1]
1898
1898
1899 rejects = 0
1899 rejects = 0
1900 err = 0
1900 err = 0
1901 current_file = None
1901 current_file = None
1902
1902
1903 for state, values in iterhunks(fp):
1903 for state, values in iterhunks(fp):
1904 if state == 'hunk':
1904 if state == 'hunk':
1905 if not current_file:
1905 if not current_file:
1906 continue
1906 continue
1907 ret = current_file.apply(values)
1907 ret = current_file.apply(values)
1908 if ret > 0:
1908 if ret > 0:
1909 err = 1
1909 err = 1
1910 elif state == 'file':
1910 elif state == 'file':
1911 if current_file:
1911 if current_file:
1912 rejects += current_file.close()
1912 rejects += current_file.close()
1913 current_file = None
1913 current_file = None
1914 afile, bfile, first_hunk, gp = values
1914 afile, bfile, first_hunk, gp = values
1915 if gp:
1915 if gp:
1916 gp.path = pstrip(gp.path)
1916 gp.path = pstrip(gp.path)
1917 if gp.oldpath:
1917 if gp.oldpath:
1918 gp.oldpath = pstrip(gp.oldpath)
1918 gp.oldpath = pstrip(gp.oldpath)
1919 else:
1919 else:
1920 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
1920 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
1921 prefix)
1921 prefix)
1922 if gp.op == 'RENAME':
1922 if gp.op == 'RENAME':
1923 backend.unlink(gp.oldpath)
1923 backend.unlink(gp.oldpath)
1924 if not first_hunk:
1924 if not first_hunk:
1925 if gp.op == 'DELETE':
1925 if gp.op == 'DELETE':
1926 backend.unlink(gp.path)
1926 backend.unlink(gp.path)
1927 continue
1927 continue
1928 data, mode = None, None
1928 data, mode = None, None
1929 if gp.op in ('RENAME', 'COPY'):
1929 if gp.op in ('RENAME', 'COPY'):
1930 data, mode = store.getfile(gp.oldpath)[:2]
1930 data, mode = store.getfile(gp.oldpath)[:2]
1931 # FIXME: failing getfile has never been handled here
1931 # FIXME: failing getfile has never been handled here
1932 assert data is not None
1932 assert data is not None
1933 if gp.mode:
1933 if gp.mode:
1934 mode = gp.mode
1934 mode = gp.mode
1935 if gp.op == 'ADD':
1935 if gp.op == 'ADD':
1936 # Added files without content have no hunk and
1936 # Added files without content have no hunk and
1937 # must be created
1937 # must be created
1938 data = ''
1938 data = ''
1939 if data or mode:
1939 if data or mode:
1940 if (gp.op in ('ADD', 'RENAME', 'COPY')
1940 if (gp.op in ('ADD', 'RENAME', 'COPY')
1941 and backend.exists(gp.path)):
1941 and backend.exists(gp.path)):
1942 raise PatchError(_("cannot create %s: destination "
1942 raise PatchError(_("cannot create %s: destination "
1943 "already exists") % gp.path)
1943 "already exists") % gp.path)
1944 backend.setfile(gp.path, data, mode, gp.oldpath)
1944 backend.setfile(gp.path, data, mode, gp.oldpath)
1945 continue
1945 continue
1946 try:
1946 try:
1947 current_file = patcher(ui, gp, backend, store,
1947 current_file = patcher(ui, gp, backend, store,
1948 eolmode=eolmode)
1948 eolmode=eolmode)
1949 except PatchError as inst:
1949 except PatchError as inst:
1950 ui.warn(str(inst) + '\n')
1950 ui.warn(str(inst) + '\n')
1951 current_file = None
1951 current_file = None
1952 rejects += 1
1952 rejects += 1
1953 continue
1953 continue
1954 elif state == 'git':
1954 elif state == 'git':
1955 for gp in values:
1955 for gp in values:
1956 path = pstrip(gp.oldpath)
1956 path = pstrip(gp.oldpath)
1957 data, mode = backend.getfile(path)
1957 data, mode = backend.getfile(path)
1958 if data is None:
1958 if data is None:
1959 # The error ignored here will trigger a getfile()
1959 # The error ignored here will trigger a getfile()
1960 # error in a place more appropriate for error
1960 # error in a place more appropriate for error
1961 # handling, and will not interrupt the patching
1961 # handling, and will not interrupt the patching
1962 # process.
1962 # process.
1963 pass
1963 pass
1964 else:
1964 else:
1965 store.setfile(path, data, mode)
1965 store.setfile(path, data, mode)
1966 else:
1966 else:
1967 raise error.Abort(_('unsupported parser state: %s') % state)
1967 raise error.Abort(_('unsupported parser state: %s') % state)
1968
1968
1969 if current_file:
1969 if current_file:
1970 rejects += current_file.close()
1970 rejects += current_file.close()
1971
1971
1972 if rejects:
1972 if rejects:
1973 return -1
1973 return -1
1974 return err
1974 return err
1975
1975
1976 def _externalpatch(ui, repo, patcher, patchname, strip, files,
1976 def _externalpatch(ui, repo, patcher, patchname, strip, files,
1977 similarity):
1977 similarity):
1978 """use <patcher> to apply <patchname> to the working directory.
1978 """use <patcher> to apply <patchname> to the working directory.
1979 returns whether patch was applied with fuzz factor."""
1979 returns whether patch was applied with fuzz factor."""
1980
1980
1981 fuzz = False
1981 fuzz = False
1982 args = []
1982 args = []
1983 cwd = repo.root
1983 cwd = repo.root
1984 if cwd:
1984 if cwd:
1985 args.append('-d %s' % util.shellquote(cwd))
1985 args.append('-d %s' % util.shellquote(cwd))
1986 fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
1986 fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
1987 util.shellquote(patchname)))
1987 util.shellquote(patchname)))
1988 try:
1988 try:
1989 for line in fp:
1989 for line in fp:
1990 line = line.rstrip()
1990 line = line.rstrip()
1991 ui.note(line + '\n')
1991 ui.note(line + '\n')
1992 if line.startswith('patching file '):
1992 if line.startswith('patching file '):
1993 pf = util.parsepatchoutput(line)
1993 pf = util.parsepatchoutput(line)
1994 printed_file = False
1994 printed_file = False
1995 files.add(pf)
1995 files.add(pf)
1996 elif line.find('with fuzz') >= 0:
1996 elif line.find('with fuzz') >= 0:
1997 fuzz = True
1997 fuzz = True
1998 if not printed_file:
1998 if not printed_file:
1999 ui.warn(pf + '\n')
1999 ui.warn(pf + '\n')
2000 printed_file = True
2000 printed_file = True
2001 ui.warn(line + '\n')
2001 ui.warn(line + '\n')
2002 elif line.find('saving rejects to file') >= 0:
2002 elif line.find('saving rejects to file') >= 0:
2003 ui.warn(line + '\n')
2003 ui.warn(line + '\n')
2004 elif line.find('FAILED') >= 0:
2004 elif line.find('FAILED') >= 0:
2005 if not printed_file:
2005 if not printed_file:
2006 ui.warn(pf + '\n')
2006 ui.warn(pf + '\n')
2007 printed_file = True
2007 printed_file = True
2008 ui.warn(line + '\n')
2008 ui.warn(line + '\n')
2009 finally:
2009 finally:
2010 if files:
2010 if files:
2011 scmutil.marktouched(repo, files, similarity)
2011 scmutil.marktouched(repo, files, similarity)
2012 code = fp.close()
2012 code = fp.close()
2013 if code:
2013 if code:
2014 raise PatchError(_("patch command failed: %s") %
2014 raise PatchError(_("patch command failed: %s") %
2015 util.explainexit(code)[0])
2015 util.explainexit(code)[0])
2016 return fuzz
2016 return fuzz
2017
2017
2018 def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
2018 def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
2019 eolmode='strict'):
2019 eolmode='strict'):
2020 if files is None:
2020 if files is None:
2021 files = set()
2021 files = set()
2022 if eolmode is None:
2022 if eolmode is None:
2023 eolmode = ui.config('patch', 'eol', 'strict')
2023 eolmode = ui.config('patch', 'eol', 'strict')
2024 if eolmode.lower() not in eolmodes:
2024 if eolmode.lower() not in eolmodes:
2025 raise error.Abort(_('unsupported line endings type: %s') % eolmode)
2025 raise error.Abort(_('unsupported line endings type: %s') % eolmode)
2026 eolmode = eolmode.lower()
2026 eolmode = eolmode.lower()
2027
2027
2028 store = filestore()
2028 store = filestore()
2029 try:
2029 try:
2030 fp = open(patchobj, 'rb')
2030 fp = open(patchobj, 'rb')
2031 except TypeError:
2031 except TypeError:
2032 fp = patchobj
2032 fp = patchobj
2033 try:
2033 try:
2034 ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
2034 ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
2035 eolmode=eolmode)
2035 eolmode=eolmode)
2036 finally:
2036 finally:
2037 if fp != patchobj:
2037 if fp != patchobj:
2038 fp.close()
2038 fp.close()
2039 files.update(backend.close())
2039 files.update(backend.close())
2040 store.close()
2040 store.close()
2041 if ret < 0:
2041 if ret < 0:
2042 raise PatchError(_('patch failed to apply'))
2042 raise PatchError(_('patch failed to apply'))
2043 return ret > 0
2043 return ret > 0
2044
2044
2045 def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
2045 def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
2046 eolmode='strict', similarity=0):
2046 eolmode='strict', similarity=0):
2047 """use builtin patch to apply <patchobj> to the working directory.
2047 """use builtin patch to apply <patchobj> to the working directory.
2048 returns whether patch was applied with fuzz factor."""
2048 returns whether patch was applied with fuzz factor."""
2049 backend = workingbackend(ui, repo, similarity)
2049 backend = workingbackend(ui, repo, similarity)
2050 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2050 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2051
2051
2052 def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
2052 def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
2053 eolmode='strict'):
2053 eolmode='strict'):
2054 backend = repobackend(ui, repo, ctx, store)
2054 backend = repobackend(ui, repo, ctx, store)
2055 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2055 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2056
2056
2057 def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
2057 def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
2058 similarity=0):
2058 similarity=0):
2059 """Apply <patchname> to the working directory.
2059 """Apply <patchname> to the working directory.
2060
2060
2061 'eolmode' specifies how end of lines should be handled. It can be:
2061 'eolmode' specifies how end of lines should be handled. It can be:
2062 - 'strict': inputs are read in binary mode, EOLs are preserved
2062 - 'strict': inputs are read in binary mode, EOLs are preserved
2063 - 'crlf': EOLs are ignored when patching and reset to CRLF
2063 - 'crlf': EOLs are ignored when patching and reset to CRLF
2064 - 'lf': EOLs are ignored when patching and reset to LF
2064 - 'lf': EOLs are ignored when patching and reset to LF
2065 - None: get it from user settings, default to 'strict'
2065 - None: get it from user settings, default to 'strict'
2066 'eolmode' is ignored when using an external patcher program.
2066 'eolmode' is ignored when using an external patcher program.
2067
2067
2068 Returns whether patch was applied with fuzz factor.
2068 Returns whether patch was applied with fuzz factor.
2069 """
2069 """
2070 patcher = ui.config('ui', 'patch')
2070 patcher = ui.config('ui', 'patch')
2071 if files is None:
2071 if files is None:
2072 files = set()
2072 files = set()
2073 if patcher:
2073 if patcher:
2074 return _externalpatch(ui, repo, patcher, patchname, strip,
2074 return _externalpatch(ui, repo, patcher, patchname, strip,
2075 files, similarity)
2075 files, similarity)
2076 return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
2076 return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
2077 similarity)
2077 similarity)
2078
2078
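For orientation, a minimal usage sketch of the patch() entry point above. It is hypothetical (the repository path and patch file name are invented, and module layout follows the Mercurial version this file belongs to); passing eolmode=None lets patchbackend() fall back to the [patch] eol setting exactly as the docstring describes.

    # Hypothetical driver, not part of patch.py.
    from mercurial import hg, patch, ui as uimod

    myui = uimod.ui()
    repo = hg.repository(myui, '/path/to/repo')       # invented path
    touched = set()
    fuzz = patch.patch(myui, repo, 'fix.patch',        # invented patch name
                       strip=1, files=touched, eolmode=None)
    if fuzz:
        myui.warn('patch applied with fuzz\n')
    myui.write('files touched: %s\n' % ', '.join(sorted(touched)))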
2079 def changedfiles(ui, repo, patchpath, strip=1):
2079 def changedfiles(ui, repo, patchpath, strip=1):
2080 backend = fsbackend(ui, repo.root)
2080 backend = fsbackend(ui, repo.root)
2081 fp = open(patchpath, 'rb')
2081 fp = open(patchpath, 'rb')
2082 try:
2082 try:
2083 changed = set()
2083 changed = set()
2084 for state, values in iterhunks(fp):
2084 for state, values in iterhunks(fp):
2085 if state == 'file':
2085 if state == 'file':
2086 afile, bfile, first_hunk, gp = values
2086 afile, bfile, first_hunk, gp = values
2087 if gp:
2087 if gp:
2088 gp.path = pathtransform(gp.path, strip - 1, '')[1]
2088 gp.path = pathtransform(gp.path, strip - 1, '')[1]
2089 if gp.oldpath:
2089 if gp.oldpath:
2090 gp.oldpath = pathtransform(gp.oldpath, strip - 1, '')[1]
2090 gp.oldpath = pathtransform(gp.oldpath, strip - 1, '')[1]
2091 else:
2091 else:
2092 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
2092 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
2093 '')
2093 '')
2094 changed.add(gp.path)
2094 changed.add(gp.path)
2095 if gp.op == 'RENAME':
2095 if gp.op == 'RENAME':
2096 changed.add(gp.oldpath)
2096 changed.add(gp.oldpath)
2097 elif state not in ('hunk', 'git'):
2097 elif state not in ('hunk', 'git'):
2098 raise error.Abort(_('unsupported parser state: %s') % state)
2098 raise error.Abort(_('unsupported parser state: %s') % state)
2099 return changed
2099 return changed
2100 finally:
2100 finally:
2101 fp.close()
2101 fp.close()
2102
2102
2103 class GitDiffRequired(Exception):
2103 class GitDiffRequired(Exception):
2104 pass
2104 pass
2105
2105
2106 def diffallopts(ui, opts=None, untrusted=False, section='diff'):
2106 def diffallopts(ui, opts=None, untrusted=False, section='diff'):
2107 '''return diffopts with all features supported and parsed'''
2107 '''return diffopts with all features supported and parsed'''
2108 return difffeatureopts(ui, opts=opts, untrusted=untrusted, section=section,
2108 return difffeatureopts(ui, opts=opts, untrusted=untrusted, section=section,
2109 git=True, whitespace=True, formatchanging=True)
2109 git=True, whitespace=True, formatchanging=True)
2110
2110
2111 diffopts = diffallopts
2111 diffopts = diffallopts
2112
2112
2113 def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
2113 def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
2114 whitespace=False, formatchanging=False):
2114 whitespace=False, formatchanging=False):
2115 '''return diffopts with only opted-in features parsed
2115 '''return diffopts with only opted-in features parsed
2116
2116
2117 Features:
2117 Features:
2118 - git: git-style diffs
2118 - git: git-style diffs
2119 - whitespace: whitespace options like ignoreblanklines and ignorews
2119 - whitespace: whitespace options like ignoreblanklines and ignorews
2120 - formatchanging: options that will likely break or cause correctness issues
2120 - formatchanging: options that will likely break or cause correctness issues
2121 with most diff parsers
2121 with most diff parsers
2122 '''
2122 '''
2123 def get(key, name=None, getter=ui.configbool, forceplain=None):
2123 def get(key, name=None, getter=ui.configbool, forceplain=None):
2124 if opts:
2124 if opts:
2125 v = opts.get(key)
2125 v = opts.get(key)
2126 if v:
2126 if v:
2127 return v
2127 return v
2128 if forceplain is not None and ui.plain():
2128 if forceplain is not None and ui.plain():
2129 return forceplain
2129 return forceplain
2130 return getter(section, name or key, None, untrusted=untrusted)
2130 return getter(section, name or key, None, untrusted=untrusted)
2131
2131
2132 # core options, expected to be understood by every diff parser
2132 # core options, expected to be understood by every diff parser
2133 buildopts = {
2133 buildopts = {
2134 'nodates': get('nodates'),
2134 'nodates': get('nodates'),
2135 'showfunc': get('show_function', 'showfunc'),
2135 'showfunc': get('show_function', 'showfunc'),
2136 'context': get('unified', getter=ui.config),
2136 'context': get('unified', getter=ui.config),
2137 }
2137 }
2138
2138
2139 if git:
2139 if git:
2140 buildopts['git'] = get('git')
2140 buildopts['git'] = get('git')
2141 if whitespace:
2141 if whitespace:
2142 buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
2142 buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
2143 buildopts['ignorewsamount'] = get('ignore_space_change',
2143 buildopts['ignorewsamount'] = get('ignore_space_change',
2144 'ignorewsamount')
2144 'ignorewsamount')
2145 buildopts['ignoreblanklines'] = get('ignore_blank_lines',
2145 buildopts['ignoreblanklines'] = get('ignore_blank_lines',
2146 'ignoreblanklines')
2146 'ignoreblanklines')
2147 if formatchanging:
2147 if formatchanging:
2148 buildopts['text'] = opts and opts.get('text')
2148 buildopts['text'] = opts and opts.get('text')
2149 buildopts['nobinary'] = get('nobinary')
2149 buildopts['nobinary'] = get('nobinary', forceplain=False)
2150 buildopts['noprefix'] = get('noprefix', forceplain=False)
2150 buildopts['noprefix'] = get('noprefix', forceplain=False)
2151
2151
2152 return mdiff.diffopts(**buildopts)
2152 return mdiff.diffopts(**buildopts)
2153
2153
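The one-line change in this revision is the forceplain=False now passed for 'nobinary' above: when ui.plain() is true (for example because HGPLAIN=1 is set), the inner get() helper ignores diff.nobinary, so git diffs keep their binary hunks for scripts. A self-contained sketch of that resolution order follows; the dict-based stand-ins for the opts, the config and the plain flag are simplifications, not Mercurial APIs.

    # Simplified re-implementation of the get() helper's priority order:
    # explicit command-line opts win, then the HGPLAIN override (forceplain),
    # then the configuration value.
    def get(key, opts, config, plain, forceplain=None):
        v = opts.get(key)
        if v:
            return v
        if forceplain is not None and plain:
            return forceplain
        return config.get(key)

    config = {'nobinary': True}                # user sets diff.nobinary=True
    print(get('nobinary', {}, config, plain=False, forceplain=False))  # True
    print(get('nobinary', {}, config, plain=True,  forceplain=False))  # False
                                               # HGPLAIN=1: binary hunks return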
2154 def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
2154 def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
2155 losedatafn=None, prefix='', relroot=''):
2155 losedatafn=None, prefix='', relroot=''):
2156 '''yields diff of changes to files between two nodes, or node and
2156 '''yields diff of changes to files between two nodes, or node and
2157 working directory.
2157 working directory.
2158
2158
2159 if node1 is None, use first dirstate parent instead.
2159 if node1 is None, use first dirstate parent instead.
2160 if node2 is None, compare node1 with working directory.
2160 if node2 is None, compare node1 with working directory.
2161
2161
2162 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
2162 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
2163 every time some change cannot be represented with the current
2163 every time some change cannot be represented with the current
2164 patch format. Return False to upgrade to git patch format, True to
2164 patch format. Return False to upgrade to git patch format, True to
2165 accept the loss or raise an exception to abort the diff. It is
2165 accept the loss or raise an exception to abort the diff. It is
2166 called with the name of current file being diffed as 'fn'. If set
2166 called with the name of current file being diffed as 'fn'. If set
2167 to None, patches will always be upgraded to git format when
2167 to None, patches will always be upgraded to git format when
2168 necessary.
2168 necessary.
2169
2169
2170 prefix is a filename prefix that is prepended to all filenames on
2170 prefix is a filename prefix that is prepended to all filenames on
2171 display (used for subrepos).
2171 display (used for subrepos).
2172
2172
2173 relroot, if not empty, must be normalized with a trailing /. Any match
2173 relroot, if not empty, must be normalized with a trailing /. Any match
2174 patterns that fall outside it will be ignored.'''
2174 patterns that fall outside it will be ignored.'''
2175
2175
2176 if opts is None:
2176 if opts is None:
2177 opts = mdiff.defaultopts
2177 opts = mdiff.defaultopts
2178
2178
2179 if not node1 and not node2:
2179 if not node1 and not node2:
2180 node1 = repo.dirstate.p1()
2180 node1 = repo.dirstate.p1()
2181
2181
2182 def lrugetfilectx():
2182 def lrugetfilectx():
2183 cache = {}
2183 cache = {}
2184 order = collections.deque()
2184 order = collections.deque()
2185 def getfilectx(f, ctx):
2185 def getfilectx(f, ctx):
2186 fctx = ctx.filectx(f, filelog=cache.get(f))
2186 fctx = ctx.filectx(f, filelog=cache.get(f))
2187 if f not in cache:
2187 if f not in cache:
2188 if len(cache) > 20:
2188 if len(cache) > 20:
2189 del cache[order.popleft()]
2189 del cache[order.popleft()]
2190 cache[f] = fctx.filelog()
2190 cache[f] = fctx.filelog()
2191 else:
2191 else:
2192 order.remove(f)
2192 order.remove(f)
2193 order.append(f)
2193 order.append(f)
2194 return fctx
2194 return fctx
2195 return getfilectx
2195 return getfilectx
2196 getfilectx = lrugetfilectx()
2196 getfilectx = lrugetfilectx()
2197
2197
2198 ctx1 = repo[node1]
2198 ctx1 = repo[node1]
2199 ctx2 = repo[node2]
2199 ctx2 = repo[node2]
2200
2200
2201 relfiltered = False
2201 relfiltered = False
2202 if relroot != '' and match.always():
2202 if relroot != '' and match.always():
2203 # as a special case, create a new matcher with just the relroot
2203 # as a special case, create a new matcher with just the relroot
2204 pats = [relroot]
2204 pats = [relroot]
2205 match = scmutil.match(ctx2, pats, default='path')
2205 match = scmutil.match(ctx2, pats, default='path')
2206 relfiltered = True
2206 relfiltered = True
2207
2207
2208 if not changes:
2208 if not changes:
2209 changes = repo.status(ctx1, ctx2, match=match)
2209 changes = repo.status(ctx1, ctx2, match=match)
2210 modified, added, removed = changes[:3]
2210 modified, added, removed = changes[:3]
2211
2211
2212 if not modified and not added and not removed:
2212 if not modified and not added and not removed:
2213 return []
2213 return []
2214
2214
2215 if repo.ui.debugflag:
2215 if repo.ui.debugflag:
2216 hexfunc = hex
2216 hexfunc = hex
2217 else:
2217 else:
2218 hexfunc = short
2218 hexfunc = short
2219 revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]
2219 revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]
2220
2220
2221 copy = {}
2221 copy = {}
2222 if opts.git or opts.upgrade:
2222 if opts.git or opts.upgrade:
2223 copy = copies.pathcopies(ctx1, ctx2, match=match)
2223 copy = copies.pathcopies(ctx1, ctx2, match=match)
2224
2224
2225 if relroot is not None:
2225 if relroot is not None:
2226 if not relfiltered:
2226 if not relfiltered:
2227 # XXX this would ideally be done in the matcher, but that is
2227 # XXX this would ideally be done in the matcher, but that is
2228 # generally meant to 'or' patterns, not 'and' them. In this case we
2228 # generally meant to 'or' patterns, not 'and' them. In this case we
2229 # need to 'and' all the patterns from the matcher with relroot.
2229 # need to 'and' all the patterns from the matcher with relroot.
2230 def filterrel(l):
2230 def filterrel(l):
2231 return [f for f in l if f.startswith(relroot)]
2231 return [f for f in l if f.startswith(relroot)]
2232 modified = filterrel(modified)
2232 modified = filterrel(modified)
2233 added = filterrel(added)
2233 added = filterrel(added)
2234 removed = filterrel(removed)
2234 removed = filterrel(removed)
2235 relfiltered = True
2235 relfiltered = True
2236 # filter out copies where either side isn't inside the relative root
2236 # filter out copies where either side isn't inside the relative root
2237 copy = dict(((dst, src) for (dst, src) in copy.iteritems()
2237 copy = dict(((dst, src) for (dst, src) in copy.iteritems()
2238 if dst.startswith(relroot)
2238 if dst.startswith(relroot)
2239 and src.startswith(relroot)))
2239 and src.startswith(relroot)))
2240
2240
2241 def difffn(opts, losedata):
2241 def difffn(opts, losedata):
2242 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2242 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2243 copy, getfilectx, opts, losedata, prefix, relroot)
2243 copy, getfilectx, opts, losedata, prefix, relroot)
2244 if opts.upgrade and not opts.git:
2244 if opts.upgrade and not opts.git:
2245 try:
2245 try:
2246 def losedata(fn):
2246 def losedata(fn):
2247 if not losedatafn or not losedatafn(fn=fn):
2247 if not losedatafn or not losedatafn(fn=fn):
2248 raise GitDiffRequired
2248 raise GitDiffRequired
2249 # Buffer the whole output until we are sure it can be generated
2249 # Buffer the whole output until we are sure it can be generated
2250 return list(difffn(opts.copy(git=False), losedata))
2250 return list(difffn(opts.copy(git=False), losedata))
2251 except GitDiffRequired:
2251 except GitDiffRequired:
2252 return difffn(opts.copy(git=True), None)
2252 return difffn(opts.copy(git=True), None)
2253 else:
2253 else:
2254 return difffn(opts, None)
2254 return difffn(opts, None)
2255
2255
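The upgrade branch at the end of diff() is easiest to see in isolation: losedata() raises GitDiffRequired unless the caller-supplied losedatafn accepts the loss by returning True, and the plain output is buffered so nothing is emitted twice. A hedged, standalone sketch of that control flow, with a toy render() standing in for trydiff():

    class GitDiffRequired(Exception):
        pass

    def render(git, losedata):
        # pretend one file cannot be represented without git format
        if not git:
            losedata('binfile.bin')
        return ['(git diff)' if git else '(plain diff)']

    def diffwithupgrade(losedatafn):
        def losedata(fn):
            if not losedatafn or not losedatafn(fn=fn):
                raise GitDiffRequired
        try:
            # buffer the whole plain output before deciding it is safe
            return list(render(False, losedata))
        except GitDiffRequired:
            return render(True, None)

    print(diffwithupgrade(None))                # no callback: upgrade to git
    print(diffwithupgrade(lambda fn: True))     # caller accepts the data loss
    print(diffwithupgrade(lambda fn: False))    # caller asks for git format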
2256 def difflabel(func, *args, **kw):
2256 def difflabel(func, *args, **kw):
2257 '''yields 2-tuples of (output, label) based on the output of func()'''
2257 '''yields 2-tuples of (output, label) based on the output of func()'''
2258 headprefixes = [('diff', 'diff.diffline'),
2258 headprefixes = [('diff', 'diff.diffline'),
2259 ('copy', 'diff.extended'),
2259 ('copy', 'diff.extended'),
2260 ('rename', 'diff.extended'),
2260 ('rename', 'diff.extended'),
2261 ('old', 'diff.extended'),
2261 ('old', 'diff.extended'),
2262 ('new', 'diff.extended'),
2262 ('new', 'diff.extended'),
2263 ('deleted', 'diff.extended'),
2263 ('deleted', 'diff.extended'),
2264 ('---', 'diff.file_a'),
2264 ('---', 'diff.file_a'),
2265 ('+++', 'diff.file_b')]
2265 ('+++', 'diff.file_b')]
2266 textprefixes = [('@', 'diff.hunk'),
2266 textprefixes = [('@', 'diff.hunk'),
2267 ('-', 'diff.deleted'),
2267 ('-', 'diff.deleted'),
2268 ('+', 'diff.inserted')]
2268 ('+', 'diff.inserted')]
2269 head = False
2269 head = False
2270 for chunk in func(*args, **kw):
2270 for chunk in func(*args, **kw):
2271 lines = chunk.split('\n')
2271 lines = chunk.split('\n')
2272 for i, line in enumerate(lines):
2272 for i, line in enumerate(lines):
2273 if i != 0:
2273 if i != 0:
2274 yield ('\n', '')
2274 yield ('\n', '')
2275 if head:
2275 if head:
2276 if line.startswith('@'):
2276 if line.startswith('@'):
2277 head = False
2277 head = False
2278 else:
2278 else:
2279 if line and line[0] not in ' +-@\\':
2279 if line and line[0] not in ' +-@\\':
2280 head = True
2280 head = True
2281 stripline = line
2281 stripline = line
2282 diffline = False
2282 diffline = False
2283 if not head and line and line[0] in '+-':
2283 if not head and line and line[0] in '+-':
2284 # highlight tabs and trailing whitespace, but only in
2284 # highlight tabs and trailing whitespace, but only in
2285 # changed lines
2285 # changed lines
2286 stripline = line.rstrip()
2286 stripline = line.rstrip()
2287 diffline = True
2287 diffline = True
2288
2288
2289 prefixes = textprefixes
2289 prefixes = textprefixes
2290 if head:
2290 if head:
2291 prefixes = headprefixes
2291 prefixes = headprefixes
2292 for prefix, label in prefixes:
2292 for prefix, label in prefixes:
2293 if stripline.startswith(prefix):
2293 if stripline.startswith(prefix):
2294 if diffline:
2294 if diffline:
2295 for token in tabsplitter.findall(stripline):
2295 for token in tabsplitter.findall(stripline):
2296 if '\t' == token[0]:
2296 if '\t' == token[0]:
2297 yield (token, 'diff.tab')
2297 yield (token, 'diff.tab')
2298 else:
2298 else:
2299 yield (token, label)
2299 yield (token, label)
2300 else:
2300 else:
2301 yield (stripline, label)
2301 yield (stripline, label)
2302 break
2302 break
2303 else:
2303 else:
2304 yield (line, '')
2304 yield (line, '')
2305 if line != stripline:
2305 if line != stripline:
2306 yield (line[len(stripline):], 'diff.trailingwhitespace')
2306 yield (line[len(stripline):], 'diff.trailingwhitespace')
2307
2307
2308 def diffui(*args, **kw):
2308 def diffui(*args, **kw):
2309 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
2309 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
2310 return difflabel(diff, *args, **kw)
2310 return difflabel(diff, *args, **kw)
2311
2311
2312 def _filepairs(ctx1, modified, added, removed, copy, opts):
2312 def _filepairs(ctx1, modified, added, removed, copy, opts):
2313 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2313 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2314 before and f2 is the name after. For added files, f1 will be None,
2314 before and f2 is the name after. For added files, f1 will be None,
2315 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2315 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2316 or 'rename' (the latter two only if opts.git is set).'''
2316 or 'rename' (the latter two only if opts.git is set).'''
2317 gone = set()
2317 gone = set()
2318
2318
2319 copyto = dict([(v, k) for k, v in copy.items()])
2319 copyto = dict([(v, k) for k, v in copy.items()])
2320
2320
2321 addedset, removedset = set(added), set(removed)
2321 addedset, removedset = set(added), set(removed)
2322 # Fix up added, since merged-in additions appear as
2322 # Fix up added, since merged-in additions appear as
2323 # modifications during merges
2323 # modifications during merges
2324 for f in modified:
2324 for f in modified:
2325 if f not in ctx1:
2325 if f not in ctx1:
2326 addedset.add(f)
2326 addedset.add(f)
2327
2327
2328 for f in sorted(modified + added + removed):
2328 for f in sorted(modified + added + removed):
2329 copyop = None
2329 copyop = None
2330 f1, f2 = f, f
2330 f1, f2 = f, f
2331 if f in addedset:
2331 if f in addedset:
2332 f1 = None
2332 f1 = None
2333 if f in copy:
2333 if f in copy:
2334 if opts.git:
2334 if opts.git:
2335 f1 = copy[f]
2335 f1 = copy[f]
2336 if f1 in removedset and f1 not in gone:
2336 if f1 in removedset and f1 not in gone:
2337 copyop = 'rename'
2337 copyop = 'rename'
2338 gone.add(f1)
2338 gone.add(f1)
2339 else:
2339 else:
2340 copyop = 'copy'
2340 copyop = 'copy'
2341 elif f in removedset:
2341 elif f in removedset:
2342 f2 = None
2342 f2 = None
2343 if opts.git:
2343 if opts.git:
2344 # have we already reported a copy above?
2344 # have we already reported a copy above?
2345 if (f in copyto and copyto[f] in addedset
2345 if (f in copyto and copyto[f] in addedset
2346 and copy[copyto[f]] == f):
2346 and copy[copyto[f]] == f):
2347 continue
2347 continue
2348 yield f1, f2, copyop
2348 yield f1, f2, copyop
2349
2349
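As a quick reference, these are the tuples _filepairs() yields for the common cases when opts.git is set; the file names are invented for illustration.

    # (f1, f2, copyop) as described in the docstring above:
    #   added file            -> (None,   'newfile', None)
    #   removed file          -> ('gone',  None,     None)
    #   modified file         -> ('f',     'f',      None)
    #   copy  'a' -> 'b'      -> ('a',     'b',      'copy')
    #   rename 'old' -> 'new' -> ('old',   'new',    'rename')
    #                            ('old' is not reported again as a removal)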
2350 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2350 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2351 copy, getfilectx, opts, losedatafn, prefix, relroot):
2351 copy, getfilectx, opts, losedatafn, prefix, relroot):
2352 '''given input data, generate a diff and yield it in blocks
2352 '''given input data, generate a diff and yield it in blocks
2353
2353
2354 If generating a diff would lose data like flags or binary data and
2354 If generating a diff would lose data like flags or binary data and
2355 losedatafn is not None, it will be called.
2355 losedatafn is not None, it will be called.
2356
2356
2357 relroot is removed and prefix is added to every path in the diff output.
2357 relroot is removed and prefix is added to every path in the diff output.
2358
2358
2359 If relroot is not empty, this function expects every path in modified,
2359 If relroot is not empty, this function expects every path in modified,
2360 added, removed and copy to start with it.'''
2360 added, removed and copy to start with it.'''
2361
2361
2362 def gitindex(text):
2362 def gitindex(text):
2363 if not text:
2363 if not text:
2364 text = ""
2364 text = ""
2365 l = len(text)
2365 l = len(text)
2366 s = util.sha1('blob %d\0' % l)
2366 s = util.sha1('blob %d\0' % l)
2367 s.update(text)
2367 s.update(text)
2368 return s.hexdigest()
2368 return s.hexdigest()
2369
2369
2370 if opts.noprefix:
2370 if opts.noprefix:
2371 aprefix = bprefix = ''
2371 aprefix = bprefix = ''
2372 else:
2372 else:
2373 aprefix = 'a/'
2373 aprefix = 'a/'
2374 bprefix = 'b/'
2374 bprefix = 'b/'
2375
2375
2376 def diffline(f, revs):
2376 def diffline(f, revs):
2377 revinfo = ' '.join(["-r %s" % rev for rev in revs])
2377 revinfo = ' '.join(["-r %s" % rev for rev in revs])
2378 return 'diff %s %s' % (revinfo, f)
2378 return 'diff %s %s' % (revinfo, f)
2379
2379
2380 date1 = util.datestr(ctx1.date())
2380 date1 = util.datestr(ctx1.date())
2381 date2 = util.datestr(ctx2.date())
2381 date2 = util.datestr(ctx2.date())
2382
2382
2383 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
2383 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
2384
2384
2385 if relroot != '' and (repo.ui.configbool('devel', 'all')
2385 if relroot != '' and (repo.ui.configbool('devel', 'all')
2386 or repo.ui.configbool('devel', 'check-relroot')):
2386 or repo.ui.configbool('devel', 'check-relroot')):
2387 for f in modified + added + removed + copy.keys() + copy.values():
2387 for f in modified + added + removed + copy.keys() + copy.values():
2388 if f is not None and not f.startswith(relroot):
2388 if f is not None and not f.startswith(relroot):
2389 raise AssertionError(
2389 raise AssertionError(
2390 "file %s doesn't start with relroot %s" % (f, relroot))
2390 "file %s doesn't start with relroot %s" % (f, relroot))
2391
2391
2392 for f1, f2, copyop in _filepairs(
2392 for f1, f2, copyop in _filepairs(
2393 ctx1, modified, added, removed, copy, opts):
2393 ctx1, modified, added, removed, copy, opts):
2394 content1 = None
2394 content1 = None
2395 content2 = None
2395 content2 = None
2396 flag1 = None
2396 flag1 = None
2397 flag2 = None
2397 flag2 = None
2398 if f1:
2398 if f1:
2399 content1 = getfilectx(f1, ctx1).data()
2399 content1 = getfilectx(f1, ctx1).data()
2400 if opts.git or losedatafn:
2400 if opts.git or losedatafn:
2401 flag1 = ctx1.flags(f1)
2401 flag1 = ctx1.flags(f1)
2402 if f2:
2402 if f2:
2403 content2 = getfilectx(f2, ctx2).data()
2403 content2 = getfilectx(f2, ctx2).data()
2404 if opts.git or losedatafn:
2404 if opts.git or losedatafn:
2405 flag2 = ctx2.flags(f2)
2405 flag2 = ctx2.flags(f2)
2406 binary = False
2406 binary = False
2407 if opts.git or losedatafn:
2407 if opts.git or losedatafn:
2408 binary = util.binary(content1) or util.binary(content2)
2408 binary = util.binary(content1) or util.binary(content2)
2409
2409
2410 if losedatafn and not opts.git:
2410 if losedatafn and not opts.git:
2411 if (binary or
2411 if (binary or
2412 # copy/rename
2412 # copy/rename
2413 f2 in copy or
2413 f2 in copy or
2414 # empty file creation
2414 # empty file creation
2415 (not f1 and not content2) or
2415 (not f1 and not content2) or
2416 # empty file deletion
2416 # empty file deletion
2417 (not content1 and not f2) or
2417 (not content1 and not f2) or
2418 # create with flags
2418 # create with flags
2419 (not f1 and flag2) or
2419 (not f1 and flag2) or
2420 # change flags
2420 # change flags
2421 (f1 and f2 and flag1 != flag2)):
2421 (f1 and f2 and flag1 != flag2)):
2422 losedatafn(f2 or f1)
2422 losedatafn(f2 or f1)
2423
2423
2424 path1 = f1 or f2
2424 path1 = f1 or f2
2425 path2 = f2 or f1
2425 path2 = f2 or f1
2426 path1 = posixpath.join(prefix, path1[len(relroot):])
2426 path1 = posixpath.join(prefix, path1[len(relroot):])
2427 path2 = posixpath.join(prefix, path2[len(relroot):])
2427 path2 = posixpath.join(prefix, path2[len(relroot):])
2428 header = []
2428 header = []
2429 if opts.git:
2429 if opts.git:
2430 header.append('diff --git %s%s %s%s' %
2430 header.append('diff --git %s%s %s%s' %
2431 (aprefix, path1, bprefix, path2))
2431 (aprefix, path1, bprefix, path2))
2432 if not f1: # added
2432 if not f1: # added
2433 header.append('new file mode %s' % gitmode[flag2])
2433 header.append('new file mode %s' % gitmode[flag2])
2434 elif not f2: # removed
2434 elif not f2: # removed
2435 header.append('deleted file mode %s' % gitmode[flag1])
2435 header.append('deleted file mode %s' % gitmode[flag1])
2436 else: # modified/copied/renamed
2436 else: # modified/copied/renamed
2437 mode1, mode2 = gitmode[flag1], gitmode[flag2]
2437 mode1, mode2 = gitmode[flag1], gitmode[flag2]
2438 if mode1 != mode2:
2438 if mode1 != mode2:
2439 header.append('old mode %s' % mode1)
2439 header.append('old mode %s' % mode1)
2440 header.append('new mode %s' % mode2)
2440 header.append('new mode %s' % mode2)
2441 if copyop is not None:
2441 if copyop is not None:
2442 header.append('%s from %s' % (copyop, path1))
2442 header.append('%s from %s' % (copyop, path1))
2443 header.append('%s to %s' % (copyop, path2))
2443 header.append('%s to %s' % (copyop, path2))
2444 elif revs and not repo.ui.quiet:
2444 elif revs and not repo.ui.quiet:
2445 header.append(diffline(path1, revs))
2445 header.append(diffline(path1, revs))
2446
2446
2447 if binary and opts.git and not opts.nobinary:
2447 if binary and opts.git and not opts.nobinary:
2448 text = mdiff.b85diff(content1, content2)
2448 text = mdiff.b85diff(content1, content2)
2449 if text:
2449 if text:
2450 header.append('index %s..%s' %
2450 header.append('index %s..%s' %
2451 (gitindex(content1), gitindex(content2)))
2451 (gitindex(content1), gitindex(content2)))
2452 else:
2452 else:
2453 text = mdiff.unidiff(content1, date1,
2453 text = mdiff.unidiff(content1, date1,
2454 content2, date2,
2454 content2, date2,
2455 path1, path2, opts=opts)
2455 path1, path2, opts=opts)
2456 if header and (text or len(header) > 1):
2456 if header and (text or len(header) > 1):
2457 yield '\n'.join(header) + '\n'
2457 yield '\n'.join(header) + '\n'
2458 if text:
2458 if text:
2459 yield text
2459 yield text
2460
2460
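gitindex() above is simply the git blob hash: SHA-1 over a 'blob <length>\0' header followed by the file content. A standalone check using hashlib directly (util.sha1 is essentially hashlib.sha1); the 5-byte content 'text\n' matches the nonbinfile created in the test script further down, so the digest should agree with the second half of its index line there.

    import hashlib

    def gitindex(text):
        # same construction as the helper inside trydiff()
        s = hashlib.sha1(b'blob %d\0' % len(text))
        s.update(text)
        return s.hexdigest()

    print(gitindex(b'text\n'))
    # expected: 8e27be7d6154a1f68ea9160ef0e18691d20560dc
    # (compare the "index ..." line for nonbinfile in the test output below)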
2461 def diffstatsum(stats):
2461 def diffstatsum(stats):
2462 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
2462 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
2463 for f, a, r, b in stats:
2463 for f, a, r, b in stats:
2464 maxfile = max(maxfile, encoding.colwidth(f))
2464 maxfile = max(maxfile, encoding.colwidth(f))
2465 maxtotal = max(maxtotal, a + r)
2465 maxtotal = max(maxtotal, a + r)
2466 addtotal += a
2466 addtotal += a
2467 removetotal += r
2467 removetotal += r
2468 binary = binary or b
2468 binary = binary or b
2469
2469
2470 return maxfile, maxtotal, addtotal, removetotal, binary
2470 return maxfile, maxtotal, addtotal, removetotal, binary
2471
2471
2472 def diffstatdata(lines):
2472 def diffstatdata(lines):
2473 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
2473 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
2474
2474
2475 results = []
2475 results = []
2476 filename, adds, removes, isbinary = None, 0, 0, False
2476 filename, adds, removes, isbinary = None, 0, 0, False
2477
2477
2478 def addresult():
2478 def addresult():
2479 if filename:
2479 if filename:
2480 results.append((filename, adds, removes, isbinary))
2480 results.append((filename, adds, removes, isbinary))
2481
2481
2482 for line in lines:
2482 for line in lines:
2483 if line.startswith('diff'):
2483 if line.startswith('diff'):
2484 addresult()
2484 addresult()
2485 # set numbers to 0 anyway when starting new file
2485 # set numbers to 0 anyway when starting new file
2486 adds, removes, isbinary = 0, 0, False
2486 adds, removes, isbinary = 0, 0, False
2487 if line.startswith('diff --git a/'):
2487 if line.startswith('diff --git a/'):
2488 filename = gitre.search(line).group(2)
2488 filename = gitre.search(line).group(2)
2489 elif line.startswith('diff -r'):
2489 elif line.startswith('diff -r'):
2490 # format: "diff -r ... -r ... filename"
2490 # format: "diff -r ... -r ... filename"
2491 filename = diffre.search(line).group(1)
2491 filename = diffre.search(line).group(1)
2492 elif line.startswith('+') and not line.startswith('+++ '):
2492 elif line.startswith('+') and not line.startswith('+++ '):
2493 adds += 1
2493 adds += 1
2494 elif line.startswith('-') and not line.startswith('--- '):
2494 elif line.startswith('-') and not line.startswith('--- '):
2495 removes += 1
2495 removes += 1
2496 elif (line.startswith('GIT binary patch') or
2496 elif (line.startswith('GIT binary patch') or
2497 line.startswith('Binary file')):
2497 line.startswith('Binary file')):
2498 isbinary = True
2498 isbinary = True
2499 addresult()
2499 addresult()
2500 return results
2500 return results
2501
2501
2502 def diffstat(lines, width=80, git=False):
2502 def diffstat(lines, width=80, git=False):
2503 output = []
2503 output = []
2504 stats = diffstatdata(lines)
2504 stats = diffstatdata(lines)
2505 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
2505 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
2506
2506
2507 countwidth = len(str(maxtotal))
2507 countwidth = len(str(maxtotal))
2508 if hasbinary and countwidth < 3:
2508 if hasbinary and countwidth < 3:
2509 countwidth = 3
2509 countwidth = 3
2510 graphwidth = width - countwidth - maxname - 6
2510 graphwidth = width - countwidth - maxname - 6
2511 if graphwidth < 10:
2511 if graphwidth < 10:
2512 graphwidth = 10
2512 graphwidth = 10
2513
2513
2514 def scale(i):
2514 def scale(i):
2515 if maxtotal <= graphwidth:
2515 if maxtotal <= graphwidth:
2516 return i
2516 return i
2517 # If diffstat runs out of room it doesn't print anything,
2517 # If diffstat runs out of room it doesn't print anything,
2518 # which isn't very useful, so always print at least one + or -
2518 # which isn't very useful, so always print at least one + or -
2519 # if there were at least some changes.
2519 # if there were at least some changes.
2520 return max(i * graphwidth // maxtotal, int(bool(i)))
2520 return max(i * graphwidth // maxtotal, int(bool(i)))
2521
2521
2522 for filename, adds, removes, isbinary in stats:
2522 for filename, adds, removes, isbinary in stats:
2523 if isbinary:
2523 if isbinary:
2524 count = 'Bin'
2524 count = 'Bin'
2525 else:
2525 else:
2526 count = adds + removes
2526 count = adds + removes
2527 pluses = '+' * scale(adds)
2527 pluses = '+' * scale(adds)
2528 minuses = '-' * scale(removes)
2528 minuses = '-' * scale(removes)
2529 output.append(' %s%s | %*s %s%s\n' %
2529 output.append(' %s%s | %*s %s%s\n' %
2530 (filename, ' ' * (maxname - encoding.colwidth(filename)),
2530 (filename, ' ' * (maxname - encoding.colwidth(filename)),
2531 countwidth, count, pluses, minuses))
2531 countwidth, count, pluses, minuses))
2532
2532
2533 if stats:
2533 if stats:
2534 output.append(_(' %d files changed, %d insertions(+), '
2534 output.append(_(' %d files changed, %d insertions(+), '
2535 '%d deletions(-)\n')
2535 '%d deletions(-)\n')
2536 % (len(stats), totaladds, totalremoves))
2536 % (len(stats), totaladds, totalremoves))
2537
2537
2538 return ''.join(output)
2538 return ''.join(output)
2539
2539
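The scale() helper inside diffstat() caps each histogram bar at graphwidth characters while guaranteeing at least one '+' or '-' for any nonzero count. A small worked example with invented numbers:

    graphwidth, maxtotal = 50, 200      # invented values for illustration

    def scale(i):
        if maxtotal <= graphwidth:
            return i
        return max(i * graphwidth // maxtotal, int(bool(i)))

    print(scale(120))   # 30 -> thirty characters of bar
    print(scale(1))     # 1  -> a single change still shows one character
    print(scale(0))     # 0  -> nothing drawn for an untouched side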
2540 def diffstatui(*args, **kw):
2540 def diffstatui(*args, **kw):
2541 '''like diffstat(), but yields 2-tuples of (output, label) for
2541 '''like diffstat(), but yields 2-tuples of (output, label) for
2542 ui.write()
2542 ui.write()
2543 '''
2543 '''
2544
2544
2545 for line in diffstat(*args, **kw).splitlines():
2545 for line in diffstat(*args, **kw).splitlines():
2546 if line and line[-1] in '+-':
2546 if line and line[-1] in '+-':
2547 name, graph = line.rsplit(' ', 1)
2547 name, graph = line.rsplit(' ', 1)
2548 yield (name + ' ', '')
2548 yield (name + ' ', '')
2549 m = re.search(r'\++', graph)
2549 m = re.search(r'\++', graph)
2550 if m:
2550 if m:
2551 yield (m.group(0), 'diffstat.inserted')
2551 yield (m.group(0), 'diffstat.inserted')
2552 m = re.search(r'-+', graph)
2552 m = re.search(r'-+', graph)
2553 if m:
2553 if m:
2554 yield (m.group(0), 'diffstat.deleted')
2554 yield (m.group(0), 'diffstat.deleted')
2555 else:
2555 else:
2556 yield (line, '')
2556 yield (line, '')
2557 yield ('\n', '')
2557 yield ('\n', '')
@@ -1,57 +1,77 b''
1 $ hg init a
1 $ hg init a
2 $ cd a
2 $ cd a
3 $ cp "$TESTDIR/binfile.bin" .
3 $ cp "$TESTDIR/binfile.bin" .
4 $ hg add binfile.bin
4 $ hg add binfile.bin
5 $ hg ci -m 'add binfile.bin'
5 $ hg ci -m 'add binfile.bin'
6
6
7 $ echo >> binfile.bin
7 $ echo >> binfile.bin
8 $ hg ci -m 'change binfile.bin'
8 $ hg ci -m 'change binfile.bin'
9
9
10 $ hg revert -r 0 binfile.bin
10 $ hg revert -r 0 binfile.bin
11 $ hg ci -m 'revert binfile.bin'
11 $ hg ci -m 'revert binfile.bin'
12 $ hg cp binfile.bin nonbinfile
12 $ hg cp binfile.bin nonbinfile
13 $ echo text > nonbinfile
13 $ echo text > nonbinfile
14 $ hg ci -m 'make non-binary copy of binary file'
14 $ hg ci -m 'make non-binary copy of binary file'
15
15
16 $ hg diff --nodates -r 0 -r 1
16 $ hg diff --nodates -r 0 -r 1
17 diff -r 48b371597640 -r acea2ab458c8 binfile.bin
17 diff -r 48b371597640 -r acea2ab458c8 binfile.bin
18 Binary file binfile.bin has changed
18 Binary file binfile.bin has changed
19
19
20 $ hg diff --nodates -r 0 -r 2
20 $ hg diff --nodates -r 0 -r 2
21
21
22 $ hg diff --git -r 0 -r 1
22 $ hg diff --git -r 0 -r 1
23 diff --git a/binfile.bin b/binfile.bin
23 diff --git a/binfile.bin b/binfile.bin
24 index 37ba3d1c6f17137d9c5f5776fa040caf5fe73ff9..58dc31a9e2f40f74ff3b45903f7d620b8e5b7356
24 index 37ba3d1c6f17137d9c5f5776fa040caf5fe73ff9..58dc31a9e2f40f74ff3b45903f7d620b8e5b7356
25 GIT binary patch
25 GIT binary patch
26 literal 594
26 literal 594
27 zc$@)J0<HatP)<h;3K|Lk000e1NJLTq000mG000mO0ssI2kdbIM00009a7bBm000XU
27 zc$@)J0<HatP)<h;3K|Lk000e1NJLTq000mG000mO0ssI2kdbIM00009a7bBm000XU
28 z000XU0RWnu7ytkO2XskIMF-Uh9TW;VpMjwv0005-Nkl<ZD9@FWPs=e;7{<>W$NUkd
28 z000XU0RWnu7ytkO2XskIMF-Uh9TW;VpMjwv0005-Nkl<ZD9@FWPs=e;7{<>W$NUkd
29 zX$nnYLt$-$V!?uy+1V%`z&Eh=ah|duER<4|QWhju3gb^nF*8iYobxWG-qqXl=2~5M
29 zX$nnYLt$-$V!?uy+1V%`z&Eh=ah|duER<4|QWhju3gb^nF*8iYobxWG-qqXl=2~5M
30 z*IoDB)sG^CfNuoBmqLTVU^<;@nwHP!1wrWd`{(mHo6VNXWtyh{alzqmsH*yYzpvLT
30 z*IoDB)sG^CfNuoBmqLTVU^<;@nwHP!1wrWd`{(mHo6VNXWtyh{alzqmsH*yYzpvLT
31 zLdY<T=ks|woh-`&01!ej#(xbV1f|pI*=%;d-%F*E*X#ZH`4I%6SS+$EJDE&ct=8po
31 zLdY<T=ks|woh-`&01!ej#(xbV1f|pI*=%;d-%F*E*X#ZH`4I%6SS+$EJDE&ct=8po
32 ziN#{?_j|kD%Cd|oiqds`xm@;oJ-^?NG3Gdqrs?5u*zI;{nogxsx~^|Fn^Y?Gdc6<;
32 ziN#{?_j|kD%Cd|oiqds`xm@;oJ-^?NG3Gdqrs?5u*zI;{nogxsx~^|Fn^Y?Gdc6<;
33 zfMJ+iF1J`LMx&A2?dEwNW8ClebzPTbIh{@$hS6*`kH@1d%Lo7fA#}N1)oN7`gm$~V
33 zfMJ+iF1J`LMx&A2?dEwNW8ClebzPTbIh{@$hS6*`kH@1d%Lo7fA#}N1)oN7`gm$~V
34 z+wDx#)OFqMcE{s!JN0-xhG8ItAjVkJwEcb`3WWlJfU2r?;Pd%dmR+q@mSri5q9_W-
34 z+wDx#)OFqMcE{s!JN0-xhG8ItAjVkJwEcb`3WWlJfU2r?;Pd%dmR+q@mSri5q9_W-
35 zaR2~ECX?B2w+zELozC0s*6Z~|QG^f{3I#<`?)Q7U-JZ|q5W;9Q8i_=pBuSzunx=U;
35 zaR2~ECX?B2w+zELozC0s*6Z~|QG^f{3I#<`?)Q7U-JZ|q5W;9Q8i_=pBuSzunx=U;
36 z9C)5jBoYw9^?EHyQl(M}1OlQcCX>lXB*ODN003Z&P17_@)3Pi=i0wb04<W?v-u}7K
36 z9C)5jBoYw9^?EHyQl(M}1OlQcCX>lXB*ODN003Z&P17_@)3Pi=i0wb04<W?v-u}7K
37 zXmmQA+wDgE!qR9o8jr`%=ab_&uh(l?R=r;Tjiqon91I2-hIu?57~@*4h7h9uORK#=
37 zXmmQA+wDgE!qR9o8jr`%=ab_&uh(l?R=r;Tjiqon91I2-hIu?57~@*4h7h9uORK#=
38 gQItJW-{SoTm)8|5##k|m00000NkvXXu0mjf3JwksH2?qr
38 gQItJW-{SoTm)8|5##k|m00000NkvXXu0mjf3JwksH2?qr
39
39
40
40
41 $ hg diff --git -r 0 -r 2
41 $ hg diff --git -r 0 -r 2
42
42
43 $ hg diff --config diff.nobinary=True --git -r 0 -r 1
43 $ hg diff --config diff.nobinary=True --git -r 0 -r 1
44 diff --git a/binfile.bin b/binfile.bin
44 diff --git a/binfile.bin b/binfile.bin
45 Binary file binfile.bin has changed
45 Binary file binfile.bin has changed
46
46
47 $ HGPLAIN=1 hg diff --config diff.nobinary=True --git -r 0 -r 1
48 diff --git a/binfile.bin b/binfile.bin
49 index 37ba3d1c6f17137d9c5f5776fa040caf5fe73ff9..58dc31a9e2f40f74ff3b45903f7d620b8e5b7356
50 GIT binary patch
51 literal 594
52 zc$@)J0<HatP)<h;3K|Lk000e1NJLTq000mG000mO0ssI2kdbIM00009a7bBm000XU
53 z000XU0RWnu7ytkO2XskIMF-Uh9TW;VpMjwv0005-Nkl<ZD9@FWPs=e;7{<>W$NUkd
54 zX$nnYLt$-$V!?uy+1V%`z&Eh=ah|duER<4|QWhju3gb^nF*8iYobxWG-qqXl=2~5M
55 z*IoDB)sG^CfNuoBmqLTVU^<;@nwHP!1wrWd`{(mHo6VNXWtyh{alzqmsH*yYzpvLT
56 zLdY<T=ks|woh-`&01!ej#(xbV1f|pI*=%;d-%F*E*X#ZH`4I%6SS+$EJDE&ct=8po
57 ziN#{?_j|kD%Cd|oiqds`xm@;oJ-^?NG3Gdqrs?5u*zI;{nogxsx~^|Fn^Y?Gdc6<;
58 zfMJ+iF1J`LMx&A2?dEwNW8ClebzPTbIh{@$hS6*`kH@1d%Lo7fA#}N1)oN7`gm$~V
59 z+wDx#)OFqMcE{s!JN0-xhG8ItAjVkJwEcb`3WWlJfU2r?;Pd%dmR+q@mSri5q9_W-
60 zaR2~ECX?B2w+zELozC0s*6Z~|QG^f{3I#<`?)Q7U-JZ|q5W;9Q8i_=pBuSzunx=U;
61 z9C)5jBoYw9^?EHyQl(M}1OlQcCX>lXB*ODN003Z&P17_@)3Pi=i0wb04<W?v-u}7K
62 zXmmQA+wDgE!qR9o8jr`%=ab_&uh(l?R=r;Tjiqon91I2-hIu?57~@*4h7h9uORK#=
63 gQItJW-{SoTm)8|5##k|m00000NkvXXu0mjf3JwksH2?qr
64
65
66
47 $ hg diff --git -r 2 -r 3
67 $ hg diff --git -r 2 -r 3
48 diff --git a/binfile.bin b/nonbinfile
68 diff --git a/binfile.bin b/nonbinfile
49 copy from binfile.bin
69 copy from binfile.bin
50 copy to nonbinfile
70 copy to nonbinfile
51 index 37ba3d1c6f17137d9c5f5776fa040caf5fe73ff9..8e27be7d6154a1f68ea9160ef0e18691d20560dc
71 index 37ba3d1c6f17137d9c5f5776fa040caf5fe73ff9..8e27be7d6154a1f68ea9160ef0e18691d20560dc
52 GIT binary patch
72 GIT binary patch
53 literal 5
73 literal 5
54 Mc$_OqttjCF00uV!&;S4c
74 Mc$_OqttjCF00uV!&;S4c
55
75
56
76
57 $ cd ..
77 $ cd ..