patch.trydiff: add a docstring
Siddharth Agarwal
r24371:8a997bd7 default
@@ -1,2416 +1,2422 @@
# patch.py - patch file parsing routines
#
# Copyright 2006 Brendan Cully <brendan@kublai.com>
# Copyright 2007 Chris Mason <chris.mason@oracle.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

import cStringIO, email, os, errno, re, posixpath, copy
import tempfile, zlib, shutil
# On python2.4 you have to import these by name or they fail to
# load. This was not a problem on Python 2.7.
import email.Generator
import email.Parser

from i18n import _
from node import hex, short
import cStringIO
import base85, mdiff, scmutil, util, diffhelpers, copies, encoding, error

gitre = re.compile('diff --git a/(.*) b/(.*)')
tabsplitter = re.compile(r'(\t+|[^\t]+)')

class PatchError(Exception):
    pass


# public functions

def split(stream):
    '''return an iterator of individual patches from a stream'''
    def isheader(line, inheader):
        if inheader and line[0] in (' ', '\t'):
            # continuation
            return True
        if line[0] in (' ', '-', '+'):
            # diff line - don't check for header pattern in there
            return False
        l = line.split(': ', 1)
        return len(l) == 2 and ' ' not in l[0]

    def chunk(lines):
        return cStringIO.StringIO(''.join(lines))

    def hgsplit(stream, cur):
        inheader = True

        for line in stream:
            if not line.strip():
                inheader = False
            if not inheader and line.startswith('# HG changeset patch'):
                yield chunk(cur)
                cur = []
                inheader = True

            cur.append(line)

        if cur:
            yield chunk(cur)

    def mboxsplit(stream, cur):
        for line in stream:
            if line.startswith('From '):
                for c in split(chunk(cur[1:])):
                    yield c
                cur = []

            cur.append(line)

        if cur:
            for c in split(chunk(cur[1:])):
                yield c

    def mimesplit(stream, cur):
        def msgfp(m):
            fp = cStringIO.StringIO()
            g = email.Generator.Generator(fp, mangle_from_=False)
            g.flatten(m)
            fp.seek(0)
            return fp

        for line in stream:
            cur.append(line)
        c = chunk(cur)

        m = email.Parser.Parser().parse(c)
        if not m.is_multipart():
            yield msgfp(m)
        else:
            ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
            for part in m.walk():
                ct = part.get_content_type()
                if ct not in ok_types:
                    continue
                yield msgfp(part)

    def headersplit(stream, cur):
        inheader = False

        for line in stream:
            if not inheader and isheader(line, inheader):
                yield chunk(cur)
                cur = []
                inheader = True
            if inheader and not isheader(line, inheader):
                inheader = False

            cur.append(line)

        if cur:
            yield chunk(cur)

    def remainder(cur):
        yield chunk(cur)

    class fiter(object):
        def __init__(self, fp):
            self.fp = fp

        def __iter__(self):
            return self

        def next(self):
            l = self.fp.readline()
            if not l:
                raise StopIteration
            return l

    inheader = False
    cur = []

    mimeheaders = ['content-type']

    if not util.safehasattr(stream, 'next'):
        # http responses, for example, have readline but not next
        stream = fiter(stream)

    for line in stream:
        cur.append(line)
        if line.startswith('# HG changeset patch'):
            return hgsplit(stream, cur)
        elif line.startswith('From '):
            return mboxsplit(stream, cur)
        elif isheader(line, inheader):
            inheader = True
            if line.split(':', 1)[0].lower() in mimeheaders:
                # let email parser handle this
                return mimesplit(stream, cur)
        elif line.startswith('--- ') and inheader:
            # No evil headers seen by diff start, split by hand
            return headersplit(stream, cur)
        # Not enough info, keep reading

    # if we are here, we have a very plain patch
    return remainder(cur)

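split() only inspects the first recognisable line to pick a splitting strategy (hg export header, mbox "From " line, MIME headers, or a bare diff) and then hands the stream to the matching generator. A minimal sketch of that behaviour, using made-up patch text and assuming Mercurial is installed so this module is importable as mercurial.patch:

    # editor's sketch, not part of the original module; the patch text is invented
    import cStringIO
    from mercurial import patch

    data = ('# HG changeset patch\n'
            '# User alice\n'
            'first change\n'
            '\n'
            'diff -r 0 -r 1 a.txt\n'
            '--- a/a.txt\n'
            '+++ b/a.txt\n'
            '@@ -1,1 +1,1 @@\n'
            '-old\n'
            '+new\n'
            '# HG changeset patch\n'
            '# User bob\n'
            'second change\n')

    for i, chunk in enumerate(patch.split(cStringIO.StringIO(data))):
        print 'patch %d: %d lines' % (i, len(chunk.readlines()))
    # patch 0: 10 lines
    # patch 1: 3 lines
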
def extract(ui, fileobj):
    '''extract patch from data read from fileobj.

    patch can be a normal patch or contained in an email message.

    return tuple (filename, message, user, date, branch, node, p1, p2).
    Any item in the returned tuple can be None. If filename is None,
    fileobj did not contain a patch. Caller must unlink filename when done.'''

    # attempt to detect the start of a patch
    # (this heuristic is borrowed from quilt)
    diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
                        r'retrieving revision [0-9]+(\.[0-9]+)*$|'
                        r'---[ \t].*?^\+\+\+[ \t]|'
                        r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)

    fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
    tmpfp = os.fdopen(fd, 'w')
    try:
        msg = email.Parser.Parser().parse(fileobj)

        subject = msg['Subject']
        user = msg['From']
        if not subject and not user:
            # Not an email, restore parsed headers if any
            subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'

        # should try to parse msg['Date']
        date = None
        nodeid = None
        branch = None
        parents = []

        if subject:
            if subject.startswith('[PATCH'):
                pend = subject.find(']')
                if pend >= 0:
                    subject = subject[pend + 1:].lstrip()
            subject = re.sub(r'\n[ \t]+', ' ', subject)
            ui.debug('Subject: %s\n' % subject)
        if user:
            ui.debug('From: %s\n' % user)
        diffs_seen = 0
        ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
        message = ''
        for part in msg.walk():
            content_type = part.get_content_type()
            ui.debug('Content-Type: %s\n' % content_type)
            if content_type not in ok_types:
                continue
            payload = part.get_payload(decode=True)
            m = diffre.search(payload)
            if m:
                hgpatch = False
                hgpatchheader = False
                ignoretext = False

                ui.debug('found patch at byte %d\n' % m.start(0))
                diffs_seen += 1
                cfp = cStringIO.StringIO()
                for line in payload[:m.start(0)].splitlines():
                    if line.startswith('# HG changeset patch') and not hgpatch:
                        ui.debug('patch generated by hg export\n')
                        hgpatch = True
                        hgpatchheader = True
                        # drop earlier commit message content
                        cfp.seek(0)
                        cfp.truncate()
                        subject = None
                    elif hgpatchheader:
                        if line.startswith('# User '):
                            user = line[7:]
                            ui.debug('From: %s\n' % user)
                        elif line.startswith("# Date "):
                            date = line[7:]
                        elif line.startswith("# Branch "):
                            branch = line[9:]
                        elif line.startswith("# Node ID "):
                            nodeid = line[10:]
                        elif line.startswith("# Parent "):
                            parents.append(line[9:].lstrip())
                        elif not line.startswith("# "):
                            hgpatchheader = False
                    elif line == '---':
                        ignoretext = True
                    if not hgpatchheader and not ignoretext:
                        cfp.write(line)
                        cfp.write('\n')
                message = cfp.getvalue()
                if tmpfp:
                    tmpfp.write(payload)
                    if not payload.endswith('\n'):
                        tmpfp.write('\n')
            elif not diffs_seen and message and content_type == 'text/plain':
                message += '\n' + payload
    except: # re-raises
        tmpfp.close()
        os.unlink(tmpname)
        raise

    if subject and not message.startswith(subject):
        message = '%s\n%s' % (subject, message)
    tmpfp.close()
    if not diffs_seen:
        os.unlink(tmpname)
        return None, message, user, date, branch, None, None, None

    if parents:
        p1 = parents.pop(0)
    else:
        p1 = None

    if parents:
        p2 = parents.pop(0)
    else:
        p2 = None

    return tmpname, message, user, date, branch, nodeid, p1, p2

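The docstring above puts the burden of deleting the temporary patch file on the caller. A minimal sketch of calling extract() on a hypothetical mail-style message, assuming a default ui object can still be created directly with ui.ui(), as it could in Mercurial of this vintage:

    # editor's sketch, not part of the original module; message text is invented
    import os
    import cStringIO
    from mercurial import ui as uimod, patch

    msg = ('From: alice@example.com\n'
           'Subject: [PATCH] fix a typo\n'
           '\n'
           'diff -r 0 -r 1 a.txt\n'
           '--- a/a.txt\n'
           '+++ b/a.txt\n'
           '@@ -1,1 +1,1 @@\n'
           '-old\n'
           '+new\n')

    tmpname, message, user, date, branch, node, p1, p2 = patch.extract(
        uimod.ui(), cStringIO.StringIO(msg))
    try:
        print user      # alice@example.com
        print message   # 'fix a typo\n' (the [PATCH] prefix is stripped)
    finally:
        if tmpname:
            os.unlink(tmpname)  # caller must unlink the temporary file
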
class patchmeta(object):
    """Patched file metadata

    'op' is the performed operation, one of ADD, DELETE, RENAME, MODIFY
    or COPY. 'path' is the patched file path. 'oldpath' is set to the
    origin file when 'op' is either COPY or RENAME, None otherwise. If
    file mode is changed, 'mode' is a tuple (islink, isexec) where
    'islink' is True if the file is a symlink and 'isexec' is True if
    the file is executable. Otherwise, 'mode' is None.
    """
    def __init__(self, path):
        self.path = path
        self.oldpath = None
        self.mode = None
        self.op = 'MODIFY'
        self.binary = False

    def setmode(self, mode):
        islink = mode & 020000
        isexec = mode & 0100
        self.mode = (islink, isexec)

    def copy(self):
        other = patchmeta(self.path)
        other.oldpath = self.oldpath
        other.mode = self.mode
        other.op = self.op
        other.binary = self.binary
        return other

    def _ispatchinga(self, afile):
        if afile == '/dev/null':
            return self.op == 'ADD'
        return afile == 'a/' + (self.oldpath or self.path)

    def _ispatchingb(self, bfile):
        if bfile == '/dev/null':
            return self.op == 'DELETE'
        return bfile == 'b/' + self.path

    def ispatching(self, afile, bfile):
        return self._ispatchinga(afile) and self._ispatchingb(bfile)

    def __repr__(self):
        return "<patchmeta %s %r>" % (self.op, self.path)

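setmode() only keeps the two mode bits the patcher cares about, so a git extended header such as "new file mode 100755" collapses to an (islink, isexec) pair. A small sketch with made-up paths, assuming the module is importable as mercurial.patch:

    # editor's sketch, not part of the original module; paths are invented
    from mercurial import patch

    gp = patch.patchmeta('bin/tool')
    gp.op = 'ADD'
    gp.setmode(int('100755', 8))       # "new file mode 100755"
    islink, isexec = gp.mode
    print bool(islink), bool(isexec)   # False True

    ln = patch.patchmeta('link')
    ln.setmode(int('120000', 8))       # "new file mode 120000" (symlink)
    print bool(ln.mode[0]), bool(ln.mode[1])   # True False
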
def readgitpatch(lr):
    """extract git-style metadata about patches from <patchname>"""

    # Filter patch for git information
    gp = None
    gitpatches = []
    for line in lr:
        line = line.rstrip(' \r\n')
        if line.startswith('diff --git a/'):
            m = gitre.match(line)
            if m:
                if gp:
                    gitpatches.append(gp)
                dst = m.group(2)
                gp = patchmeta(dst)
        elif gp:
            if line.startswith('--- '):
                gitpatches.append(gp)
                gp = None
                continue
            if line.startswith('rename from '):
                gp.op = 'RENAME'
                gp.oldpath = line[12:]
            elif line.startswith('rename to '):
                gp.path = line[10:]
            elif line.startswith('copy from '):
                gp.op = 'COPY'
                gp.oldpath = line[10:]
            elif line.startswith('copy to '):
                gp.path = line[8:]
            elif line.startswith('deleted file'):
                gp.op = 'DELETE'
            elif line.startswith('new file mode '):
                gp.op = 'ADD'
                gp.setmode(int(line[-6:], 8))
            elif line.startswith('new mode '):
                gp.setmode(int(line[-6:], 8))
            elif line.startswith('GIT binary patch'):
                gp.binary = True
    if gp:
        gitpatches.append(gp)

    return gitpatches

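readgitpatch() only iterates over its argument, so any sequence of lines works as input. A sketch with a hypothetical two-file git diff:

    # editor's sketch, not part of the original module; the diff lines are invented
    from mercurial import patch

    lines = ['diff --git a/old.txt b/new.txt\n',
             'rename from old.txt\n',
             'rename to new.txt\n',
             'diff --git a/tool b/tool\n',
             'new file mode 100755\n']
    for gp in patch.readgitpatch(lines):
        print gp.op, gp.path, gp.oldpath
    # RENAME new.txt old.txt
    # ADD tool None
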
class linereader(object):
    # simple class to allow pushing lines back into the input stream
    def __init__(self, fp):
        self.fp = fp
        self.buf = []

    def push(self, line):
        if line is not None:
            self.buf.append(line)

    def readline(self):
        if self.buf:
            l = self.buf[0]
            del self.buf[0]
            return l
        return self.fp.readline()

    def __iter__(self):
        while True:
            l = self.readline()
            if not l:
                break
            yield l

class abstractbackend(object):
    def __init__(self, ui):
        self.ui = ui

    def getfile(self, fname):
        """Return target file data and flags as a (data, (islink,
        isexec)) tuple. Data is None if file is missing/deleted.
        """
        raise NotImplementedError

    def setfile(self, fname, data, mode, copysource):
        """Write data to target file fname and set its mode. mode is a
        (islink, isexec) tuple. If data is None, the file content should
        be left unchanged. If the file is modified after being copied,
        copysource is set to the original file name.
        """
        raise NotImplementedError

    def unlink(self, fname):
        """Unlink target file."""
        raise NotImplementedError

    def writerej(self, fname, failed, total, lines):
        """Write rejected lines for fname. failed is the number of hunks
        which failed to apply and total the total number of hunks for this
        file.
        """
        pass

    def exists(self, fname):
        raise NotImplementedError

class fsbackend(abstractbackend):
    def __init__(self, ui, basedir):
        super(fsbackend, self).__init__(ui)
        self.opener = scmutil.opener(basedir)

    def _join(self, f):
        return os.path.join(self.opener.base, f)

    def getfile(self, fname):
        if self.opener.islink(fname):
            return (self.opener.readlink(fname), (True, False))

        isexec = False
        try:
            isexec = self.opener.lstat(fname).st_mode & 0100 != 0
        except OSError, e:
            if e.errno != errno.ENOENT:
                raise
        try:
            return (self.opener.read(fname), (False, isexec))
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            return None, None

    def setfile(self, fname, data, mode, copysource):
        islink, isexec = mode
        if data is None:
            self.opener.setflags(fname, islink, isexec)
            return
        if islink:
            self.opener.symlink(data, fname)
        else:
            self.opener.write(fname, data)
            if isexec:
                self.opener.setflags(fname, False, True)

    def unlink(self, fname):
        self.opener.unlinkpath(fname, ignoremissing=True)

    def writerej(self, fname, failed, total, lines):
        fname = fname + ".rej"
        self.ui.warn(
            _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
            (failed, total, fname))
        fp = self.opener(fname, 'w')
        fp.writelines(lines)
        fp.close()

    def exists(self, fname):
        return self.opener.lexists(fname)

class workingbackend(fsbackend):
    def __init__(self, ui, repo, similarity):
        super(workingbackend, self).__init__(ui, repo.root)
        self.repo = repo
        self.similarity = similarity
        self.removed = set()
        self.changed = set()
        self.copied = []

    def _checkknown(self, fname):
        if self.repo.dirstate[fname] == '?' and self.exists(fname):
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def setfile(self, fname, data, mode, copysource):
        self._checkknown(fname)
        super(workingbackend, self).setfile(fname, data, mode, copysource)
        if copysource is not None:
            self.copied.append((copysource, fname))
        self.changed.add(fname)

    def unlink(self, fname):
        self._checkknown(fname)
        super(workingbackend, self).unlink(fname)
        self.removed.add(fname)
        self.changed.add(fname)

    def close(self):
        wctx = self.repo[None]
        changed = set(self.changed)
        for src, dst in self.copied:
            scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
        if self.removed:
            wctx.forget(sorted(self.removed))
            for f in self.removed:
                if f not in self.repo.dirstate:
                    # File was deleted and no longer belongs to the
                    # dirstate, it was probably marked added then
                    # deleted, and should not be considered by
                    # marktouched().
                    changed.discard(f)
        if changed:
            scmutil.marktouched(self.repo, changed, self.similarity)
        return sorted(self.changed)

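The backends above all fill in the same small abstractbackend contract: getfile() returns (data, (islink, isexec)) or (None, None) for a missing file, setfile() writes data or applies a mode-only change, and unlink()/exists() complete the picture. As a rough sketch (an editor's toy, not part of Mercurial), an in-memory backend could look like this:

    # editor's sketch, not part of the original module
    from mercurial import patch

    class memorybackend(patch.abstractbackend):
        def __init__(self, ui, files=None):
            super(memorybackend, self).__init__(ui)
            self.files = dict(files or {})  # fname -> (data, (islink, isexec))

        def getfile(self, fname):
            return self.files.get(fname, (None, None))

        def setfile(self, fname, data, mode, copysource):
            if data is None:                # mode-only change
                data = self.files[fname][0]
            self.files[fname] = (data, mode)

        def unlink(self, fname):
            self.files.pop(fname, None)

        def exists(self, fname):
            return fname in self.files

    backend = memorybackend(None, {'a.txt': ('hello\n', (False, False))})
    print backend.exists('a.txt'), backend.getfile('a.txt')[1]   # True (False, False)
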
class filestore(object):
    def __init__(self, maxsize=None):
        self.opener = None
        self.files = {}
        self.created = 0
        self.maxsize = maxsize
        if self.maxsize is None:
            self.maxsize = 4*(2**20)
        self.size = 0
        self.data = {}

    def setfile(self, fname, data, mode, copied=None):
        if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
            self.data[fname] = (data, mode, copied)
            self.size += len(data)
        else:
            if self.opener is None:
                root = tempfile.mkdtemp(prefix='hg-patch-')
                self.opener = scmutil.opener(root)
            # Avoid filename issues with these simple names
            fn = str(self.created)
            self.opener.write(fn, data)
            self.created += 1
            self.files[fname] = (fn, mode, copied)

    def getfile(self, fname):
        if fname in self.data:
            return self.data[fname]
        if not self.opener or fname not in self.files:
            return None, None, None
        fn, mode, copied = self.files[fname]
        return self.opener.read(fn), mode, copied

    def close(self):
        if self.opener:
            shutil.rmtree(self.opener.base)

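filestore keeps patched file contents in memory up to maxsize bytes and spills anything larger into a temporary directory that close() removes again. A sketch with a deliberately tiny limit:

    # editor's sketch, not part of the original module; file names are invented
    from mercurial import patch

    store = patch.filestore(maxsize=16)          # tiny limit to force spilling
    store.setfile('small.txt', 'tiny\n', (False, False))
    store.setfile('big.txt', 'x' * 1024, (False, False))   # goes to disk
    data, mode, copied = store.getfile('big.txt')
    print len(data), mode, copied                # 1024 (False, False) None
    store.close()                                # removes the temp directory
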
class repobackend(abstractbackend):
    def __init__(self, ui, repo, ctx, store):
        super(repobackend, self).__init__(ui)
        self.repo = repo
        self.ctx = ctx
        self.store = store
        self.changed = set()
        self.removed = set()
        self.copied = {}

    def _checkknown(self, fname):
        if fname not in self.ctx:
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def getfile(self, fname):
        try:
            fctx = self.ctx[fname]
        except error.LookupError:
            return None, None
        flags = fctx.flags()
        return fctx.data(), ('l' in flags, 'x' in flags)

    def setfile(self, fname, data, mode, copysource):
        if copysource:
            self._checkknown(copysource)
        if data is None:
            data = self.ctx[fname].data()
        self.store.setfile(fname, data, mode, copysource)
        self.changed.add(fname)
        if copysource:
            self.copied[fname] = copysource

    def unlink(self, fname):
        self._checkknown(fname)
        self.removed.add(fname)

    def exists(self, fname):
        return fname in self.ctx

    def close(self):
        return self.changed | self.removed

# @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
eolmodes = ['strict', 'crlf', 'lf', 'auto']

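unidesc captures the four numbers of a unified hunk header; the optional lengths come back as None for the short "@@ -start +start @@" form, which the comment above notes means a length of 1. For example:

    # editor's sketch, not part of the original module
    from mercurial import patch

    print patch.unidesc.match('@@ -12,3 +14,4 @@').groups()  # ('12', '3', '14', '4')
    print patch.unidesc.match('@@ -7 +7 @@').groups()         # ('7', None, '7', None)
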
class patchfile(object):
    def __init__(self, ui, gp, backend, store, eolmode='strict'):
        self.fname = gp.path
        self.eolmode = eolmode
        self.eol = None
        self.backend = backend
        self.ui = ui
        self.lines = []
        self.exists = False
        self.missing = True
        self.mode = gp.mode
        self.copysource = gp.oldpath
        self.create = gp.op in ('ADD', 'COPY', 'RENAME')
        self.remove = gp.op == 'DELETE'
        if self.copysource is None:
            data, mode = backend.getfile(self.fname)
        else:
            data, mode = store.getfile(self.copysource)[:2]
        if data is not None:
            self.exists = self.copysource is None or backend.exists(self.fname)
            self.missing = False
            if data:
                self.lines = mdiff.splitnewlines(data)
            if self.mode is None:
                self.mode = mode
            if self.lines:
                # Normalize line endings
                if self.lines[0].endswith('\r\n'):
                    self.eol = '\r\n'
                elif self.lines[0].endswith('\n'):
                    self.eol = '\n'
                if eolmode != 'strict':
                    nlines = []
                    for l in self.lines:
                        if l.endswith('\r\n'):
                            l = l[:-2] + '\n'
                        nlines.append(l)
                    self.lines = nlines
        else:
            if self.create:
                self.missing = False
            if self.mode is None:
                self.mode = (False, False)
        if self.missing:
            self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)

        self.hash = {}
        self.dirty = 0
        self.offset = 0
        self.skew = 0
        self.rej = []
        self.fileprinted = False
        self.printfile(False)
        self.hunks = 0

    def writelines(self, fname, lines, mode):
        if self.eolmode == 'auto':
            eol = self.eol
        elif self.eolmode == 'crlf':
            eol = '\r\n'
        else:
            eol = '\n'

        if self.eolmode != 'strict' and eol and eol != '\n':
            rawlines = []
            for l in lines:
                if l and l[-1] == '\n':
                    l = l[:-1] + eol
                rawlines.append(l)
            lines = rawlines

        self.backend.setfile(fname, ''.join(lines), mode, self.copysource)

    def printfile(self, warn):
        if self.fileprinted:
            return
        if warn or self.ui.verbose:
            self.fileprinted = True
        s = _("patching file %s\n") % self.fname
        if warn:
            self.ui.warn(s)
        else:
            self.ui.note(s)


    def findlines(self, l, linenum):
        # looks through the hash and finds candidate lines. The
        # result is a list of line numbers sorted based on distance
        # from linenum

        cand = self.hash.get(l, [])
        if len(cand) > 1:
            # resort our list of potentials forward then back.
            cand.sort(key=lambda x: abs(x - linenum))
        return cand

    def write_rej(self):
        # our rejects are a little different from patch(1). This always
        # creates rejects in the same form as the original patch. A file
        # header is inserted so that you can run the reject through patch again
        # without having to type the filename.
        if not self.rej:
            return
        base = os.path.basename(self.fname)
        lines = ["--- %s\n+++ %s\n" % (base, base)]
        for x in self.rej:
            for l in x.hunk:
                lines.append(l)
                if l[-1] != '\n':
                    lines.append("\n\ No newline at end of file\n")
        self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)

    def apply(self, h):
        if not h.complete():
            raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
                             (h.number, h.desc, len(h.a), h.lena, len(h.b),
                              h.lenb))

        self.hunks += 1

        if self.missing:
            self.rej.append(h)
            return -1

        if self.exists and self.create:
            if self.copysource:
                self.ui.warn(_("cannot create %s: destination already "
                               "exists\n") % self.fname)
            else:
                self.ui.warn(_("file %s already exists\n") % self.fname)
            self.rej.append(h)
            return -1

        if isinstance(h, binhunk):
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                l = h.new(self.lines)
                self.lines[:] = l
                self.offset += len(l)
                self.dirty = True
            return 0

        horig = h
        if (self.eolmode in ('crlf', 'lf')
            or self.eolmode == 'auto' and self.eol):
            # If new eols are going to be normalized, then normalize
            # hunk data before patching. Otherwise, preserve input
            # line-endings.
            h = h.getnormalized()

        # fast case first, no offsets, no fuzz
        old, oldstart, new, newstart = h.fuzzit(0, False)
        oldstart += self.offset
        orig_start = oldstart
        # if there's skew we want to emit the "(offset %d lines)" even
        # when the hunk cleanly applies at start + skew, so skip the
        # fast case code
        if (self.skew == 0 and
            diffhelpers.testhunk(old, self.lines, oldstart) == 0):
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                self.lines[oldstart:oldstart + len(old)] = new
                self.offset += len(new) - len(old)
                self.dirty = True
            return 0

        # ok, we couldn't match the hunk. Let's look for offsets and fuzz it
        self.hash = {}
        for x, s in enumerate(self.lines):
            self.hash.setdefault(s, []).append(x)

        for fuzzlen in xrange(3):
            for toponly in [True, False]:
                old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
                oldstart = oldstart + self.offset + self.skew
                oldstart = min(oldstart, len(self.lines))
                if old:
                    cand = self.findlines(old[0][1:], oldstart)
                else:
                    # Only adding lines with no or fuzzed context, just
                    # take the skew into account
                    cand = [oldstart]

                for l in cand:
                    if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
                        self.lines[l : l + len(old)] = new
                        self.offset += len(new) - len(old)
                        self.skew = l - orig_start
                        self.dirty = True
                        offset = l - orig_start - fuzzlen
                        if fuzzlen:
                            msg = _("Hunk #%d succeeded at %d "
                                    "with fuzz %d "
                                    "(offset %d lines).\n")
                            self.printfile(True)
                            self.ui.warn(msg %
                                (h.number, l + 1, fuzzlen, offset))
                        else:
                            msg = _("Hunk #%d succeeded at %d "
                                    "(offset %d lines).\n")
                            self.ui.note(msg % (h.number, l + 1, offset))
                        return fuzzlen
        self.printfile(True)
        self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
        self.rej.append(horig)
        return -1

    def close(self):
        if self.dirty:
            self.writelines(self.fname, self.lines, self.mode)
        self.write_rej()
        return len(self.rej)

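When the fast path fails, apply() indexes the current file lines in self.hash and, for growing fuzz levels, asks findlines() for positions of the hunk's first old line near the expected offset, testing the whole hunk at each candidate. The following standalone sketch (an editor's simplification with no fuzz or skew bookkeeping, not the real implementation) shows the core of that search:

    # editor's sketch, not part of the original module
    def findnear(lines, old, expected):
        # candidate positions holding the hunk's first line, nearest first
        cand = [i for i, l in enumerate(lines) if l == old[0]]
        cand.sort(key=lambda x: abs(x - expected))
        for i in cand:
            if lines[i:i + len(old)] == old:
                return i
        return -1

    filelines = ['a\n', 'b\n', 'c\n', 'd\n', 'e\n']
    print findnear(filelines, ['c\n', 'd\n'], expected=0)
    # 2: the hunk matched two lines below where the patch expected it
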
class header(object):
    """patch header
    """
    diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
    diff_re = re.compile('diff -r .* (.*)$')
    allhunks_re = re.compile('(?:index|deleted file) ')
    pretty_re = re.compile('(?:new file|deleted file) ')
    special_re = re.compile('(?:index|new|deleted|copy|rename) ')

    def __init__(self, header):
        self.header = header
        self.hunks = []

    def binary(self):
        return util.any(h.startswith('index ') for h in self.header)

    def pretty(self, fp):
        for h in self.header:
            if h.startswith('index '):
                fp.write(_('this modifies a binary file (all or nothing)\n'))
                break
            if self.pretty_re.match(h):
                fp.write(h)
                if self.binary():
                    fp.write(_('this is a binary file\n'))
                break
            if h.startswith('---'):
                fp.write(_('%d hunks, %d lines changed\n') %
                         (len(self.hunks),
                          sum([max(h.added, h.removed) for h in self.hunks])))
                break
            fp.write(h)

    def write(self, fp):
        fp.write(''.join(self.header))

    def allhunks(self):
        return util.any(self.allhunks_re.match(h) for h in self.header)

    def files(self):
        match = self.diffgit_re.match(self.header[0])
        if match:
            fromfile, tofile = match.groups()
            if fromfile == tofile:
                return [fromfile]
            return [fromfile, tofile]
        else:
            return self.diff_re.match(self.header[0]).groups()

    def filename(self):
        return self.files()[-1]

    def __repr__(self):
        return '<header %s>' % (' '.join(map(repr, self.files())))

    def special(self):
        return util.any(self.special_re.match(h) for h in self.header)

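header.files() prefers the "diff --git" form, returning one name when the file keeps its path and two on rename/copy, and falls back to the "diff -r" regex otherwise. A sketch with hypothetical header lines, assuming the module is importable as mercurial.patch:

    # editor's sketch, not part of the original module; header lines are invented
    from mercurial import patch

    h = patch.header(['diff --git a/old.c b/new.c\n', 'rename from old.c\n'])
    print h.files(), h.filename(), h.special()   # ['old.c', 'new.c'] new.c True

    p = patch.header(['diff -r 3a2b1c0d foo/bar.py\n'])
    print p.files(), p.filename()                # ('foo/bar.py',) foo/bar.py
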
class recordhunk(object):
    """patch hunk

    XXX shouldn't we merge this with the other hunk class?
    """
    maxcontext = 3

    def __init__(self, header, fromline, toline, proc, before, hunk, after):
        def trimcontext(number, lines):
            delta = len(lines) - self.maxcontext
            if False and delta > 0:
                return number + delta, lines[:self.maxcontext]
            return number, lines

        self.header = header
        self.fromline, self.before = trimcontext(fromline, before)
        self.toline, self.after = trimcontext(toline, after)
        self.proc = proc
        self.hunk = hunk
        self.added, self.removed = self.countchanges(self.hunk)

    def __eq__(self, v):
        if not isinstance(v, recordhunk):
            return False

        return ((v.hunk == self.hunk) and
                (v.proc == self.proc) and
                (self.fromline == v.fromline) and
                (self.header.files() == v.header.files()))

    def __hash__(self):
        return hash((tuple(self.hunk),
            tuple(self.header.files()),
            self.fromline,
            self.proc))

    def countchanges(self, hunk):
        """hunk -> (n+,n-)"""
        add = len([h for h in hunk if h[0] == '+'])
        rem = len([h for h in hunk if h[0] == '-'])
        return add, rem

    def write(self, fp):
        delta = len(self.before) + len(self.after)
        if self.after and self.after[-1] == '\\ No newline at end of file\n':
            delta -= 1
        fromlen = delta + self.removed
        tolen = delta + self.added
        fp.write('@@ -%d,%d +%d,%d @@%s\n' %
                 (self.fromline, fromlen, self.toline, tolen,
                  self.proc and (' ' + self.proc)))
        fp.write(''.join(self.before + self.hunk + self.after))

    pretty = write

    def filename(self):
        return self.header.filename()

    def __repr__(self):
        return '<hunk %r@%d>' % (self.filename(), self.fromline)

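countchanges() simply counts the '+' and '-' lines, and write() rebuilds the "@@ -from,len +to,len @@" header from those counts plus the surrounding context. A sketch with a hand-built hunk and invented file content:

    # editor's sketch, not part of the original module; hunk lines are invented
    import sys
    from mercurial import patch

    hdr = patch.header(['diff --git a/a.txt b/a.txt\n'])
    hunk = patch.recordhunk(hdr, fromline=3, toline=3, proc='',
                            before=[' keep\n'],
                            hunk=['-old\n', '+new\n', '+more\n'],
                            after=[' keep too\n'])
    print hunk.added, hunk.removed   # 2 1
    hunk.write(sys.stdout)           # @@ -3,3 +3,4 @@ followed by the lines
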
def filterpatch(ui, headers):
    """Interactively filter patch chunks into applied-only chunks"""

    def prompt(skipfile, skipall, query, chunk):
        """prompt query, and process base inputs

        - y/n for the rest of file
        - y/n for the rest
        - ? (help)
        - q (quit)

        Return True/False and possibly updated skipfile and skipall.
        """
        newpatches = None
        if skipall is not None:
            return skipall, skipfile, skipall, newpatches
        if skipfile is not None:
            return skipfile, skipfile, skipall, newpatches
        while True:
            resps = _('[Ynesfdaq?]'
                      '$$ &Yes, record this change'
                      '$$ &No, skip this change'
                      '$$ &Edit this change manually'
                      '$$ &Skip remaining changes to this file'
                      '$$ Record remaining changes to this &file'
                      '$$ &Done, skip remaining changes and files'
                      '$$ Record &all changes to all remaining files'
                      '$$ &Quit, recording no changes'
                      '$$ &? (display help)')
            r = ui.promptchoice("%s %s" % (query, resps))
            ui.write("\n")
            if r == 8: # ?
                for c, t in ui.extractchoices(resps)[1]:
                    ui.write('%s - %s\n' % (c, t.lower()))
                continue
            elif r == 0: # yes
                ret = True
            elif r == 1: # no
                ret = False
            elif r == 2: # Edit patch
                if chunk is None:
                    ui.write(_('cannot edit patch for whole file'))
                    ui.write("\n")
                    continue
                if chunk.header.binary():
                    ui.write(_('cannot edit patch for binary file'))
                    ui.write("\n")
                    continue
                # Patch comment based on the Git one (based on comment at end of
                # http://mercurial.selenic.com/wiki/RecordExtension)
                phelp = '---' + _("""
To remove '-' lines, make them ' ' lines (context).
To remove '+' lines, delete them.
Lines starting with # will be removed from the patch.

If the patch applies cleanly, the edited hunk will immediately be
added to the record list. If it does not apply cleanly, a rejects
file will be generated: you can use that when you try again. If
all lines of the hunk are removed, then the edit is aborted and
the hunk is left unchanged.
""")
                (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
                        suffix=".diff", text=True)
                ncpatchfp = None
                try:
                    # Write the initial patch
                    f = os.fdopen(patchfd, "w")
                    chunk.header.write(f)
                    chunk.write(f)
                    f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
                    f.close()
                    # Start the editor and wait for it to complete
                    editor = ui.geteditor()
                    ui.system("%s \"%s\"" % (editor, patchfn),
                              environ={'HGUSER': ui.username()},
                              onerr=util.Abort, errprefix=_("edit failed"))
                    # Remove comment lines
                    patchfp = open(patchfn)
                    ncpatchfp = cStringIO.StringIO()
                    for line in patchfp:
                        if not line.startswith('#'):
                            ncpatchfp.write(line)
                    patchfp.close()
                    ncpatchfp.seek(0)
                    newpatches = parsepatch(ncpatchfp)
                finally:
                    os.unlink(patchfn)
                    del ncpatchfp
                # Signal that the chunk shouldn't be applied as-is, but
                # provide the new patch to be used instead.
                ret = False
            elif r == 3: # Skip
                ret = skipfile = False
            elif r == 4: # file (Record remaining)
                ret = skipfile = True
            elif r == 5: # done, skip remaining
                ret = skipall = False
            elif r == 6: # all
                ret = skipall = True
            elif r == 7: # quit
                raise util.Abort(_('user quit'))
            return ret, skipfile, skipall, newpatches

    seen = set()
    applied = {}        # 'filename' -> [] of chunks
    skipfile, skipall = None, None
    pos, total = 1, sum(len(h.hunks) for h in headers)
    for h in headers:
        pos += len(h.hunks)
        skipfile = None
        fixoffset = 0
        hdr = ''.join(h.header)
        if hdr in seen:
            continue
        seen.add(hdr)
1051 if skipall is None:
1051 if skipall is None:
1052 h.pretty(ui)
1052 h.pretty(ui)
1053 msg = (_('examine changes to %s?') %
1053 msg = (_('examine changes to %s?') %
1054 _(' and ').join("'%s'" % f for f in h.files()))
1054 _(' and ').join("'%s'" % f for f in h.files()))
1055 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
1055 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
1056 if not r:
1056 if not r:
1057 continue
1057 continue
1058 applied[h.filename()] = [h]
1058 applied[h.filename()] = [h]
1059 if h.allhunks():
1059 if h.allhunks():
1060 applied[h.filename()] += h.hunks
1060 applied[h.filename()] += h.hunks
1061 continue
1061 continue
1062 for i, chunk in enumerate(h.hunks):
1062 for i, chunk in enumerate(h.hunks):
1063 if skipfile is None and skipall is None:
1063 if skipfile is None and skipall is None:
1064 chunk.pretty(ui)
1064 chunk.pretty(ui)
1065 if total == 1:
1065 if total == 1:
1066 msg = _("record this change to '%s'?") % chunk.filename()
1066 msg = _("record this change to '%s'?") % chunk.filename()
1067 else:
1067 else:
1068 idx = pos - len(h.hunks) + i
1068 idx = pos - len(h.hunks) + i
1069 msg = _("record change %d/%d to '%s'?") % (idx, total,
1069 msg = _("record change %d/%d to '%s'?") % (idx, total,
1070 chunk.filename())
1070 chunk.filename())
1071 r, skipfile, skipall, newpatches = prompt(skipfile,
1071 r, skipfile, skipall, newpatches = prompt(skipfile,
1072 skipall, msg, chunk)
1072 skipall, msg, chunk)
1073 if r:
1073 if r:
1074 if fixoffset:
1074 if fixoffset:
1075 chunk = copy.copy(chunk)
1075 chunk = copy.copy(chunk)
1076 chunk.toline += fixoffset
1076 chunk.toline += fixoffset
1077 applied[chunk.filename()].append(chunk)
1077 applied[chunk.filename()].append(chunk)
1078 elif newpatches is not None:
1078 elif newpatches is not None:
1079 for newpatch in newpatches:
1079 for newpatch in newpatches:
1080 for newhunk in newpatch.hunks:
1080 for newhunk in newpatch.hunks:
1081 if fixoffset:
1081 if fixoffset:
1082 newhunk.toline += fixoffset
1082 newhunk.toline += fixoffset
1083 applied[newhunk.filename()].append(newhunk)
1083 applied[newhunk.filename()].append(newhunk)
1084 else:
1084 else:
1085 fixoffset += chunk.removed - chunk.added
1085 fixoffset += chunk.removed - chunk.added
1086 return sum([h for h in applied.itervalues()
1086 return sum([h for h in applied.itervalues()
1087 if h[0].special() or len(h) > 1], [])
1087 if h[0].special() or len(h) > 1], [])
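
# The interactive prompt above packs its choice keys and help strings into a
# single '$$'-separated message that ui.promptchoice()/ui.extractchoices()
# interpret. A minimal standalone sketch of that decomposition (the helper
# name below is illustrative only, not part of the ui API):

def _splitchoices(resps):
    """Split '[Ynesfdaq?]$$ &Yes, ...$$ &No, ...' into the summary string
    and a list of (key, help) pairs, where the key is the character
    following '&' in each alternative."""
    parts = resps.split('$$')
    summary = parts[0].strip()
    choices = []
    for alt in parts[1:]:
        alt = alt.strip()
        amp = alt.index('&')
        choices.append((alt[amp + 1].lower(), alt.replace('&', '', 1)))
    return summary, choices

# e.g. _splitchoices('[Yn?]$$ &Yes$$ &No$$ &? (help)') returns
# ('[Yn?]', [('y', 'Yes'), ('n', 'No'), ('?', '? (help)')])
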
1088 class hunk(object):
1088 class hunk(object):
1089 def __init__(self, desc, num, lr, context):
1089 def __init__(self, desc, num, lr, context):
1090 self.number = num
1090 self.number = num
1091 self.desc = desc
1091 self.desc = desc
1092 self.hunk = [desc]
1092 self.hunk = [desc]
1093 self.a = []
1093 self.a = []
1094 self.b = []
1094 self.b = []
1095 self.starta = self.lena = None
1095 self.starta = self.lena = None
1096 self.startb = self.lenb = None
1096 self.startb = self.lenb = None
1097 if lr is not None:
1097 if lr is not None:
1098 if context:
1098 if context:
1099 self.read_context_hunk(lr)
1099 self.read_context_hunk(lr)
1100 else:
1100 else:
1101 self.read_unified_hunk(lr)
1101 self.read_unified_hunk(lr)
1102
1102
1103 def getnormalized(self):
1103 def getnormalized(self):
1104 """Return a copy with line endings normalized to LF."""
1104 """Return a copy with line endings normalized to LF."""
1105
1105
1106 def normalize(lines):
1106 def normalize(lines):
1107 nlines = []
1107 nlines = []
1108 for line in lines:
1108 for line in lines:
1109 if line.endswith('\r\n'):
1109 if line.endswith('\r\n'):
1110 line = line[:-2] + '\n'
1110 line = line[:-2] + '\n'
1111 nlines.append(line)
1111 nlines.append(line)
1112 return nlines
1112 return nlines
1113
1113
1114 # Dummy object, it is rebuilt manually
1114 # Dummy object, it is rebuilt manually
1115 nh = hunk(self.desc, self.number, None, None)
1115 nh = hunk(self.desc, self.number, None, None)
1116 nh.number = self.number
1116 nh.number = self.number
1117 nh.desc = self.desc
1117 nh.desc = self.desc
1118 nh.hunk = self.hunk
1118 nh.hunk = self.hunk
1119 nh.a = normalize(self.a)
1119 nh.a = normalize(self.a)
1120 nh.b = normalize(self.b)
1120 nh.b = normalize(self.b)
1121 nh.starta = self.starta
1121 nh.starta = self.starta
1122 nh.startb = self.startb
1122 nh.startb = self.startb
1123 nh.lena = self.lena
1123 nh.lena = self.lena
1124 nh.lenb = self.lenb
1124 nh.lenb = self.lenb
1125 return nh
1125 return nh
1126
1126
1127 def read_unified_hunk(self, lr):
1127 def read_unified_hunk(self, lr):
1128 m = unidesc.match(self.desc)
1128 m = unidesc.match(self.desc)
1129 if not m:
1129 if not m:
1130 raise PatchError(_("bad hunk #%d") % self.number)
1130 raise PatchError(_("bad hunk #%d") % self.number)
1131 self.starta, self.lena, self.startb, self.lenb = m.groups()
1131 self.starta, self.lena, self.startb, self.lenb = m.groups()
1132 if self.lena is None:
1132 if self.lena is None:
1133 self.lena = 1
1133 self.lena = 1
1134 else:
1134 else:
1135 self.lena = int(self.lena)
1135 self.lena = int(self.lena)
1136 if self.lenb is None:
1136 if self.lenb is None:
1137 self.lenb = 1
1137 self.lenb = 1
1138 else:
1138 else:
1139 self.lenb = int(self.lenb)
1139 self.lenb = int(self.lenb)
1140 self.starta = int(self.starta)
1140 self.starta = int(self.starta)
1141 self.startb = int(self.startb)
1141 self.startb = int(self.startb)
1142 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
1142 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
1143 self.b)
1143 self.b)
# if we hit EOF before finishing out the hunk, the last line will
# be zero length. Let's try to fix it up.
1146 while len(self.hunk[-1]) == 0:
1146 while len(self.hunk[-1]) == 0:
1147 del self.hunk[-1]
1147 del self.hunk[-1]
1148 del self.a[-1]
1148 del self.a[-1]
1149 del self.b[-1]
1149 del self.b[-1]
1150 self.lena -= 1
1150 self.lena -= 1
1151 self.lenb -= 1
1151 self.lenb -= 1
1152 self._fixnewline(lr)
1152 self._fixnewline(lr)
1153
1153
1154 def read_context_hunk(self, lr):
1154 def read_context_hunk(self, lr):
1155 self.desc = lr.readline()
1155 self.desc = lr.readline()
1156 m = contextdesc.match(self.desc)
1156 m = contextdesc.match(self.desc)
1157 if not m:
1157 if not m:
1158 raise PatchError(_("bad hunk #%d") % self.number)
1158 raise PatchError(_("bad hunk #%d") % self.number)
1159 self.starta, aend = m.groups()
1159 self.starta, aend = m.groups()
1160 self.starta = int(self.starta)
1160 self.starta = int(self.starta)
1161 if aend is None:
1161 if aend is None:
1162 aend = self.starta
1162 aend = self.starta
1163 self.lena = int(aend) - self.starta
1163 self.lena = int(aend) - self.starta
1164 if self.starta:
1164 if self.starta:
1165 self.lena += 1
1165 self.lena += 1
1166 for x in xrange(self.lena):
1166 for x in xrange(self.lena):
1167 l = lr.readline()
1167 l = lr.readline()
1168 if l.startswith('---'):
1168 if l.startswith('---'):
# line additions, old block is empty
1170 lr.push(l)
1170 lr.push(l)
1171 break
1171 break
1172 s = l[2:]
1172 s = l[2:]
1173 if l.startswith('- ') or l.startswith('! '):
1173 if l.startswith('- ') or l.startswith('! '):
1174 u = '-' + s
1174 u = '-' + s
1175 elif l.startswith(' '):
1175 elif l.startswith(' '):
1176 u = ' ' + s
1176 u = ' ' + s
1177 else:
1177 else:
1178 raise PatchError(_("bad hunk #%d old text line %d") %
1178 raise PatchError(_("bad hunk #%d old text line %d") %
1179 (self.number, x))
1179 (self.number, x))
1180 self.a.append(u)
1180 self.a.append(u)
1181 self.hunk.append(u)
1181 self.hunk.append(u)
1182
1182
1183 l = lr.readline()
1183 l = lr.readline()
1184 if l.startswith('\ '):
1184 if l.startswith('\ '):
1185 s = self.a[-1][:-1]
1185 s = self.a[-1][:-1]
1186 self.a[-1] = s
1186 self.a[-1] = s
1187 self.hunk[-1] = s
1187 self.hunk[-1] = s
1188 l = lr.readline()
1188 l = lr.readline()
1189 m = contextdesc.match(l)
1189 m = contextdesc.match(l)
1190 if not m:
1190 if not m:
1191 raise PatchError(_("bad hunk #%d") % self.number)
1191 raise PatchError(_("bad hunk #%d") % self.number)
1192 self.startb, bend = m.groups()
1192 self.startb, bend = m.groups()
1193 self.startb = int(self.startb)
1193 self.startb = int(self.startb)
1194 if bend is None:
1194 if bend is None:
1195 bend = self.startb
1195 bend = self.startb
1196 self.lenb = int(bend) - self.startb
1196 self.lenb = int(bend) - self.startb
1197 if self.startb:
1197 if self.startb:
1198 self.lenb += 1
1198 self.lenb += 1
1199 hunki = 1
1199 hunki = 1
1200 for x in xrange(self.lenb):
1200 for x in xrange(self.lenb):
1201 l = lr.readline()
1201 l = lr.readline()
1202 if l.startswith('\ '):
1202 if l.startswith('\ '):
# XXX: the only way to hit this is with an invalid line range.
# The no-eol marker is not counted in the line range, but I
# guess there are diff(1) implementations out there which behave
# differently.
1206 s = self.b[-1][:-1]
1206 s = self.b[-1][:-1]
1207 self.b[-1] = s
1207 self.b[-1] = s
1208 self.hunk[hunki - 1] = s
1208 self.hunk[hunki - 1] = s
1209 continue
1209 continue
1210 if not l:
1210 if not l:
1211 # line deletions, new block is empty and we hit EOF
1211 # line deletions, new block is empty and we hit EOF
1212 lr.push(l)
1212 lr.push(l)
1213 break
1213 break
1214 s = l[2:]
1214 s = l[2:]
1215 if l.startswith('+ ') or l.startswith('! '):
1215 if l.startswith('+ ') or l.startswith('! '):
1216 u = '+' + s
1216 u = '+' + s
1217 elif l.startswith(' '):
1217 elif l.startswith(' '):
1218 u = ' ' + s
1218 u = ' ' + s
1219 elif len(self.b) == 0:
1219 elif len(self.b) == 0:
1220 # line deletions, new block is empty
1220 # line deletions, new block is empty
1221 lr.push(l)
1221 lr.push(l)
1222 break
1222 break
1223 else:
1223 else:
1224 raise PatchError(_("bad hunk #%d old text line %d") %
1224 raise PatchError(_("bad hunk #%d old text line %d") %
1225 (self.number, x))
1225 (self.number, x))
1226 self.b.append(s)
1226 self.b.append(s)
1227 while True:
1227 while True:
1228 if hunki >= len(self.hunk):
1228 if hunki >= len(self.hunk):
1229 h = ""
1229 h = ""
1230 else:
1230 else:
1231 h = self.hunk[hunki]
1231 h = self.hunk[hunki]
1232 hunki += 1
1232 hunki += 1
1233 if h == u:
1233 if h == u:
1234 break
1234 break
1235 elif h.startswith('-'):
1235 elif h.startswith('-'):
1236 continue
1236 continue
1237 else:
1237 else:
1238 self.hunk.insert(hunki - 1, u)
1238 self.hunk.insert(hunki - 1, u)
1239 break
1239 break
1240
1240
1241 if not self.a:
1241 if not self.a:
1242 # this happens when lines were only added to the hunk
1242 # this happens when lines were only added to the hunk
1243 for x in self.hunk:
1243 for x in self.hunk:
1244 if x.startswith('-') or x.startswith(' '):
1244 if x.startswith('-') or x.startswith(' '):
1245 self.a.append(x)
1245 self.a.append(x)
1246 if not self.b:
1246 if not self.b:
1247 # this happens when lines were only deleted from the hunk
1247 # this happens when lines were only deleted from the hunk
1248 for x in self.hunk:
1248 for x in self.hunk:
1249 if x.startswith('+') or x.startswith(' '):
1249 if x.startswith('+') or x.startswith(' '):
1250 self.b.append(x[1:])
1250 self.b.append(x[1:])
1251 # @@ -start,len +start,len @@
1251 # @@ -start,len +start,len @@
1252 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
1252 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
1253 self.startb, self.lenb)
1253 self.startb, self.lenb)
1254 self.hunk[0] = self.desc
1254 self.hunk[0] = self.desc
1255 self._fixnewline(lr)
1255 self._fixnewline(lr)
1256
1256
1257 def _fixnewline(self, lr):
1257 def _fixnewline(self, lr):
1258 l = lr.readline()
1258 l = lr.readline()
1259 if l.startswith('\ '):
1259 if l.startswith('\ '):
1260 diffhelpers.fix_newline(self.hunk, self.a, self.b)
1260 diffhelpers.fix_newline(self.hunk, self.a, self.b)
1261 else:
1261 else:
1262 lr.push(l)
1262 lr.push(l)
1263
1263
1264 def complete(self):
1264 def complete(self):
1265 return len(self.a) == self.lena and len(self.b) == self.lenb
1265 return len(self.a) == self.lena and len(self.b) == self.lenb
1266
1266
1267 def _fuzzit(self, old, new, fuzz, toponly):
1267 def _fuzzit(self, old, new, fuzz, toponly):
# this removes context lines from the top and bottom of the 'old' and
# 'new' lists. It checks the hunk to make sure only context lines are
# removed, and then returns the shortened lists along with the number
# of lines trimmed from the top.
1271 fuzz = min(fuzz, len(old))
1271 fuzz = min(fuzz, len(old))
1272 if fuzz:
1272 if fuzz:
1273 top = 0
1273 top = 0
1274 bot = 0
1274 bot = 0
1275 hlen = len(self.hunk)
1275 hlen = len(self.hunk)
1276 for x in xrange(hlen - 1):
1276 for x in xrange(hlen - 1):
1277 # the hunk starts with the @@ line, so use x+1
1277 # the hunk starts with the @@ line, so use x+1
1278 if self.hunk[x + 1][0] == ' ':
1278 if self.hunk[x + 1][0] == ' ':
1279 top += 1
1279 top += 1
1280 else:
1280 else:
1281 break
1281 break
1282 if not toponly:
1282 if not toponly:
1283 for x in xrange(hlen - 1):
1283 for x in xrange(hlen - 1):
1284 if self.hunk[hlen - bot - 1][0] == ' ':
1284 if self.hunk[hlen - bot - 1][0] == ' ':
1285 bot += 1
1285 bot += 1
1286 else:
1286 else:
1287 break
1287 break
1288
1288
1289 bot = min(fuzz, bot)
1289 bot = min(fuzz, bot)
1290 top = min(fuzz, top)
1290 top = min(fuzz, top)
1291 return old[top:len(old) - bot], new[top:len(new) - bot], top
1291 return old[top:len(old) - bot], new[top:len(new) - bot], top
1292 return old, new, 0
1292 return old, new, 0
1293
1293
1294 def fuzzit(self, fuzz, toponly):
1294 def fuzzit(self, fuzz, toponly):
1295 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1295 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1296 oldstart = self.starta + top
1296 oldstart = self.starta + top
1297 newstart = self.startb + top
1297 newstart = self.startb + top
1298 # zero length hunk ranges already have their start decremented
1298 # zero length hunk ranges already have their start decremented
1299 if self.lena and oldstart > 0:
1299 if self.lena and oldstart > 0:
1300 oldstart -= 1
1300 oldstart -= 1
1301 if self.lenb and newstart > 0:
1301 if self.lenb and newstart > 0:
1302 newstart -= 1
1302 newstart -= 1
1303 return old, oldstart, new, newstart
1303 return old, oldstart, new, newstart
1304
1304
1305 class binhunk(object):
1305 class binhunk(object):
1306 'A binary patch file.'
1306 'A binary patch file.'
1307 def __init__(self, lr, fname):
1307 def __init__(self, lr, fname):
1308 self.text = None
1308 self.text = None
1309 self.delta = False
1309 self.delta = False
1310 self.hunk = ['GIT binary patch\n']
1310 self.hunk = ['GIT binary patch\n']
1311 self._fname = fname
1311 self._fname = fname
1312 self._read(lr)
1312 self._read(lr)
1313
1313
1314 def complete(self):
1314 def complete(self):
1315 return self.text is not None
1315 return self.text is not None
1316
1316
1317 def new(self, lines):
1317 def new(self, lines):
1318 if self.delta:
1318 if self.delta:
1319 return [applybindelta(self.text, ''.join(lines))]
1319 return [applybindelta(self.text, ''.join(lines))]
1320 return [self.text]
1320 return [self.text]
1321
1321
1322 def _read(self, lr):
1322 def _read(self, lr):
1323 def getline(lr, hunk):
1323 def getline(lr, hunk):
1324 l = lr.readline()
1324 l = lr.readline()
1325 hunk.append(l)
1325 hunk.append(l)
1326 return l.rstrip('\r\n')
1326 return l.rstrip('\r\n')
1327
1327
1328 size = 0
1328 size = 0
1329 while True:
1329 while True:
1330 line = getline(lr, self.hunk)
1330 line = getline(lr, self.hunk)
1331 if not line:
1331 if not line:
1332 raise PatchError(_('could not extract "%s" binary data')
1332 raise PatchError(_('could not extract "%s" binary data')
1333 % self._fname)
1333 % self._fname)
1334 if line.startswith('literal '):
1334 if line.startswith('literal '):
1335 size = int(line[8:].rstrip())
1335 size = int(line[8:].rstrip())
1336 break
1336 break
1337 if line.startswith('delta '):
1337 if line.startswith('delta '):
1338 size = int(line[6:].rstrip())
1338 size = int(line[6:].rstrip())
1339 self.delta = True
1339 self.delta = True
1340 break
1340 break
1341 dec = []
1341 dec = []
1342 line = getline(lr, self.hunk)
1342 line = getline(lr, self.hunk)
1343 while len(line) > 1:
1343 while len(line) > 1:
1344 l = line[0]
1344 l = line[0]
1345 if l <= 'Z' and l >= 'A':
1345 if l <= 'Z' and l >= 'A':
1346 l = ord(l) - ord('A') + 1
1346 l = ord(l) - ord('A') + 1
1347 else:
1347 else:
1348 l = ord(l) - ord('a') + 27
1348 l = ord(l) - ord('a') + 27
1349 try:
1349 try:
1350 dec.append(base85.b85decode(line[1:])[:l])
1350 dec.append(base85.b85decode(line[1:])[:l])
1351 except ValueError, e:
1351 except ValueError, e:
1352 raise PatchError(_('could not decode "%s" binary patch: %s')
1352 raise PatchError(_('could not decode "%s" binary patch: %s')
1353 % (self._fname, str(e)))
1353 % (self._fname, str(e)))
1354 line = getline(lr, self.hunk)
1354 line = getline(lr, self.hunk)
1355 text = zlib.decompress(''.join(dec))
1355 text = zlib.decompress(''.join(dec))
1356 if len(text) != size:
1356 if len(text) != size:
1357 raise PatchError(_('"%s" length is %d bytes, should be %d')
1357 raise PatchError(_('"%s" length is %d bytes, should be %d')
1358 % (self._fname, len(text), size))
1358 % (self._fname, len(text), size))
1359 self.text = text
1359 self.text = text
1360
1360
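
# Each data line of a 'GIT binary patch' hunk starts with a length character
# followed by base85 data: 'A'-'Z' encode payload lengths 1-26 and 'a'-'z'
# encode 27-52, exactly as decoded in binhunk._read() above. A tiny sketch of
# just that length decoding (the helper name is illustrative):

def _binlinelen(line):
    # e.g. _binlinelen('M<base85 data>') == 13
    c = line[0]
    if 'A' <= c <= 'Z':
        return ord(c) - ord('A') + 1
    return ord(c) - ord('a') + 27
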
1361 def parsefilename(str):
1361 def parsefilename(str):
1362 # --- filename \t|space stuff
1362 # --- filename \t|space stuff
1363 s = str[4:].rstrip('\r\n')
1363 s = str[4:].rstrip('\r\n')
1364 i = s.find('\t')
1364 i = s.find('\t')
1365 if i < 0:
1365 if i < 0:
1366 i = s.find(' ')
1366 i = s.find(' ')
1367 if i < 0:
1367 if i < 0:
1368 return s
1368 return s
1369 return s[:i]
1369 return s[:i]
1370
1370
1371 def parsepatch(originalchunks):
1371 def parsepatch(originalchunks):
1372 """patch -> [] of headers -> [] of hunks """
1372 """patch -> [] of headers -> [] of hunks """
1373 class parser(object):
1373 class parser(object):
1374 """patch parsing state machine"""
1374 """patch parsing state machine"""
1375 def __init__(self):
1375 def __init__(self):
1376 self.fromline = 0
1376 self.fromline = 0
1377 self.toline = 0
1377 self.toline = 0
1378 self.proc = ''
1378 self.proc = ''
1379 self.header = None
1379 self.header = None
1380 self.context = []
1380 self.context = []
1381 self.before = []
1381 self.before = []
1382 self.hunk = []
1382 self.hunk = []
1383 self.headers = []
1383 self.headers = []
1384
1384
1385 def addrange(self, limits):
1385 def addrange(self, limits):
1386 fromstart, fromend, tostart, toend, proc = limits
1386 fromstart, fromend, tostart, toend, proc = limits
1387 self.fromline = int(fromstart)
1387 self.fromline = int(fromstart)
1388 self.toline = int(tostart)
1388 self.toline = int(tostart)
1389 self.proc = proc
1389 self.proc = proc
1390
1390
1391 def addcontext(self, context):
1391 def addcontext(self, context):
1392 if self.hunk:
1392 if self.hunk:
1393 h = recordhunk(self.header, self.fromline, self.toline,
1393 h = recordhunk(self.header, self.fromline, self.toline,
1394 self.proc, self.before, self.hunk, context)
1394 self.proc, self.before, self.hunk, context)
1395 self.header.hunks.append(h)
1395 self.header.hunks.append(h)
1396 self.fromline += len(self.before) + h.removed
1396 self.fromline += len(self.before) + h.removed
1397 self.toline += len(self.before) + h.added
1397 self.toline += len(self.before) + h.added
1398 self.before = []
1398 self.before = []
1399 self.hunk = []
1399 self.hunk = []
1400 self.proc = ''
1400 self.proc = ''
1401 self.context = context
1401 self.context = context
1402
1402
1403 def addhunk(self, hunk):
1403 def addhunk(self, hunk):
1404 if self.context:
1404 if self.context:
1405 self.before = self.context
1405 self.before = self.context
1406 self.context = []
1406 self.context = []
1407 self.hunk = hunk
1407 self.hunk = hunk
1408
1408
1409 def newfile(self, hdr):
1409 def newfile(self, hdr):
1410 self.addcontext([])
1410 self.addcontext([])
1411 h = header(hdr)
1411 h = header(hdr)
1412 self.headers.append(h)
1412 self.headers.append(h)
1413 self.header = h
1413 self.header = h
1414
1414
1415 def addother(self, line):
1415 def addother(self, line):
1416 pass # 'other' lines are ignored
1416 pass # 'other' lines are ignored
1417
1417
1418 def finished(self):
1418 def finished(self):
1419 self.addcontext([])
1419 self.addcontext([])
1420 return self.headers
1420 return self.headers
1421
1421
1422 transitions = {
1422 transitions = {
1423 'file': {'context': addcontext,
1423 'file': {'context': addcontext,
1424 'file': newfile,
1424 'file': newfile,
1425 'hunk': addhunk,
1425 'hunk': addhunk,
1426 'range': addrange},
1426 'range': addrange},
1427 'context': {'file': newfile,
1427 'context': {'file': newfile,
1428 'hunk': addhunk,
1428 'hunk': addhunk,
1429 'range': addrange,
1429 'range': addrange,
1430 'other': addother},
1430 'other': addother},
1431 'hunk': {'context': addcontext,
1431 'hunk': {'context': addcontext,
1432 'file': newfile,
1432 'file': newfile,
1433 'range': addrange},
1433 'range': addrange},
1434 'range': {'context': addcontext,
1434 'range': {'context': addcontext,
1435 'hunk': addhunk},
1435 'hunk': addhunk},
1436 'other': {'other': addother},
1436 'other': {'other': addother},
1437 }
1437 }
1438
1438
1439 p = parser()
1439 p = parser()
1440 fp = cStringIO.StringIO()
1440 fp = cStringIO.StringIO()
1441 fp.write(''.join(originalchunks))
1441 fp.write(''.join(originalchunks))
1442 fp.seek(0)
1442 fp.seek(0)
1443
1443
1444 state = 'context'
1444 state = 'context'
1445 for newstate, data in scanpatch(fp):
1445 for newstate, data in scanpatch(fp):
1446 try:
1446 try:
1447 p.transitions[state][newstate](p, data)
1447 p.transitions[state][newstate](p, data)
1448 except KeyError:
1448 except KeyError:
1449 raise PatchError('unhandled transition: %s -> %s' %
1449 raise PatchError('unhandled transition: %s -> %s' %
1450 (state, newstate))
1450 (state, newstate))
1451 state = newstate
1451 state = newstate
1452 del fp
1452 del fp
1453 return p.finished()
1453 return p.finished()
1454
1454
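
# A minimal usage sketch for parsepatch(): feed it the lines of a small
# unified diff and walk the resulting header objects and their hunks (the
# sample diff text and the helper name are illustrative only):

def _parsepatchdemo():
    sample = [
        'diff -r 000000000000 a.txt\n',
        '--- a/a.txt\n',
        '+++ b/a.txt\n',
        '@@ -1,2 +1,2 @@\n',
        ' context\n',
        '-old line\n',
        '+new line\n',
    ]
    # expected: one header for 'a.txt' carrying a single hunk
    return [(h.files(), len(h.hunks)) for h in parsepatch(sample)]
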
1455 def pathtransform(path, strip, prefix):
1455 def pathtransform(path, strip, prefix):
1456 '''turn a path from a patch into a path suitable for the repository
1456 '''turn a path from a patch into a path suitable for the repository
1457
1457
1458 prefix, if not empty, is expected to be normalized with a / at the end.
1458 prefix, if not empty, is expected to be normalized with a / at the end.
1459
1459
1460 Returns (stripped components, path in repository).
1460 Returns (stripped components, path in repository).
1461
1461
1462 >>> pathtransform('a/b/c', 0, '')
1462 >>> pathtransform('a/b/c', 0, '')
1463 ('', 'a/b/c')
1463 ('', 'a/b/c')
1464 >>> pathtransform(' a/b/c ', 0, '')
1464 >>> pathtransform(' a/b/c ', 0, '')
1465 ('', ' a/b/c')
1465 ('', ' a/b/c')
1466 >>> pathtransform(' a/b/c ', 2, '')
1466 >>> pathtransform(' a/b/c ', 2, '')
1467 ('a/b/', 'c')
1467 ('a/b/', 'c')
1468 >>> pathtransform(' a//b/c ', 2, 'd/e/')
1468 >>> pathtransform(' a//b/c ', 2, 'd/e/')
1469 ('a//b/', 'd/e/c')
1469 ('a//b/', 'd/e/c')
1470 >>> pathtransform('a/b/c', 3, '')
1470 >>> pathtransform('a/b/c', 3, '')
1471 Traceback (most recent call last):
1471 Traceback (most recent call last):
1472 PatchError: unable to strip away 1 of 3 dirs from a/b/c
1472 PatchError: unable to strip away 1 of 3 dirs from a/b/c
1473 '''
1473 '''
1474 pathlen = len(path)
1474 pathlen = len(path)
1475 i = 0
1475 i = 0
1476 if strip == 0:
1476 if strip == 0:
1477 return '', path.rstrip()
1477 return '', path.rstrip()
1478 count = strip
1478 count = strip
1479 while count > 0:
1479 while count > 0:
1480 i = path.find('/', i)
1480 i = path.find('/', i)
1481 if i == -1:
1481 if i == -1:
1482 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1482 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1483 (count, strip, path))
1483 (count, strip, path))
1484 i += 1
1484 i += 1
1485 # consume '//' in the path
1485 # consume '//' in the path
1486 while i < pathlen - 1 and path[i] == '/':
1486 while i < pathlen - 1 and path[i] == '/':
1487 i += 1
1487 i += 1
1488 count -= 1
1488 count -= 1
1489 return path[:i].lstrip(), prefix + path[i:].rstrip()
1489 return path[:i].lstrip(), prefix + path[i:].rstrip()
1490
1490
1491 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
1491 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
1492 nulla = afile_orig == "/dev/null"
1492 nulla = afile_orig == "/dev/null"
1493 nullb = bfile_orig == "/dev/null"
1493 nullb = bfile_orig == "/dev/null"
1494 create = nulla and hunk.starta == 0 and hunk.lena == 0
1494 create = nulla and hunk.starta == 0 and hunk.lena == 0
1495 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1495 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1496 abase, afile = pathtransform(afile_orig, strip, prefix)
1496 abase, afile = pathtransform(afile_orig, strip, prefix)
1497 gooda = not nulla and backend.exists(afile)
1497 gooda = not nulla and backend.exists(afile)
1498 bbase, bfile = pathtransform(bfile_orig, strip, prefix)
1498 bbase, bfile = pathtransform(bfile_orig, strip, prefix)
1499 if afile == bfile:
1499 if afile == bfile:
1500 goodb = gooda
1500 goodb = gooda
1501 else:
1501 else:
1502 goodb = not nullb and backend.exists(bfile)
1502 goodb = not nullb and backend.exists(bfile)
1503 missing = not goodb and not gooda and not create
1503 missing = not goodb and not gooda and not create
1504
1504
1505 # some diff programs apparently produce patches where the afile is
1505 # some diff programs apparently produce patches where the afile is
1506 # not /dev/null, but afile starts with bfile
1506 # not /dev/null, but afile starts with bfile
1507 abasedir = afile[:afile.rfind('/') + 1]
1507 abasedir = afile[:afile.rfind('/') + 1]
1508 bbasedir = bfile[:bfile.rfind('/') + 1]
1508 bbasedir = bfile[:bfile.rfind('/') + 1]
1509 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1509 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1510 and hunk.starta == 0 and hunk.lena == 0):
1510 and hunk.starta == 0 and hunk.lena == 0):
1511 create = True
1511 create = True
1512 missing = False
1512 missing = False
1513
1513
1514 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1514 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1515 # diff is between a file and its backup. In this case, the original
1515 # diff is between a file and its backup. In this case, the original
1516 # file should be patched (see original mpatch code).
1516 # file should be patched (see original mpatch code).
1517 isbackup = (abase == bbase and bfile.startswith(afile))
1517 isbackup = (abase == bbase and bfile.startswith(afile))
1518 fname = None
1518 fname = None
1519 if not missing:
1519 if not missing:
1520 if gooda and goodb:
1520 if gooda and goodb:
1521 if isbackup:
1521 if isbackup:
1522 fname = afile
1522 fname = afile
1523 else:
1523 else:
1524 fname = bfile
1524 fname = bfile
1525 elif gooda:
1525 elif gooda:
1526 fname = afile
1526 fname = afile
1527
1527
1528 if not fname:
1528 if not fname:
1529 if not nullb:
1529 if not nullb:
1530 if isbackup:
1530 if isbackup:
1531 fname = afile
1531 fname = afile
1532 else:
1532 else:
1533 fname = bfile
1533 fname = bfile
1534 elif not nulla:
1534 elif not nulla:
1535 fname = afile
1535 fname = afile
1536 else:
1536 else:
1537 raise PatchError(_("undefined source and destination files"))
1537 raise PatchError(_("undefined source and destination files"))
1538
1538
1539 gp = patchmeta(fname)
1539 gp = patchmeta(fname)
1540 if create:
1540 if create:
1541 gp.op = 'ADD'
1541 gp.op = 'ADD'
1542 elif remove:
1542 elif remove:
1543 gp.op = 'DELETE'
1543 gp.op = 'DELETE'
1544 return gp
1544 return gp
1545
1545
1546 def scanpatch(fp):
1546 def scanpatch(fp):
1547 """like patch.iterhunks, but yield different events
1547 """like patch.iterhunks, but yield different events
1548
1548
1549 - ('file', [header_lines + fromfile + tofile])
1549 - ('file', [header_lines + fromfile + tofile])
1550 - ('context', [context_lines])
1550 - ('context', [context_lines])
1551 - ('hunk', [hunk_lines])
1551 - ('hunk', [hunk_lines])
1552 - ('range', (-start,len, +start,len, proc))
1552 - ('range', (-start,len, +start,len, proc))
1553 """
1553 """
1554 lines_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
1554 lines_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
1555 lr = linereader(fp)
1555 lr = linereader(fp)
1556
1556
1557 def scanwhile(first, p):
1557 def scanwhile(first, p):
1558 """scan lr while predicate holds"""
1558 """scan lr while predicate holds"""
1559 lines = [first]
1559 lines = [first]
1560 while True:
1560 while True:
1561 line = lr.readline()
1561 line = lr.readline()
1562 if not line:
1562 if not line:
1563 break
1563 break
1564 if p(line):
1564 if p(line):
1565 lines.append(line)
1565 lines.append(line)
1566 else:
1566 else:
1567 lr.push(line)
1567 lr.push(line)
1568 break
1568 break
1569 return lines
1569 return lines
1570
1570
1571 while True:
1571 while True:
1572 line = lr.readline()
1572 line = lr.readline()
1573 if not line:
1573 if not line:
1574 break
1574 break
1575 if line.startswith('diff --git a/') or line.startswith('diff -r '):
1575 if line.startswith('diff --git a/') or line.startswith('diff -r '):
1576 def notheader(line):
1576 def notheader(line):
1577 s = line.split(None, 1)
1577 s = line.split(None, 1)
1578 return not s or s[0] not in ('---', 'diff')
1578 return not s or s[0] not in ('---', 'diff')
1579 header = scanwhile(line, notheader)
1579 header = scanwhile(line, notheader)
1580 fromfile = lr.readline()
1580 fromfile = lr.readline()
1581 if fromfile.startswith('---'):
1581 if fromfile.startswith('---'):
1582 tofile = lr.readline()
1582 tofile = lr.readline()
1583 header += [fromfile, tofile]
1583 header += [fromfile, tofile]
1584 else:
1584 else:
1585 lr.push(fromfile)
1585 lr.push(fromfile)
1586 yield 'file', header
1586 yield 'file', header
1587 elif line[0] == ' ':
1587 elif line[0] == ' ':
1588 yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
1588 yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
1589 elif line[0] in '-+':
1589 elif line[0] in '-+':
1590 yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
1590 yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
1591 else:
1591 else:
1592 m = lines_re.match(line)
1592 m = lines_re.match(line)
1593 if m:
1593 if m:
1594 yield 'range', m.groups()
1594 yield 'range', m.groups()
1595 else:
1595 else:
1596 yield 'other', line
1596 yield 'other', line
1597
1597
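
# For a small unified diff, scanpatch() yields its events in source order; a
# sketch that collects just the event names (the sample diff and helper name
# are illustrative only):

def _scanpatchdemo():
    fp = cStringIO.StringIO(
        'diff -r 000000000000 a.txt\n'
        '--- a/a.txt\n'
        '+++ b/a.txt\n'
        '@@ -1,2 +1,2 @@\n'
        ' context\n'
        '-old line\n'
        '+new line\n')
    # expected: ['file', 'range', 'context', 'hunk']
    return [event for event, data in scanpatch(fp)]
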
1598 def scangitpatch(lr, firstline):
1598 def scangitpatch(lr, firstline):
1599 """
1599 """
1600 Git patches can emit:
1600 Git patches can emit:
1601 - rename a to b
1601 - rename a to b
1602 - change b
1602 - change b
1603 - copy a to c
1603 - copy a to c
1604 - change c
1604 - change c
1605
1605
We cannot apply this sequence as-is: the renamed 'a' could not be
found, as it would already have been renamed. And we cannot copy
from 'b' instead because 'b' would already have been changed. So
we scan the git patch for copy and rename commands so we can
perform the copies ahead of time.
1611 """
1611 """
1612 pos = 0
1612 pos = 0
1613 try:
1613 try:
1614 pos = lr.fp.tell()
1614 pos = lr.fp.tell()
1615 fp = lr.fp
1615 fp = lr.fp
1616 except IOError:
1616 except IOError:
1617 fp = cStringIO.StringIO(lr.fp.read())
1617 fp = cStringIO.StringIO(lr.fp.read())
1618 gitlr = linereader(fp)
1618 gitlr = linereader(fp)
1619 gitlr.push(firstline)
1619 gitlr.push(firstline)
1620 gitpatches = readgitpatch(gitlr)
1620 gitpatches = readgitpatch(gitlr)
1621 fp.seek(pos)
1621 fp.seek(pos)
1622 return gitpatches
1622 return gitpatches
1623
1623
1624 def iterhunks(fp):
1624 def iterhunks(fp):
1625 """Read a patch and yield the following events:
1625 """Read a patch and yield the following events:
1626 - ("file", afile, bfile, firsthunk): select a new target file.
1626 - ("file", afile, bfile, firsthunk): select a new target file.
1627 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1627 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1628 "file" event.
1628 "file" event.
1629 - ("git", gitchanges): current diff is in git format, gitchanges
1629 - ("git", gitchanges): current diff is in git format, gitchanges
1630 maps filenames to gitpatch records. Unique event.
1630 maps filenames to gitpatch records. Unique event.
1631 """
1631 """
1632 afile = ""
1632 afile = ""
1633 bfile = ""
1633 bfile = ""
1634 state = None
1634 state = None
1635 hunknum = 0
1635 hunknum = 0
1636 emitfile = newfile = False
1636 emitfile = newfile = False
1637 gitpatches = None
1637 gitpatches = None
1638
1638
1639 # our states
1639 # our states
1640 BFILE = 1
1640 BFILE = 1
1641 context = None
1641 context = None
1642 lr = linereader(fp)
1642 lr = linereader(fp)
1643
1643
1644 while True:
1644 while True:
1645 x = lr.readline()
1645 x = lr.readline()
1646 if not x:
1646 if not x:
1647 break
1647 break
1648 if state == BFILE and (
1648 if state == BFILE and (
1649 (not context and x[0] == '@')
1649 (not context and x[0] == '@')
1650 or (context is not False and x.startswith('***************'))
1650 or (context is not False and x.startswith('***************'))
1651 or x.startswith('GIT binary patch')):
1651 or x.startswith('GIT binary patch')):
1652 gp = None
1652 gp = None
1653 if (gitpatches and
1653 if (gitpatches and
1654 gitpatches[-1].ispatching(afile, bfile)):
1654 gitpatches[-1].ispatching(afile, bfile)):
1655 gp = gitpatches.pop()
1655 gp = gitpatches.pop()
1656 if x.startswith('GIT binary patch'):
1656 if x.startswith('GIT binary patch'):
1657 h = binhunk(lr, gp.path)
1657 h = binhunk(lr, gp.path)
1658 else:
1658 else:
1659 if context is None and x.startswith('***************'):
1659 if context is None and x.startswith('***************'):
1660 context = True
1660 context = True
1661 h = hunk(x, hunknum + 1, lr, context)
1661 h = hunk(x, hunknum + 1, lr, context)
1662 hunknum += 1
1662 hunknum += 1
1663 if emitfile:
1663 if emitfile:
1664 emitfile = False
1664 emitfile = False
1665 yield 'file', (afile, bfile, h, gp and gp.copy() or None)
1665 yield 'file', (afile, bfile, h, gp and gp.copy() or None)
1666 yield 'hunk', h
1666 yield 'hunk', h
1667 elif x.startswith('diff --git a/'):
1667 elif x.startswith('diff --git a/'):
1668 m = gitre.match(x.rstrip(' \r\n'))
1668 m = gitre.match(x.rstrip(' \r\n'))
1669 if not m:
1669 if not m:
1670 continue
1670 continue
1671 if gitpatches is None:
1671 if gitpatches is None:
1672 # scan whole input for git metadata
1672 # scan whole input for git metadata
1673 gitpatches = scangitpatch(lr, x)
1673 gitpatches = scangitpatch(lr, x)
1674 yield 'git', [g.copy() for g in gitpatches
1674 yield 'git', [g.copy() for g in gitpatches
1675 if g.op in ('COPY', 'RENAME')]
1675 if g.op in ('COPY', 'RENAME')]
1676 gitpatches.reverse()
1676 gitpatches.reverse()
1677 afile = 'a/' + m.group(1)
1677 afile = 'a/' + m.group(1)
1678 bfile = 'b/' + m.group(2)
1678 bfile = 'b/' + m.group(2)
1679 while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
1679 while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
1680 gp = gitpatches.pop()
1680 gp = gitpatches.pop()
1681 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1681 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1682 if not gitpatches:
1682 if not gitpatches:
1683 raise PatchError(_('failed to synchronize metadata for "%s"')
1683 raise PatchError(_('failed to synchronize metadata for "%s"')
1684 % afile[2:])
1684 % afile[2:])
1685 gp = gitpatches[-1]
1685 gp = gitpatches[-1]
1686 newfile = True
1686 newfile = True
1687 elif x.startswith('---'):
1687 elif x.startswith('---'):
1688 # check for a unified diff
1688 # check for a unified diff
1689 l2 = lr.readline()
1689 l2 = lr.readline()
1690 if not l2.startswith('+++'):
1690 if not l2.startswith('+++'):
1691 lr.push(l2)
1691 lr.push(l2)
1692 continue
1692 continue
1693 newfile = True
1693 newfile = True
1694 context = False
1694 context = False
1695 afile = parsefilename(x)
1695 afile = parsefilename(x)
1696 bfile = parsefilename(l2)
1696 bfile = parsefilename(l2)
1697 elif x.startswith('***'):
1697 elif x.startswith('***'):
1698 # check for a context diff
1698 # check for a context diff
1699 l2 = lr.readline()
1699 l2 = lr.readline()
1700 if not l2.startswith('---'):
1700 if not l2.startswith('---'):
1701 lr.push(l2)
1701 lr.push(l2)
1702 continue
1702 continue
1703 l3 = lr.readline()
1703 l3 = lr.readline()
1704 lr.push(l3)
1704 lr.push(l3)
1705 if not l3.startswith("***************"):
1705 if not l3.startswith("***************"):
1706 lr.push(l2)
1706 lr.push(l2)
1707 continue
1707 continue
1708 newfile = True
1708 newfile = True
1709 context = True
1709 context = True
1710 afile = parsefilename(x)
1710 afile = parsefilename(x)
1711 bfile = parsefilename(l2)
1711 bfile = parsefilename(l2)
1712
1712
1713 if newfile:
1713 if newfile:
1714 newfile = False
1714 newfile = False
1715 emitfile = True
1715 emitfile = True
1716 state = BFILE
1716 state = BFILE
1717 hunknum = 0
1717 hunknum = 0
1718
1718
1719 while gitpatches:
1719 while gitpatches:
1720 gp = gitpatches.pop()
1720 gp = gitpatches.pop()
1721 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1721 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1722
1722
1723 def applybindelta(binchunk, data):
1723 def applybindelta(binchunk, data):
1724 """Apply a binary delta hunk
1724 """Apply a binary delta hunk
1725 The algorithm used is the algorithm from git's patch-delta.c
1725 The algorithm used is the algorithm from git's patch-delta.c
1726 """
1726 """
1727 def deltahead(binchunk):
1727 def deltahead(binchunk):
1728 i = 0
1728 i = 0
1729 for c in binchunk:
1729 for c in binchunk:
1730 i += 1
1730 i += 1
1731 if not (ord(c) & 0x80):
1731 if not (ord(c) & 0x80):
1732 return i
1732 return i
1733 return i
1733 return i
1734 out = ""
1734 out = ""
1735 s = deltahead(binchunk)
1735 s = deltahead(binchunk)
1736 binchunk = binchunk[s:]
1736 binchunk = binchunk[s:]
1737 s = deltahead(binchunk)
1737 s = deltahead(binchunk)
1738 binchunk = binchunk[s:]
1738 binchunk = binchunk[s:]
1739 i = 0
1739 i = 0
1740 while i < len(binchunk):
1740 while i < len(binchunk):
1741 cmd = ord(binchunk[i])
1741 cmd = ord(binchunk[i])
1742 i += 1
1742 i += 1
1743 if (cmd & 0x80):
1743 if (cmd & 0x80):
1744 offset = 0
1744 offset = 0
1745 size = 0
1745 size = 0
1746 if (cmd & 0x01):
1746 if (cmd & 0x01):
1747 offset = ord(binchunk[i])
1747 offset = ord(binchunk[i])
1748 i += 1
1748 i += 1
1749 if (cmd & 0x02):
1749 if (cmd & 0x02):
1750 offset |= ord(binchunk[i]) << 8
1750 offset |= ord(binchunk[i]) << 8
1751 i += 1
1751 i += 1
1752 if (cmd & 0x04):
1752 if (cmd & 0x04):
1753 offset |= ord(binchunk[i]) << 16
1753 offset |= ord(binchunk[i]) << 16
1754 i += 1
1754 i += 1
1755 if (cmd & 0x08):
1755 if (cmd & 0x08):
1756 offset |= ord(binchunk[i]) << 24
1756 offset |= ord(binchunk[i]) << 24
1757 i += 1
1757 i += 1
1758 if (cmd & 0x10):
1758 if (cmd & 0x10):
1759 size = ord(binchunk[i])
1759 size = ord(binchunk[i])
1760 i += 1
1760 i += 1
1761 if (cmd & 0x20):
1761 if (cmd & 0x20):
1762 size |= ord(binchunk[i]) << 8
1762 size |= ord(binchunk[i]) << 8
1763 i += 1
1763 i += 1
1764 if (cmd & 0x40):
1764 if (cmd & 0x40):
1765 size |= ord(binchunk[i]) << 16
1765 size |= ord(binchunk[i]) << 16
1766 i += 1
1766 i += 1
1767 if size == 0:
1767 if size == 0:
1768 size = 0x10000
1768 size = 0x10000
1769 offset_end = offset + size
1769 offset_end = offset + size
1770 out += data[offset:offset_end]
1770 out += data[offset:offset_end]
1771 elif cmd != 0:
1771 elif cmd != 0:
1772 offset_end = i + cmd
1772 offset_end = i + cmd
1773 out += binchunk[i:offset_end]
1773 out += binchunk[i:offset_end]
1774 i += cmd
1774 i += cmd
1775 else:
1775 else:
1776 raise PatchError(_('unexpected delta opcode 0'))
1776 raise PatchError(_('unexpected delta opcode 0'))
1777 return out
1777 return out
1778
1778
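
# A worked example of the delta format handled above, using a tiny hand-built
# delta (the helper name is illustrative). The first two bytes are the
# varint-encoded source and target sizes (both 11, so one byte each); 0x91 is
# a copy command whose 0x01 bit says one offset byte follows (0) and whose
# 0x10 bit says one size byte follows (6), so it copies data[0:6]; 0x05 is an
# insert command taking the next five literal bytes.

def _applybindeltademo():
    delta = '\x0b\x0b\x91\x00\x06\x05patch'
    return applybindelta(delta, 'hello world')  # -> 'hello patch'
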
1779 def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
1779 def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
1780 """Reads a patch from fp and tries to apply it.
1780 """Reads a patch from fp and tries to apply it.
1781
1781
1782 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
1782 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
1783 there was any fuzz.
1783 there was any fuzz.
1784
1784
1785 If 'eolmode' is 'strict', the patch content and patched file are
1785 If 'eolmode' is 'strict', the patch content and patched file are
1786 read in binary mode. Otherwise, line endings are ignored when
1786 read in binary mode. Otherwise, line endings are ignored when
1787 patching then normalized according to 'eolmode'.
1787 patching then normalized according to 'eolmode'.
1788 """
1788 """
1789 return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
1789 return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
1790 prefix=prefix, eolmode=eolmode)
1790 prefix=prefix, eolmode=eolmode)
1791
1791
1792 def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
1792 def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
1793 eolmode='strict'):
1793 eolmode='strict'):
1794
1794
1795 if prefix:
1795 if prefix:
1796 # clean up double slashes, lack of trailing slashes, etc
1796 # clean up double slashes, lack of trailing slashes, etc
1797 prefix = util.normpath(prefix) + '/'
1797 prefix = util.normpath(prefix) + '/'
1798 def pstrip(p):
1798 def pstrip(p):
1799 return pathtransform(p, strip - 1, prefix)[1]
1799 return pathtransform(p, strip - 1, prefix)[1]
1800
1800
1801 rejects = 0
1801 rejects = 0
1802 err = 0
1802 err = 0
1803 current_file = None
1803 current_file = None
1804
1804
1805 for state, values in iterhunks(fp):
1805 for state, values in iterhunks(fp):
1806 if state == 'hunk':
1806 if state == 'hunk':
1807 if not current_file:
1807 if not current_file:
1808 continue
1808 continue
1809 ret = current_file.apply(values)
1809 ret = current_file.apply(values)
1810 if ret > 0:
1810 if ret > 0:
1811 err = 1
1811 err = 1
1812 elif state == 'file':
1812 elif state == 'file':
1813 if current_file:
1813 if current_file:
1814 rejects += current_file.close()
1814 rejects += current_file.close()
1815 current_file = None
1815 current_file = None
1816 afile, bfile, first_hunk, gp = values
1816 afile, bfile, first_hunk, gp = values
1817 if gp:
1817 if gp:
1818 gp.path = pstrip(gp.path)
1818 gp.path = pstrip(gp.path)
1819 if gp.oldpath:
1819 if gp.oldpath:
1820 gp.oldpath = pstrip(gp.oldpath)
1820 gp.oldpath = pstrip(gp.oldpath)
1821 else:
1821 else:
1822 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
1822 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
1823 prefix)
1823 prefix)
1824 if gp.op == 'RENAME':
1824 if gp.op == 'RENAME':
1825 backend.unlink(gp.oldpath)
1825 backend.unlink(gp.oldpath)
1826 if not first_hunk:
1826 if not first_hunk:
1827 if gp.op == 'DELETE':
1827 if gp.op == 'DELETE':
1828 backend.unlink(gp.path)
1828 backend.unlink(gp.path)
1829 continue
1829 continue
1830 data, mode = None, None
1830 data, mode = None, None
1831 if gp.op in ('RENAME', 'COPY'):
1831 if gp.op in ('RENAME', 'COPY'):
1832 data, mode = store.getfile(gp.oldpath)[:2]
1832 data, mode = store.getfile(gp.oldpath)[:2]
1833 # FIXME: failing getfile has never been handled here
1833 # FIXME: failing getfile has never been handled here
1834 assert data is not None
1834 assert data is not None
1835 if gp.mode:
1835 if gp.mode:
1836 mode = gp.mode
1836 mode = gp.mode
1837 if gp.op == 'ADD':
1837 if gp.op == 'ADD':
1838 # Added files without content have no hunk and
1838 # Added files without content have no hunk and
1839 # must be created
1839 # must be created
1840 data = ''
1840 data = ''
1841 if data or mode:
1841 if data or mode:
1842 if (gp.op in ('ADD', 'RENAME', 'COPY')
1842 if (gp.op in ('ADD', 'RENAME', 'COPY')
1843 and backend.exists(gp.path)):
1843 and backend.exists(gp.path)):
1844 raise PatchError(_("cannot create %s: destination "
1844 raise PatchError(_("cannot create %s: destination "
1845 "already exists") % gp.path)
1845 "already exists") % gp.path)
1846 backend.setfile(gp.path, data, mode, gp.oldpath)
1846 backend.setfile(gp.path, data, mode, gp.oldpath)
1847 continue
1847 continue
1848 try:
1848 try:
1849 current_file = patcher(ui, gp, backend, store,
1849 current_file = patcher(ui, gp, backend, store,
1850 eolmode=eolmode)
1850 eolmode=eolmode)
1851 except PatchError, inst:
1851 except PatchError, inst:
1852 ui.warn(str(inst) + '\n')
1852 ui.warn(str(inst) + '\n')
1853 current_file = None
1853 current_file = None
1854 rejects += 1
1854 rejects += 1
1855 continue
1855 continue
1856 elif state == 'git':
1856 elif state == 'git':
1857 for gp in values:
1857 for gp in values:
1858 path = pstrip(gp.oldpath)
1858 path = pstrip(gp.oldpath)
1859 data, mode = backend.getfile(path)
1859 data, mode = backend.getfile(path)
1860 if data is None:
1860 if data is None:
1861 # The error ignored here will trigger a getfile()
1861 # The error ignored here will trigger a getfile()
1862 # error in a place more appropriate for error
1862 # error in a place more appropriate for error
1863 # handling, and will not interrupt the patching
1863 # handling, and will not interrupt the patching
1864 # process.
1864 # process.
1865 pass
1865 pass
1866 else:
1866 else:
1867 store.setfile(path, data, mode)
1867 store.setfile(path, data, mode)
1868 else:
1868 else:
1869 raise util.Abort(_('unsupported parser state: %s') % state)
1869 raise util.Abort(_('unsupported parser state: %s') % state)
1870
1870
1871 if current_file:
1871 if current_file:
1872 rejects += current_file.close()
1872 rejects += current_file.close()
1873
1873
1874 if rejects:
1874 if rejects:
1875 return -1
1875 return -1
1876 return err
1876 return err
1877
1877
1878 def _externalpatch(ui, repo, patcher, patchname, strip, files,
1878 def _externalpatch(ui, repo, patcher, patchname, strip, files,
1879 similarity):
1879 similarity):
1880 """use <patcher> to apply <patchname> to the working directory.
1880 """use <patcher> to apply <patchname> to the working directory.
1881 returns whether patch was applied with fuzz factor."""
1881 returns whether patch was applied with fuzz factor."""
1882
1882
1883 fuzz = False
1883 fuzz = False
1884 args = []
1884 args = []
1885 cwd = repo.root
1885 cwd = repo.root
1886 if cwd:
1886 if cwd:
1887 args.append('-d %s' % util.shellquote(cwd))
1887 args.append('-d %s' % util.shellquote(cwd))
1888 fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
1888 fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
1889 util.shellquote(patchname)))
1889 util.shellquote(patchname)))
1890 try:
1890 try:
1891 for line in fp:
1891 for line in fp:
1892 line = line.rstrip()
1892 line = line.rstrip()
1893 ui.note(line + '\n')
1893 ui.note(line + '\n')
1894 if line.startswith('patching file '):
1894 if line.startswith('patching file '):
1895 pf = util.parsepatchoutput(line)
1895 pf = util.parsepatchoutput(line)
1896 printed_file = False
1896 printed_file = False
1897 files.add(pf)
1897 files.add(pf)
1898 elif line.find('with fuzz') >= 0:
1898 elif line.find('with fuzz') >= 0:
1899 fuzz = True
1899 fuzz = True
1900 if not printed_file:
1900 if not printed_file:
1901 ui.warn(pf + '\n')
1901 ui.warn(pf + '\n')
1902 printed_file = True
1902 printed_file = True
1903 ui.warn(line + '\n')
1903 ui.warn(line + '\n')
1904 elif line.find('saving rejects to file') >= 0:
1904 elif line.find('saving rejects to file') >= 0:
1905 ui.warn(line + '\n')
1905 ui.warn(line + '\n')
1906 elif line.find('FAILED') >= 0:
1906 elif line.find('FAILED') >= 0:
1907 if not printed_file:
1907 if not printed_file:
1908 ui.warn(pf + '\n')
1908 ui.warn(pf + '\n')
1909 printed_file = True
1909 printed_file = True
1910 ui.warn(line + '\n')
1910 ui.warn(line + '\n')
1911 finally:
1911 finally:
1912 if files:
1912 if files:
1913 scmutil.marktouched(repo, files, similarity)
1913 scmutil.marktouched(repo, files, similarity)
1914 code = fp.close()
1914 code = fp.close()
1915 if code:
1915 if code:
1916 raise PatchError(_("patch command failed: %s") %
1916 raise PatchError(_("patch command failed: %s") %
1917 util.explainexit(code)[0])
1917 util.explainexit(code)[0])
1918 return fuzz
1918 return fuzz
1919
1919
1920 def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
1920 def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
1921 eolmode='strict'):
1921 eolmode='strict'):
1922 if files is None:
1922 if files is None:
1923 files = set()
1923 files = set()
1924 if eolmode is None:
1924 if eolmode is None:
1925 eolmode = ui.config('patch', 'eol', 'strict')
1925 eolmode = ui.config('patch', 'eol', 'strict')
1926 if eolmode.lower() not in eolmodes:
1926 if eolmode.lower() not in eolmodes:
1927 raise util.Abort(_('unsupported line endings type: %s') % eolmode)
1927 raise util.Abort(_('unsupported line endings type: %s') % eolmode)
1928 eolmode = eolmode.lower()
1928 eolmode = eolmode.lower()
1929
1929
1930 store = filestore()
1930 store = filestore()
1931 try:
1931 try:
1932 fp = open(patchobj, 'rb')
1932 fp = open(patchobj, 'rb')
1933 except TypeError:
1933 except TypeError:
1934 fp = patchobj
1934 fp = patchobj
1935 try:
1935 try:
1936 ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
1936 ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
1937 eolmode=eolmode)
1937 eolmode=eolmode)
1938 finally:
1938 finally:
1939 if fp != patchobj:
1939 if fp != patchobj:
1940 fp.close()
1940 fp.close()
1941 files.update(backend.close())
1941 files.update(backend.close())
1942 store.close()
1942 store.close()
1943 if ret < 0:
1943 if ret < 0:
1944 raise PatchError(_('patch failed to apply'))
1944 raise PatchError(_('patch failed to apply'))
1945 return ret > 0
1945 return ret > 0
1946
1946
1947 def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
1947 def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
1948 eolmode='strict', similarity=0):
1948 eolmode='strict', similarity=0):
1949 """use builtin patch to apply <patchobj> to the working directory.
1949 """use builtin patch to apply <patchobj> to the working directory.
1950 returns whether patch was applied with fuzz factor."""
1950 returns whether patch was applied with fuzz factor."""
1951 backend = workingbackend(ui, repo, similarity)
1951 backend = workingbackend(ui, repo, similarity)
1952 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
1952 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
1953
1953
1954 def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
1954 def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
1955 eolmode='strict'):
1955 eolmode='strict'):
1956 backend = repobackend(ui, repo, ctx, store)
1956 backend = repobackend(ui, repo, ctx, store)
1957 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
1957 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
1958
1958
def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
          similarity=0):
    """Apply <patchname> to the working directory.

    'eolmode' specifies how end of lines should be handled. It can be:
    - 'strict': inputs are read in binary mode, EOLs are preserved
    - 'crlf': EOLs are ignored when patching and reset to CRLF
    - 'lf': EOLs are ignored when patching and reset to LF
    - None: get it from user settings, default to 'strict'
    'eolmode' is ignored when using an external patcher program.

    Returns whether patch was applied with fuzz factor.
    """
    patcher = ui.config('ui', 'patch')
    if files is None:
        files = set()
    if patcher:
        return _externalpatch(ui, repo, patcher, patchname, strip,
                              files, similarity)
    return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
                         similarity)

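The docstring above describes the usual entry point for applying a patch. As a minimal, illustrative sketch (not part of patch.py), an extension-style helper might drive it as follows; the helper name and the 'fix.patch' filename are assumptions:

from mercurial import patch as patchmod, util

def applyfix(ui, repo, patchpath='fix.patch'):
    # hypothetical helper: apply a patch file and report what it touched
    files = set()
    try:
        fuzz = patchmod.patch(ui, repo, patchpath, strip=1, files=files)
    except patchmod.PatchError as err:
        raise util.Abort(str(err))
    if fuzz:
        ui.warn('patch applied with fuzz\n')
    ui.status('%d files touched\n' % len(files))
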
def changedfiles(ui, repo, patchpath, strip=1):
    backend = fsbackend(ui, repo.root)
    fp = open(patchpath, 'rb')
    try:
        changed = set()
        for state, values in iterhunks(fp):
            if state == 'file':
                afile, bfile, first_hunk, gp = values
                if gp:
                    gp.path = pathtransform(gp.path, strip - 1, '')[1]
                    if gp.oldpath:
                        gp.oldpath = pathtransform(gp.oldpath, strip - 1, '')[1]
                else:
                    gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
                                       '')
                changed.add(gp.path)
                if gp.op == 'RENAME':
                    changed.add(gp.oldpath)
            elif state not in ('hunk', 'git'):
                raise util.Abort(_('unsupported parser state: %s') % state)
        return changed
    finally:
        fp.close()

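A hedged usage sketch for the helper above: list the files a patch on disk would touch, without applying it (the wrapper name is illustrative only):

from mercurial import patch as patchmod

def previewpatch(ui, repo, patchpath):
    # print one path per line, as changedfiles() reports them
    for f in sorted(patchmod.changedfiles(ui, repo, patchpath, strip=1)):
        ui.write('%s\n' % f)
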
class GitDiffRequired(Exception):
    pass

def diffallopts(ui, opts=None, untrusted=False, section='diff'):
    '''return diffopts with all features supported and parsed'''
    return difffeatureopts(ui, opts=opts, untrusted=untrusted, section=section,
                           git=True, whitespace=True, formatchanging=True)

diffopts = diffallopts

def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
                    whitespace=False, formatchanging=False):
    '''return diffopts with only opted-in features parsed

    Features:
    - git: git-style diffs
    - whitespace: whitespace options like ignoreblanklines and ignorews
    - formatchanging: options that will likely break or cause correctness issues
      with most diff parsers
    '''
    def get(key, name=None, getter=ui.configbool, forceplain=None):
        if opts:
            v = opts.get(key)
            if v:
                return v
        if forceplain is not None and ui.plain():
            return forceplain
        return getter(section, name or key, None, untrusted=untrusted)

    # core options, expected to be understood by every diff parser
    buildopts = {
        'nodates': get('nodates'),
        'showfunc': get('show_function', 'showfunc'),
        'context': get('unified', getter=ui.config),
    }

    if git:
        buildopts['git'] = get('git')
    if whitespace:
        buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
        buildopts['ignorewsamount'] = get('ignore_space_change',
                                          'ignorewsamount')
        buildopts['ignoreblanklines'] = get('ignore_blank_lines',
                                            'ignoreblanklines')
    if formatchanging:
        buildopts['text'] = opts and opts.get('text')
        buildopts['nobinary'] = get('nobinary')
        buildopts['noprefix'] = get('noprefix', forceplain=False)

    return mdiff.diffopts(**buildopts)

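A small sketch of the opt-in behaviour: request only the whitespace feature group, so the ignore* settings are honoured while git and format-changing options stay at their mdiff defaults. The opts key mirrors the command-line option name; the wrapper name is an assumption:

from mercurial import patch as patchmod

def whitespacediffopts(ui):
    # illustrative: a -w style flag would typically arrive as this opts dict
    return patchmod.difffeatureopts(ui, opts={'ignore_all_space': True},
                                    whitespace=True)
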
def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
         losedatafn=None, prefix=''):
    '''yields diff of changes to files between two nodes, or node and
    working directory.

    if node1 is None, use first dirstate parent instead.
    if node2 is None, compare node1 with working directory.

    losedatafn(**kwarg) is a callable run when opts.upgrade=True and
    every time some change cannot be represented with the current
    patch format. Return False to upgrade to git patch format, True to
    accept the loss or raise an exception to abort the diff. It is
    called with the name of current file being diffed as 'fn'. If set
    to None, patches will always be upgraded to git format when
    necessary.

    prefix is a filename prefix that is prepended to all filenames on
    display (used for subrepos).
    '''

    if opts is None:
        opts = mdiff.defaultopts

    if not node1 and not node2:
        node1 = repo.dirstate.p1()

    def lrugetfilectx():
        cache = {}
        order = util.deque()
        def getfilectx(f, ctx):
            fctx = ctx.filectx(f, filelog=cache.get(f))
            if f not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[f] = fctx.filelog()
            else:
                order.remove(f)
            order.append(f)
            return fctx
        return getfilectx
    getfilectx = lrugetfilectx()

    ctx1 = repo[node1]
    ctx2 = repo[node2]

    if not changes:
        changes = repo.status(ctx1, ctx2, match=match)
    modified, added, removed = changes[:3]

    if not modified and not added and not removed:
        return []

    if repo.ui.debugflag:
        hexfunc = hex
    else:
        hexfunc = short
    revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]

    copy = {}
    if opts.git or opts.upgrade:
        copy = copies.pathcopies(ctx1, ctx2)

    def difffn(opts, losedata):
        return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
                       copy, getfilectx, opts, losedata, prefix)
    if opts.upgrade and not opts.git:
        try:
            def losedata(fn):
                if not losedatafn or not losedatafn(fn=fn):
                    raise GitDiffRequired
            # Buffer the whole output until we are sure it can be generated
            return list(difffn(opts.copy(git=False), losedata))
        except GitDiffRequired:
            return difffn(opts.copy(git=True), None)
    else:
        return difffn(opts, None)

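A minimal consumer sketch for diff(): stream the generated chunks straight to the ui. With node1 and node2 left as None this is the working-directory comparison described in the docstring; the wrapper name is an assumption:

from mercurial import patch as patchmod

def writediff(ui, repo, node1=None, node2=None):
    opts = patchmod.diffallopts(ui)
    for chunk in patchmod.diff(repo, node1, node2, opts=opts):
        ui.write(chunk)
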
def difflabel(func, *args, **kw):
    '''yields 2-tuples of (output, label) based on the output of func()'''
    headprefixes = [('diff', 'diff.diffline'),
                    ('copy', 'diff.extended'),
                    ('rename', 'diff.extended'),
                    ('old', 'diff.extended'),
                    ('new', 'diff.extended'),
                    ('deleted', 'diff.extended'),
                    ('---', 'diff.file_a'),
                    ('+++', 'diff.file_b')]
    textprefixes = [('@', 'diff.hunk'),
                    ('-', 'diff.deleted'),
                    ('+', 'diff.inserted')]
    head = False
    for chunk in func(*args, **kw):
        lines = chunk.split('\n')
        for i, line in enumerate(lines):
            if i != 0:
                yield ('\n', '')
            if head:
                if line.startswith('@'):
                    head = False
            else:
                if line and line[0] not in ' +-@\\':
                    head = True
            stripline = line
            diffline = False
            if not head and line and line[0] in '+-':
                # highlight tabs and trailing whitespace, but only in
                # changed lines
                stripline = line.rstrip()
                diffline = True

            prefixes = textprefixes
            if head:
                prefixes = headprefixes
            for prefix, label in prefixes:
                if stripline.startswith(prefix):
                    if diffline:
                        for token in tabsplitter.findall(stripline):
                            if '\t' == token[0]:
                                yield (token, 'diff.tab')
                            else:
                                yield (token, label)
                    else:
                        yield (stripline, label)
                    break
            else:
                yield (line, '')
            if line != stripline:
                yield (line[len(stripline):], 'diff.trailingwhitespace')

def diffui(*args, **kw):
    '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
    return difflabel(diff, *args, **kw)

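difflabel() is normally reached through diffui() above; a sketch of how the (output, label) pairs are meant to be consumed, assuming a colour-capable ui and an illustrative wrapper name:

from mercurial import patch as patchmod

def writelabeleddiff(ui, repo, node1=None, node2=None):
    # labels let colour extensions style hunks, file headers and so on
    for output, label in patchmod.diffui(repo, node1, node2,
                                         opts=patchmod.diffallopts(ui)):
        ui.write(output, label=label)
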
def _filepairs(ctx1, modified, added, removed, copy, opts):
    '''generates tuples (f1, f2, copyop), where f1 is the name of the file
    before and f2 is the name after. For added files, f1 will be None,
    and for removed files, f2 will be None. copyop may be set to None, 'copy'
    or 'rename' (the latter two only if opts.git is set).'''
    gone = set()

    copyto = dict([(v, k) for k, v in copy.items()])

    addedset, removedset = set(added), set(removed)
    # Fix up added, since merged-in additions appear as
    # modifications during merges
    for f in modified:
        if f not in ctx1:
            addedset.add(f)

    for f in sorted(modified + added + removed):
        copyop = None
        f1, f2 = f, f
        if f in addedset:
            f1 = None
            if f in copy:
                if opts.git:
                    f1 = copy[f]
                    if f1 in removedset and f1 not in gone:
                        copyop = 'rename'
                        gone.add(f1)
                    else:
                        copyop = 'copy'
        elif f in removedset:
            f2 = None
            if opts.git:
                # have we already reported a copy above?
                if (f in copyto and copyto[f] in addedset
                    and copy[copyto[f]] == f):
                    continue
        yield f1, f2, copyop

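A hedged sketch of how the pairs come out when the working directory is compared against '.'. _filepairs is an internal helper, so this is illustration only and the surrounding names are assumptions:

from mercurial import copies, mdiff, patch as patchmod

def showpairs(ui, repo):
    ctx1, ctx2 = repo['.'], repo[None]
    modified, added, removed = repo.status(ctx1, ctx2)[:3]
    copy = copies.pathcopies(ctx1, ctx2)
    for f1, f2, copyop in patchmod._filepairs(ctx1, modified, added, removed,
                                              copy, mdiff.diffopts(git=True)):
        # f1 is None for added files, f2 is None for removed ones
        ui.write('%s -> %s (%s)\n' % (f1, f2, copyop))
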
def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
            copy, getfilectx, opts, losedatafn, prefix):
    '''given input data, generate a diff and yield it in blocks

    If generating a diff would lose data like flags or binary data and
    losedatafn is not None, it will be called.

    prefix is added to every path in the diff output.'''

    def gitindex(text):
        if not text:
            text = ""
        l = len(text)
        s = util.sha1('blob %d\0' % l)
        s.update(text)
        return s.hexdigest()

    if opts.noprefix:
        aprefix = bprefix = ''
    else:
        aprefix = 'a/'
        bprefix = 'b/'

    def diffline(f, revs):
        revinfo = ' '.join(["-r %s" % rev for rev in revs])
        return 'diff %s %s' % (revinfo, f)

    date1 = util.datestr(ctx1.date())
    date2 = util.datestr(ctx2.date())

    gitmode = {'l': '120000', 'x': '100755', '': '100644'}

    for f1, f2, copyop in _filepairs(
            ctx1, modified, added, removed, copy, opts):
        content1 = None
        content2 = None
        flag1 = None
        flag2 = None
        if f1:
            content1 = getfilectx(f1, ctx1).data()
            if opts.git or losedatafn:
                flag1 = ctx1.flags(f1)
        if f2:
            content2 = getfilectx(f2, ctx2).data()
            if opts.git or losedatafn:
                flag2 = ctx2.flags(f2)
        binary = False
        if opts.git or losedatafn:
            binary = util.binary(content1) or util.binary(content2)

        if losedatafn and not opts.git:
            if (binary or
                # copy/rename
                f2 in copy or
                # empty file creation
                (not f1 and not content2) or
                # empty file deletion
                (not content1 and not f2) or
                # create with flags
                (not f1 and flag2) or
                # change flags
                (f1 and f2 and flag1 != flag2)):
                losedatafn(f2 or f1)

        path1 = posixpath.join(prefix, f1 or f2)
        path2 = posixpath.join(prefix, f2 or f1)
        header = []
        if opts.git:
            header.append('diff --git %s%s %s%s' %
                          (aprefix, path1, bprefix, path2))
            if not f1: # added
                header.append('new file mode %s' % gitmode[flag2])
            elif not f2: # removed
                header.append('deleted file mode %s' % gitmode[flag1])
            else: # modified/copied/renamed
                mode1, mode2 = gitmode[flag1], gitmode[flag2]
                if mode1 != mode2:
                    header.append('old mode %s' % mode1)
                    header.append('new mode %s' % mode2)
                if copyop is not None:
                    header.append('%s from %s' % (copyop, path1))
                    header.append('%s to %s' % (copyop, path2))
        elif revs and not repo.ui.quiet:
            header.append(diffline(path1, revs))

        if binary and opts.git and not opts.nobinary:
            text = mdiff.b85diff(content1, content2)
            if text:
                header.append('index %s..%s' %
                              (gitindex(content1), gitindex(content2)))
        else:
            text = mdiff.unidiff(content1, date1,
                                 content2, date2,
                                 path1, path2, opts=opts)
        if header and (text or len(header) > 1):
            yield '\n'.join(header) + '\n'
        if text:
            yield text

def diffstatsum(stats):
    maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
    for f, a, r, b in stats:
        maxfile = max(maxfile, encoding.colwidth(f))
        maxtotal = max(maxtotal, a + r)
        addtotal += a
        removetotal += r
        binary = binary or b

    return maxfile, maxtotal, addtotal, removetotal, binary

def diffstatdata(lines):
    diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')

    results = []
    filename, adds, removes, isbinary = None, 0, 0, False

    def addresult():
        if filename:
            results.append((filename, adds, removes, isbinary))

    for line in lines:
        if line.startswith('diff'):
            addresult()
            # set numbers to 0 anyway when starting new file
            adds, removes, isbinary = 0, 0, False
            if line.startswith('diff --git a/'):
                filename = gitre.search(line).group(2)
            elif line.startswith('diff -r'):
                # format: "diff -r ... -r ... filename"
                filename = diffre.search(line).group(1)
        elif line.startswith('+') and not line.startswith('+++ '):
            adds += 1
        elif line.startswith('-') and not line.startswith('--- '):
            removes += 1
        elif (line.startswith('GIT binary patch') or
              line.startswith('Binary file')):
            isbinary = True
    addresult()
    return results

def diffstat(lines, width=80, git=False):
    output = []
    stats = diffstatdata(lines)
    maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)

    countwidth = len(str(maxtotal))
    if hasbinary and countwidth < 3:
        countwidth = 3
    graphwidth = width - countwidth - maxname - 6
    if graphwidth < 10:
        graphwidth = 10

    def scale(i):
        if maxtotal <= graphwidth:
            return i
        # If diffstat runs out of room it doesn't print anything,
        # which isn't very useful, so always print at least one + or -
        # if there were at least some changes.
        return max(i * graphwidth // maxtotal, int(bool(i)))

    for filename, adds, removes, isbinary in stats:
        if isbinary:
            count = 'Bin'
        else:
            count = adds + removes
        pluses = '+' * scale(adds)
        minuses = '-' * scale(removes)
        output.append(' %s%s | %*s %s%s\n' %
                      (filename, ' ' * (maxname - encoding.colwidth(filename)),
                       countwidth, count, pluses, minuses))

    if stats:
        output.append(_(' %d files changed, %d insertions(+), '
                        '%d deletions(-)\n')
                      % (len(stats), totaladds, totalremoves))

    return ''.join(output)

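A sketch wiring diff() into diffstat(): the generated chunks may span many lines, so they are flattened back into individual lines first. The helper name is an assumption:

from mercurial import patch as patchmod

def writediffstat(ui, repo, width=80):
    chunks = patchmod.diff(repo, opts=patchmod.diffallopts(ui))
    lines = ''.join(chunks).splitlines(True)
    ui.write(patchmod.diffstat(lines, width=width))
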
def diffstatui(*args, **kw):
    '''like diffstat(), but yields 2-tuples of (output, label) for
    ui.write()
    '''

    for line in diffstat(*args, **kw).splitlines():
        if line and line[-1] in '+-':
            name, graph = line.rsplit(' ', 1)
            yield (name + ' ', '')
            m = re.search(r'\++', graph)
            if m:
                yield (m.group(0), 'diffstat.inserted')
            m = re.search(r'-+', graph)
            if m:
                yield (m.group(0), 'diffstat.deleted')
        else:
            yield (line, '')
        yield ('\n', '')

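And the labelled variant, a sketch mirroring writediffstat() above for colour-aware output; again the wrapper name is illustrative:

from mercurial import patch as patchmod

def writediffstatui(ui, repo):
    chunks = patchmod.diff(repo, opts=patchmod.diffallopts(ui))
    lines = ''.join(chunks).splitlines(True)
    for output, label in patchmod.diffstatui(lines):
        ui.write(output, label=label)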