patch.pathtransform: add doctests...
Siddharth Agarwal
r24243:daee2039 default
@@ -1,1966 +1,1982 @@
1 # patch.py - patch file parsing routines
1 # patch.py - patch file parsing routines
2 #
2 #
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 import cStringIO, email, os, errno, re, posixpath
9 import cStringIO, email, os, errno, re, posixpath
10 import tempfile, zlib, shutil
10 import tempfile, zlib, shutil
11 # On python2.4 you have to import these by name or they fail to
11 # On python2.4 you have to import these by name or they fail to
12 # load. This was not a problem on Python 2.7.
12 # load. This was not a problem on Python 2.7.
13 import email.Generator
13 import email.Generator
14 import email.Parser
14 import email.Parser
15
15
16 from i18n import _
16 from i18n import _
17 from node import hex, short
17 from node import hex, short
18 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding, error
18 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding, error
19
19
20 gitre = re.compile('diff --git a/(.*) b/(.*)')
20 gitre = re.compile('diff --git a/(.*) b/(.*)')
21 tabsplitter = re.compile(r'(\t+|[^\t]+)')
21 tabsplitter = re.compile(r'(\t+|[^\t]+)')
22
22
23 class PatchError(Exception):
23 class PatchError(Exception):
24 pass
24 pass
25
25
26
26
27 # public functions
27 # public functions
28
28
29 def split(stream):
29 def split(stream):
30 '''return an iterator of individual patches from a stream'''
30 '''return an iterator of individual patches from a stream'''
31 def isheader(line, inheader):
31 def isheader(line, inheader):
32 if inheader and line[0] in (' ', '\t'):
32 if inheader and line[0] in (' ', '\t'):
33 # continuation
33 # continuation
34 return True
34 return True
35 if line[0] in (' ', '-', '+'):
35 if line[0] in (' ', '-', '+'):
36 # diff line - don't check for header pattern in there
36 # diff line - don't check for header pattern in there
37 return False
37 return False
38 l = line.split(': ', 1)
38 l = line.split(': ', 1)
39 return len(l) == 2 and ' ' not in l[0]
39 return len(l) == 2 and ' ' not in l[0]
40
40
41 def chunk(lines):
41 def chunk(lines):
42 return cStringIO.StringIO(''.join(lines))
42 return cStringIO.StringIO(''.join(lines))
43
43
44 def hgsplit(stream, cur):
44 def hgsplit(stream, cur):
45 inheader = True
45 inheader = True
46
46
47 for line in stream:
47 for line in stream:
48 if not line.strip():
48 if not line.strip():
49 inheader = False
49 inheader = False
50 if not inheader and line.startswith('# HG changeset patch'):
50 if not inheader and line.startswith('# HG changeset patch'):
51 yield chunk(cur)
51 yield chunk(cur)
52 cur = []
52 cur = []
53 inheader = True
53 inheader = True
54
54
55 cur.append(line)
55 cur.append(line)
56
56
57 if cur:
57 if cur:
58 yield chunk(cur)
58 yield chunk(cur)
59
59
60 def mboxsplit(stream, cur):
60 def mboxsplit(stream, cur):
61 for line in stream:
61 for line in stream:
62 if line.startswith('From '):
62 if line.startswith('From '):
63 for c in split(chunk(cur[1:])):
63 for c in split(chunk(cur[1:])):
64 yield c
64 yield c
65 cur = []
65 cur = []
66
66
67 cur.append(line)
67 cur.append(line)
68
68
69 if cur:
69 if cur:
70 for c in split(chunk(cur[1:])):
70 for c in split(chunk(cur[1:])):
71 yield c
71 yield c
72
72
73 def mimesplit(stream, cur):
73 def mimesplit(stream, cur):
74 def msgfp(m):
74 def msgfp(m):
75 fp = cStringIO.StringIO()
75 fp = cStringIO.StringIO()
76 g = email.Generator.Generator(fp, mangle_from_=False)
76 g = email.Generator.Generator(fp, mangle_from_=False)
77 g.flatten(m)
77 g.flatten(m)
78 fp.seek(0)
78 fp.seek(0)
79 return fp
79 return fp
80
80
81 for line in stream:
81 for line in stream:
82 cur.append(line)
82 cur.append(line)
83 c = chunk(cur)
83 c = chunk(cur)
84
84
85 m = email.Parser.Parser().parse(c)
85 m = email.Parser.Parser().parse(c)
86 if not m.is_multipart():
86 if not m.is_multipart():
87 yield msgfp(m)
87 yield msgfp(m)
88 else:
88 else:
89 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
89 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
90 for part in m.walk():
90 for part in m.walk():
91 ct = part.get_content_type()
91 ct = part.get_content_type()
92 if ct not in ok_types:
92 if ct not in ok_types:
93 continue
93 continue
94 yield msgfp(part)
94 yield msgfp(part)
95
95
96 def headersplit(stream, cur):
96 def headersplit(stream, cur):
97 inheader = False
97 inheader = False
98
98
99 for line in stream:
99 for line in stream:
100 if not inheader and isheader(line, inheader):
100 if not inheader and isheader(line, inheader):
101 yield chunk(cur)
101 yield chunk(cur)
102 cur = []
102 cur = []
103 inheader = True
103 inheader = True
104 if inheader and not isheader(line, inheader):
104 if inheader and not isheader(line, inheader):
105 inheader = False
105 inheader = False
106
106
107 cur.append(line)
107 cur.append(line)
108
108
109 if cur:
109 if cur:
110 yield chunk(cur)
110 yield chunk(cur)
111
111
112 def remainder(cur):
112 def remainder(cur):
113 yield chunk(cur)
113 yield chunk(cur)
114
114
115 class fiter(object):
115 class fiter(object):
116 def __init__(self, fp):
116 def __init__(self, fp):
117 self.fp = fp
117 self.fp = fp
118
118
119 def __iter__(self):
119 def __iter__(self):
120 return self
120 return self
121
121
122 def next(self):
122 def next(self):
123 l = self.fp.readline()
123 l = self.fp.readline()
124 if not l:
124 if not l:
125 raise StopIteration
125 raise StopIteration
126 return l
126 return l
127
127
128 inheader = False
128 inheader = False
129 cur = []
129 cur = []
130
130
131 mimeheaders = ['content-type']
131 mimeheaders = ['content-type']
132
132
133 if not util.safehasattr(stream, 'next'):
133 if not util.safehasattr(stream, 'next'):
134 # http responses, for example, have readline but not next
134 # http responses, for example, have readline but not next
135 stream = fiter(stream)
135 stream = fiter(stream)
136
136
137 for line in stream:
137 for line in stream:
138 cur.append(line)
138 cur.append(line)
139 if line.startswith('# HG changeset patch'):
139 if line.startswith('# HG changeset patch'):
140 return hgsplit(stream, cur)
140 return hgsplit(stream, cur)
141 elif line.startswith('From '):
141 elif line.startswith('From '):
142 return mboxsplit(stream, cur)
142 return mboxsplit(stream, cur)
143 elif isheader(line, inheader):
143 elif isheader(line, inheader):
144 inheader = True
144 inheader = True
145 if line.split(':', 1)[0].lower() in mimeheaders:
145 if line.split(':', 1)[0].lower() in mimeheaders:
146 # let email parser handle this
146 # let email parser handle this
147 return mimesplit(stream, cur)
147 return mimesplit(stream, cur)
148 elif line.startswith('--- ') and inheader:
148 elif line.startswith('--- ') and inheader:
149 # No evil headers seen before the diff starts, split by hand
149 # No evil headers seen before the diff starts, split by hand
150 return headersplit(stream, cur)
150 return headersplit(stream, cur)
151 # Not enough info, keep reading
151 # Not enough info, keep reading
152
152
153 # if we are here, we have a very plain patch
153 # if we are here, we have a very plain patch
154 return remainder(cur)
154 return remainder(cur)
155
155
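The splitter above dispatches on what the first interesting line looks like: an hg export header, an mbox "From " line, RFC 2822-style headers, or a bare diff. A minimal usage sketch, assuming this module is importable as mercurial.patch under Python 2; the mbox content and message bodies below are fabricated for illustration:

import cStringIO
from mercurial import patch

# two mbox-style messages, each carrying a trivial diff (illustrative only)
data = ('From sender Mon Jan  1 00:00:00 2007\n'
        'Subject: [PATCH 1 of 2] first\n'
        '\n'
        '--- a/f\n'
        '+++ b/f\n'
        'From sender Mon Jan  1 00:00:00 2007\n'
        'Subject: [PATCH 2 of 2] second\n'
        '\n'
        '--- a/f\n'
        '+++ b/f\n')

# split() returns an iterator of file-like chunks, one per patch
chunks = list(patch.split(cStringIO.StringIO(data)))
assert len(chunks) == 2
assert 'first' in chunks[0].read()
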
156 def extract(ui, fileobj):
156 def extract(ui, fileobj):
157 '''extract patch from data read from fileobj.
157 '''extract patch from data read from fileobj.
158
158
159 patch can be a normal patch or contained in an email message.
159 patch can be a normal patch or contained in an email message.
160
160
161 return tuple (filename, message, user, date, branch, node, p1, p2).
161 return tuple (filename, message, user, date, branch, node, p1, p2).
162 Any item in the returned tuple can be None. If filename is None,
162 Any item in the returned tuple can be None. If filename is None,
163 fileobj did not contain a patch. Caller must unlink filename when done.'''
163 fileobj did not contain a patch. Caller must unlink filename when done.'''
164
164
165 # attempt to detect the start of a patch
165 # attempt to detect the start of a patch
166 # (this heuristic is borrowed from quilt)
166 # (this heuristic is borrowed from quilt)
167 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
167 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
168 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
168 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
169 r'---[ \t].*?^\+\+\+[ \t]|'
169 r'---[ \t].*?^\+\+\+[ \t]|'
170 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
170 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
171
171
172 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
172 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
173 tmpfp = os.fdopen(fd, 'w')
173 tmpfp = os.fdopen(fd, 'w')
174 try:
174 try:
175 msg = email.Parser.Parser().parse(fileobj)
175 msg = email.Parser.Parser().parse(fileobj)
176
176
177 subject = msg['Subject']
177 subject = msg['Subject']
178 user = msg['From']
178 user = msg['From']
179 if not subject and not user:
179 if not subject and not user:
180 # Not an email, restore parsed headers if any
180 # Not an email, restore parsed headers if any
181 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
181 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
182
182
183 # should try to parse msg['Date']
183 # should try to parse msg['Date']
184 date = None
184 date = None
185 nodeid = None
185 nodeid = None
186 branch = None
186 branch = None
187 parents = []
187 parents = []
188
188
189 if subject:
189 if subject:
190 if subject.startswith('[PATCH'):
190 if subject.startswith('[PATCH'):
191 pend = subject.find(']')
191 pend = subject.find(']')
192 if pend >= 0:
192 if pend >= 0:
193 subject = subject[pend + 1:].lstrip()
193 subject = subject[pend + 1:].lstrip()
194 subject = re.sub(r'\n[ \t]+', ' ', subject)
194 subject = re.sub(r'\n[ \t]+', ' ', subject)
195 ui.debug('Subject: %s\n' % subject)
195 ui.debug('Subject: %s\n' % subject)
196 if user:
196 if user:
197 ui.debug('From: %s\n' % user)
197 ui.debug('From: %s\n' % user)
198 diffs_seen = 0
198 diffs_seen = 0
199 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
199 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
200 message = ''
200 message = ''
201 for part in msg.walk():
201 for part in msg.walk():
202 content_type = part.get_content_type()
202 content_type = part.get_content_type()
203 ui.debug('Content-Type: %s\n' % content_type)
203 ui.debug('Content-Type: %s\n' % content_type)
204 if content_type not in ok_types:
204 if content_type not in ok_types:
205 continue
205 continue
206 payload = part.get_payload(decode=True)
206 payload = part.get_payload(decode=True)
207 m = diffre.search(payload)
207 m = diffre.search(payload)
208 if m:
208 if m:
209 hgpatch = False
209 hgpatch = False
210 hgpatchheader = False
210 hgpatchheader = False
211 ignoretext = False
211 ignoretext = False
212
212
213 ui.debug('found patch at byte %d\n' % m.start(0))
213 ui.debug('found patch at byte %d\n' % m.start(0))
214 diffs_seen += 1
214 diffs_seen += 1
215 cfp = cStringIO.StringIO()
215 cfp = cStringIO.StringIO()
216 for line in payload[:m.start(0)].splitlines():
216 for line in payload[:m.start(0)].splitlines():
217 if line.startswith('# HG changeset patch') and not hgpatch:
217 if line.startswith('# HG changeset patch') and not hgpatch:
218 ui.debug('patch generated by hg export\n')
218 ui.debug('patch generated by hg export\n')
219 hgpatch = True
219 hgpatch = True
220 hgpatchheader = True
220 hgpatchheader = True
221 # drop earlier commit message content
221 # drop earlier commit message content
222 cfp.seek(0)
222 cfp.seek(0)
223 cfp.truncate()
223 cfp.truncate()
224 subject = None
224 subject = None
225 elif hgpatchheader:
225 elif hgpatchheader:
226 if line.startswith('# User '):
226 if line.startswith('# User '):
227 user = line[7:]
227 user = line[7:]
228 ui.debug('From: %s\n' % user)
228 ui.debug('From: %s\n' % user)
229 elif line.startswith("# Date "):
229 elif line.startswith("# Date "):
230 date = line[7:]
230 date = line[7:]
231 elif line.startswith("# Branch "):
231 elif line.startswith("# Branch "):
232 branch = line[9:]
232 branch = line[9:]
233 elif line.startswith("# Node ID "):
233 elif line.startswith("# Node ID "):
234 nodeid = line[10:]
234 nodeid = line[10:]
235 elif line.startswith("# Parent "):
235 elif line.startswith("# Parent "):
236 parents.append(line[9:].lstrip())
236 parents.append(line[9:].lstrip())
237 elif not line.startswith("# "):
237 elif not line.startswith("# "):
238 hgpatchheader = False
238 hgpatchheader = False
239 elif line == '---':
239 elif line == '---':
240 ignoretext = True
240 ignoretext = True
241 if not hgpatchheader and not ignoretext:
241 if not hgpatchheader and not ignoretext:
242 cfp.write(line)
242 cfp.write(line)
243 cfp.write('\n')
243 cfp.write('\n')
244 message = cfp.getvalue()
244 message = cfp.getvalue()
245 if tmpfp:
245 if tmpfp:
246 tmpfp.write(payload)
246 tmpfp.write(payload)
247 if not payload.endswith('\n'):
247 if not payload.endswith('\n'):
248 tmpfp.write('\n')
248 tmpfp.write('\n')
249 elif not diffs_seen and message and content_type == 'text/plain':
249 elif not diffs_seen and message and content_type == 'text/plain':
250 message += '\n' + payload
250 message += '\n' + payload
251 except: # re-raises
251 except: # re-raises
252 tmpfp.close()
252 tmpfp.close()
253 os.unlink(tmpname)
253 os.unlink(tmpname)
254 raise
254 raise
255
255
256 if subject and not message.startswith(subject):
256 if subject and not message.startswith(subject):
257 message = '%s\n%s' % (subject, message)
257 message = '%s\n%s' % (subject, message)
258 tmpfp.close()
258 tmpfp.close()
259 if not diffs_seen:
259 if not diffs_seen:
260 os.unlink(tmpname)
260 os.unlink(tmpname)
261 return None, message, user, date, branch, None, None, None
261 return None, message, user, date, branch, None, None, None
262 p1 = parents and parents.pop(0) or None
262 p1 = parents and parents.pop(0) or None
263 p2 = parents and parents.pop(0) or None
263 p2 = parents and parents.pop(0) or None
264 return tmpname, message, user, date, branch, nodeid, p1, p2
264 return tmpname, message, user, date, branch, nodeid, p1, p2
265
265
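extract() hands back an eight-item tuple and, when a diff was found, a temporary file that the caller must clean up. A hedged sketch of the calling convention, assuming a mercurial.ui.ui instance and a hypothetical change.patch file on disk:

import os
from mercurial import ui as uimod
from mercurial import patch

fp = open('change.patch')       # hypothetical input; any readable file-like object works
tmpname, message, user, date, branch, node, p1, p2 = patch.extract(uimod.ui(), fp)
try:
    if tmpname is not None:
        # the temporary file holds the diff portion of the input
        diff = open(tmpname).read()
finally:
    # per the docstring above, the caller must unlink the temporary file
    if tmpname:
        os.unlink(tmpname)
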
266 class patchmeta(object):
266 class patchmeta(object):
267 """Patched file metadata
267 """Patched file metadata
268
268
269     'op' is the performed operation, one of ADD, DELETE, RENAME, MODIFY
269     'op' is the performed operation, one of ADD, DELETE, RENAME, MODIFY
270     or COPY. 'path' is the patched file path. 'oldpath' is set to the
270     or COPY. 'path' is the patched file path. 'oldpath' is set to the
271 origin file when 'op' is either COPY or RENAME, None otherwise. If
271 origin file when 'op' is either COPY or RENAME, None otherwise. If
272 file mode is changed, 'mode' is a tuple (islink, isexec) where
272 file mode is changed, 'mode' is a tuple (islink, isexec) where
273 'islink' is True if the file is a symlink and 'isexec' is True if
273 'islink' is True if the file is a symlink and 'isexec' is True if
274 the file is executable. Otherwise, 'mode' is None.
274 the file is executable. Otherwise, 'mode' is None.
275 """
275 """
276 def __init__(self, path):
276 def __init__(self, path):
277 self.path = path
277 self.path = path
278 self.oldpath = None
278 self.oldpath = None
279 self.mode = None
279 self.mode = None
280 self.op = 'MODIFY'
280 self.op = 'MODIFY'
281 self.binary = False
281 self.binary = False
282
282
283 def setmode(self, mode):
283 def setmode(self, mode):
284 islink = mode & 020000
284 islink = mode & 020000
285 isexec = mode & 0100
285 isexec = mode & 0100
286 self.mode = (islink, isexec)
286 self.mode = (islink, isexec)
287
287
288 def copy(self):
288 def copy(self):
289 other = patchmeta(self.path)
289 other = patchmeta(self.path)
290 other.oldpath = self.oldpath
290 other.oldpath = self.oldpath
291 other.mode = self.mode
291 other.mode = self.mode
292 other.op = self.op
292 other.op = self.op
293 other.binary = self.binary
293 other.binary = self.binary
294 return other
294 return other
295
295
296 def _ispatchinga(self, afile):
296 def _ispatchinga(self, afile):
297 if afile == '/dev/null':
297 if afile == '/dev/null':
298 return self.op == 'ADD'
298 return self.op == 'ADD'
299 return afile == 'a/' + (self.oldpath or self.path)
299 return afile == 'a/' + (self.oldpath or self.path)
300
300
301 def _ispatchingb(self, bfile):
301 def _ispatchingb(self, bfile):
302 if bfile == '/dev/null':
302 if bfile == '/dev/null':
303 return self.op == 'DELETE'
303 return self.op == 'DELETE'
304 return bfile == 'b/' + self.path
304 return bfile == 'b/' + self.path
305
305
306 def ispatching(self, afile, bfile):
306 def ispatching(self, afile, bfile):
307 return self._ispatchinga(afile) and self._ispatchingb(bfile)
307 return self._ispatchinga(afile) and self._ispatchingb(bfile)
308
308
309 def __repr__(self):
309 def __repr__(self):
310 return "<patchmeta %s %r>" % (self.op, self.path)
310 return "<patchmeta %s %r>" % (self.op, self.path)
311
311
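As a quick illustration of how ispatching() ties the a/ and b/ sides of a git-style diff back to a patchmeta (the paths below are made up):

from mercurial.patch import patchmeta

gp = patchmeta('dir/new.txt')
gp.op = 'RENAME'
gp.oldpath = 'dir/old.txt'

# the a/ side must name the rename source, the b/ side the destination
assert gp.ispatching('a/dir/old.txt', 'b/dir/new.txt')
assert not gp.ispatching('a/dir/new.txt', 'b/dir/new.txt')

# an ADD only matches when the old file is /dev/null
added = patchmeta('created.txt')
added.op = 'ADD'
assert added.ispatching('/dev/null', 'b/created.txt')
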
312 def readgitpatch(lr):
312 def readgitpatch(lr):
313 """extract git-style metadata about patches from <patchname>"""
313 """extract git-style metadata about patches from <patchname>"""
314
314
315 # Filter patch for git information
315 # Filter patch for git information
316 gp = None
316 gp = None
317 gitpatches = []
317 gitpatches = []
318 for line in lr:
318 for line in lr:
319 line = line.rstrip(' \r\n')
319 line = line.rstrip(' \r\n')
320 if line.startswith('diff --git a/'):
320 if line.startswith('diff --git a/'):
321 m = gitre.match(line)
321 m = gitre.match(line)
322 if m:
322 if m:
323 if gp:
323 if gp:
324 gitpatches.append(gp)
324 gitpatches.append(gp)
325 dst = m.group(2)
325 dst = m.group(2)
326 gp = patchmeta(dst)
326 gp = patchmeta(dst)
327 elif gp:
327 elif gp:
328 if line.startswith('--- '):
328 if line.startswith('--- '):
329 gitpatches.append(gp)
329 gitpatches.append(gp)
330 gp = None
330 gp = None
331 continue
331 continue
332 if line.startswith('rename from '):
332 if line.startswith('rename from '):
333 gp.op = 'RENAME'
333 gp.op = 'RENAME'
334 gp.oldpath = line[12:]
334 gp.oldpath = line[12:]
335 elif line.startswith('rename to '):
335 elif line.startswith('rename to '):
336 gp.path = line[10:]
336 gp.path = line[10:]
337 elif line.startswith('copy from '):
337 elif line.startswith('copy from '):
338 gp.op = 'COPY'
338 gp.op = 'COPY'
339 gp.oldpath = line[10:]
339 gp.oldpath = line[10:]
340 elif line.startswith('copy to '):
340 elif line.startswith('copy to '):
341 gp.path = line[8:]
341 gp.path = line[8:]
342 elif line.startswith('deleted file'):
342 elif line.startswith('deleted file'):
343 gp.op = 'DELETE'
343 gp.op = 'DELETE'
344 elif line.startswith('new file mode '):
344 elif line.startswith('new file mode '):
345 gp.op = 'ADD'
345 gp.op = 'ADD'
346 gp.setmode(int(line[-6:], 8))
346 gp.setmode(int(line[-6:], 8))
347 elif line.startswith('new mode '):
347 elif line.startswith('new mode '):
348 gp.setmode(int(line[-6:], 8))
348 gp.setmode(int(line[-6:], 8))
349 elif line.startswith('GIT binary patch'):
349 elif line.startswith('GIT binary patch'):
350 gp.binary = True
350 gp.binary = True
351 if gp:
351 if gp:
352 gitpatches.append(gp)
352 gitpatches.append(gp)
353
353
354 return gitpatches
354 return gitpatches
355
355
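readgitpatch() only needs an iterable of lines, so a plain list works for a quick check. A small sketch with a fabricated rename header:

from mercurial import patch

lines = ['diff --git a/old.txt b/new.txt\n',
         'rename from old.txt\n',
         'rename to new.txt\n']
gps = patch.readgitpatch(lines)
assert len(gps) == 1
gp = gps[0]
assert (gp.op, gp.oldpath, gp.path) == ('RENAME', 'old.txt', 'new.txt')
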
356 class linereader(object):
356 class linereader(object):
357 # simple class to allow pushing lines back into the input stream
357 # simple class to allow pushing lines back into the input stream
358 def __init__(self, fp):
358 def __init__(self, fp):
359 self.fp = fp
359 self.fp = fp
360 self.buf = []
360 self.buf = []
361
361
362 def push(self, line):
362 def push(self, line):
363 if line is not None:
363 if line is not None:
364 self.buf.append(line)
364 self.buf.append(line)
365
365
366 def readline(self):
366 def readline(self):
367 if self.buf:
367 if self.buf:
368 l = self.buf[0]
368 l = self.buf[0]
369 del self.buf[0]
369 del self.buf[0]
370 return l
370 return l
371 return self.fp.readline()
371 return self.fp.readline()
372
372
373 def __iter__(self):
373 def __iter__(self):
374 while True:
374 while True:
375 l = self.readline()
375 l = self.readline()
376 if not l:
376 if not l:
377 break
377 break
378 yield l
378 yield l
379
379
380 class abstractbackend(object):
380 class abstractbackend(object):
381 def __init__(self, ui):
381 def __init__(self, ui):
382 self.ui = ui
382 self.ui = ui
383
383
384 def getfile(self, fname):
384 def getfile(self, fname):
385 """Return target file data and flags as a (data, (islink,
385 """Return target file data and flags as a (data, (islink,
386 isexec)) tuple. Data is None if file is missing/deleted.
386 isexec)) tuple. Data is None if file is missing/deleted.
387 """
387 """
388 raise NotImplementedError
388 raise NotImplementedError
389
389
390 def setfile(self, fname, data, mode, copysource):
390 def setfile(self, fname, data, mode, copysource):
391 """Write data to target file fname and set its mode. mode is a
391 """Write data to target file fname and set its mode. mode is a
392 (islink, isexec) tuple. If data is None, the file content should
392 (islink, isexec) tuple. If data is None, the file content should
393 be left unchanged. If the file is modified after being copied,
393 be left unchanged. If the file is modified after being copied,
394 copysource is set to the original file name.
394 copysource is set to the original file name.
395 """
395 """
396 raise NotImplementedError
396 raise NotImplementedError
397
397
398 def unlink(self, fname):
398 def unlink(self, fname):
399 """Unlink target file."""
399 """Unlink target file."""
400 raise NotImplementedError
400 raise NotImplementedError
401
401
402 def writerej(self, fname, failed, total, lines):
402 def writerej(self, fname, failed, total, lines):
403 """Write rejected lines for fname. total is the number of hunks
403 """Write rejected lines for fname. total is the number of hunks
404 which failed to apply and total the total number of hunks for this
404 which failed to apply and total the total number of hunks for this
405 files.
405 files.
406 """
406 """
407 pass
407 pass
408
408
409 def exists(self, fname):
409 def exists(self, fname):
410 raise NotImplementedError
410 raise NotImplementedError
411
411
412 class fsbackend(abstractbackend):
412 class fsbackend(abstractbackend):
413 def __init__(self, ui, basedir):
413 def __init__(self, ui, basedir):
414 super(fsbackend, self).__init__(ui)
414 super(fsbackend, self).__init__(ui)
415 self.opener = scmutil.opener(basedir)
415 self.opener = scmutil.opener(basedir)
416
416
417 def _join(self, f):
417 def _join(self, f):
418 return os.path.join(self.opener.base, f)
418 return os.path.join(self.opener.base, f)
419
419
420 def getfile(self, fname):
420 def getfile(self, fname):
421 if self.opener.islink(fname):
421 if self.opener.islink(fname):
422 return (self.opener.readlink(fname), (True, False))
422 return (self.opener.readlink(fname), (True, False))
423
423
424 isexec = False
424 isexec = False
425 try:
425 try:
426 isexec = self.opener.lstat(fname).st_mode & 0100 != 0
426 isexec = self.opener.lstat(fname).st_mode & 0100 != 0
427 except OSError, e:
427 except OSError, e:
428 if e.errno != errno.ENOENT:
428 if e.errno != errno.ENOENT:
429 raise
429 raise
430 try:
430 try:
431 return (self.opener.read(fname), (False, isexec))
431 return (self.opener.read(fname), (False, isexec))
432 except IOError, e:
432 except IOError, e:
433 if e.errno != errno.ENOENT:
433 if e.errno != errno.ENOENT:
434 raise
434 raise
435 return None, None
435 return None, None
436
436
437 def setfile(self, fname, data, mode, copysource):
437 def setfile(self, fname, data, mode, copysource):
438 islink, isexec = mode
438 islink, isexec = mode
439 if data is None:
439 if data is None:
440 self.opener.setflags(fname, islink, isexec)
440 self.opener.setflags(fname, islink, isexec)
441 return
441 return
442 if islink:
442 if islink:
443 self.opener.symlink(data, fname)
443 self.opener.symlink(data, fname)
444 else:
444 else:
445 self.opener.write(fname, data)
445 self.opener.write(fname, data)
446 if isexec:
446 if isexec:
447 self.opener.setflags(fname, False, True)
447 self.opener.setflags(fname, False, True)
448
448
449 def unlink(self, fname):
449 def unlink(self, fname):
450 self.opener.unlinkpath(fname, ignoremissing=True)
450 self.opener.unlinkpath(fname, ignoremissing=True)
451
451
452 def writerej(self, fname, failed, total, lines):
452 def writerej(self, fname, failed, total, lines):
453 fname = fname + ".rej"
453 fname = fname + ".rej"
454 self.ui.warn(
454 self.ui.warn(
455 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
455 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
456 (failed, total, fname))
456 (failed, total, fname))
457 fp = self.opener(fname, 'w')
457 fp = self.opener(fname, 'w')
458 fp.writelines(lines)
458 fp.writelines(lines)
459 fp.close()
459 fp.close()
460
460
461 def exists(self, fname):
461 def exists(self, fname):
462 return self.opener.lexists(fname)
462 return self.opener.lexists(fname)
463
463
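A sketch of the fsbackend read/write round trip against a throwaway directory (directory, file name and contents are illustrative, and a mercurial.ui.ui instance is assumed):

import shutil, tempfile
from mercurial import ui as uimod
from mercurial import patch

tmpdir = tempfile.mkdtemp()
try:
    backend = patch.fsbackend(uimod.ui(), tmpdir)
    backend.setfile('greeting.txt', 'hello\n', (False, False), None)
    data, mode = backend.getfile('greeting.txt')
    assert data == 'hello\n'
    assert mode == (False, False)      # (islink, isexec)
    assert backend.exists('greeting.txt')
finally:
    shutil.rmtree(tmpdir)
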
464 class workingbackend(fsbackend):
464 class workingbackend(fsbackend):
465 def __init__(self, ui, repo, similarity):
465 def __init__(self, ui, repo, similarity):
466 super(workingbackend, self).__init__(ui, repo.root)
466 super(workingbackend, self).__init__(ui, repo.root)
467 self.repo = repo
467 self.repo = repo
468 self.similarity = similarity
468 self.similarity = similarity
469 self.removed = set()
469 self.removed = set()
470 self.changed = set()
470 self.changed = set()
471 self.copied = []
471 self.copied = []
472
472
473 def _checkknown(self, fname):
473 def _checkknown(self, fname):
474 if self.repo.dirstate[fname] == '?' and self.exists(fname):
474 if self.repo.dirstate[fname] == '?' and self.exists(fname):
475 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
475 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
476
476
477 def setfile(self, fname, data, mode, copysource):
477 def setfile(self, fname, data, mode, copysource):
478 self._checkknown(fname)
478 self._checkknown(fname)
479 super(workingbackend, self).setfile(fname, data, mode, copysource)
479 super(workingbackend, self).setfile(fname, data, mode, copysource)
480 if copysource is not None:
480 if copysource is not None:
481 self.copied.append((copysource, fname))
481 self.copied.append((copysource, fname))
482 self.changed.add(fname)
482 self.changed.add(fname)
483
483
484 def unlink(self, fname):
484 def unlink(self, fname):
485 self._checkknown(fname)
485 self._checkknown(fname)
486 super(workingbackend, self).unlink(fname)
486 super(workingbackend, self).unlink(fname)
487 self.removed.add(fname)
487 self.removed.add(fname)
488 self.changed.add(fname)
488 self.changed.add(fname)
489
489
490 def close(self):
490 def close(self):
491 wctx = self.repo[None]
491 wctx = self.repo[None]
492 changed = set(self.changed)
492 changed = set(self.changed)
493 for src, dst in self.copied:
493 for src, dst in self.copied:
494 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
494 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
495 if self.removed:
495 if self.removed:
496 wctx.forget(sorted(self.removed))
496 wctx.forget(sorted(self.removed))
497 for f in self.removed:
497 for f in self.removed:
498 if f not in self.repo.dirstate:
498 if f not in self.repo.dirstate:
499 # File was deleted and no longer belongs to the
499 # File was deleted and no longer belongs to the
500 # dirstate, it was probably marked added then
500 # dirstate, it was probably marked added then
501 # deleted, and should not be considered by
501 # deleted, and should not be considered by
502 # marktouched().
502 # marktouched().
503 changed.discard(f)
503 changed.discard(f)
504 if changed:
504 if changed:
505 scmutil.marktouched(self.repo, changed, self.similarity)
505 scmutil.marktouched(self.repo, changed, self.similarity)
506 return sorted(self.changed)
506 return sorted(self.changed)
507
507
508 class filestore(object):
508 class filestore(object):
509 def __init__(self, maxsize=None):
509 def __init__(self, maxsize=None):
510 self.opener = None
510 self.opener = None
511 self.files = {}
511 self.files = {}
512 self.created = 0
512 self.created = 0
513 self.maxsize = maxsize
513 self.maxsize = maxsize
514 if self.maxsize is None:
514 if self.maxsize is None:
515 self.maxsize = 4*(2**20)
515 self.maxsize = 4*(2**20)
516 self.size = 0
516 self.size = 0
517 self.data = {}
517 self.data = {}
518
518
519 def setfile(self, fname, data, mode, copied=None):
519 def setfile(self, fname, data, mode, copied=None):
520 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
520 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
521 self.data[fname] = (data, mode, copied)
521 self.data[fname] = (data, mode, copied)
522 self.size += len(data)
522 self.size += len(data)
523 else:
523 else:
524 if self.opener is None:
524 if self.opener is None:
525 root = tempfile.mkdtemp(prefix='hg-patch-')
525 root = tempfile.mkdtemp(prefix='hg-patch-')
526 self.opener = scmutil.opener(root)
526 self.opener = scmutil.opener(root)
527 # Avoid filename issues with these simple names
527 # Avoid filename issues with these simple names
528 fn = str(self.created)
528 fn = str(self.created)
529 self.opener.write(fn, data)
529 self.opener.write(fn, data)
530 self.created += 1
530 self.created += 1
531 self.files[fname] = (fn, mode, copied)
531 self.files[fname] = (fn, mode, copied)
532
532
533 def getfile(self, fname):
533 def getfile(self, fname):
534 if fname in self.data:
534 if fname in self.data:
535 return self.data[fname]
535 return self.data[fname]
536 if not self.opener or fname not in self.files:
536 if not self.opener or fname not in self.files:
537 return None, None, None
537 return None, None, None
538 fn, mode, copied = self.files[fname]
538 fn, mode, copied = self.files[fname]
539 return self.opener.read(fn), mode, copied
539 return self.opener.read(fn), mode, copied
540
540
541 def close(self):
541 def close(self):
542 if self.opener:
542 if self.opener:
543 shutil.rmtree(self.opener.base)
543 shutil.rmtree(self.opener.base)
544
544
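filestore keeps small files in memory and spills anything past maxsize to a temporary directory; getfile() hides the difference. A rough sketch, with maxsize shrunk artificially to force the spill:

from mercurial import patch

store = patch.filestore(maxsize=16)
store.setfile('small', 'in memory\n', (False, False))
store.setfile('large', 'x' * 1024, (False, False))   # over maxsize, written to disk
try:
    data, mode, copied = store.getfile('large')
    assert data == 'x' * 1024
    assert store.getfile('missing') == (None, None, None)
finally:
    store.close()   # removes the temporary directory, if one was created
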
545 class repobackend(abstractbackend):
545 class repobackend(abstractbackend):
546 def __init__(self, ui, repo, ctx, store):
546 def __init__(self, ui, repo, ctx, store):
547 super(repobackend, self).__init__(ui)
547 super(repobackend, self).__init__(ui)
548 self.repo = repo
548 self.repo = repo
549 self.ctx = ctx
549 self.ctx = ctx
550 self.store = store
550 self.store = store
551 self.changed = set()
551 self.changed = set()
552 self.removed = set()
552 self.removed = set()
553 self.copied = {}
553 self.copied = {}
554
554
555 def _checkknown(self, fname):
555 def _checkknown(self, fname):
556 if fname not in self.ctx:
556 if fname not in self.ctx:
557 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
557 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
558
558
559 def getfile(self, fname):
559 def getfile(self, fname):
560 try:
560 try:
561 fctx = self.ctx[fname]
561 fctx = self.ctx[fname]
562 except error.LookupError:
562 except error.LookupError:
563 return None, None
563 return None, None
564 flags = fctx.flags()
564 flags = fctx.flags()
565 return fctx.data(), ('l' in flags, 'x' in flags)
565 return fctx.data(), ('l' in flags, 'x' in flags)
566
566
567 def setfile(self, fname, data, mode, copysource):
567 def setfile(self, fname, data, mode, copysource):
568 if copysource:
568 if copysource:
569 self._checkknown(copysource)
569 self._checkknown(copysource)
570 if data is None:
570 if data is None:
571 data = self.ctx[fname].data()
571 data = self.ctx[fname].data()
572 self.store.setfile(fname, data, mode, copysource)
572 self.store.setfile(fname, data, mode, copysource)
573 self.changed.add(fname)
573 self.changed.add(fname)
574 if copysource:
574 if copysource:
575 self.copied[fname] = copysource
575 self.copied[fname] = copysource
576
576
577 def unlink(self, fname):
577 def unlink(self, fname):
578 self._checkknown(fname)
578 self._checkknown(fname)
579 self.removed.add(fname)
579 self.removed.add(fname)
580
580
581 def exists(self, fname):
581 def exists(self, fname):
582 return fname in self.ctx
582 return fname in self.ctx
583
583
584 def close(self):
584 def close(self):
585 return self.changed | self.removed
585 return self.changed | self.removed
586
586
587 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
587 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
588 unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
588 unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
589 contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
589 contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
590 eolmodes = ['strict', 'crlf', 'lf', 'auto']
590 eolmodes = ['strict', 'crlf', 'lf', 'auto']
591
591
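For reference, the hunk-header regexes above capture the start/length pairs as strings, with the length group absent when the range covers a single line. A quick illustration:

from mercurial import patch

m = patch.unidesc.match('@@ -12,5 +14,6 @@\n')
assert m.groups() == ('12', '5', '14', '6')

m = patch.unidesc.match('@@ -3 +4 @@\n')
assert m.groups() == ('3', None, '4', None)

m = patch.contextdesc.match('*** 7,12 ****\n')
assert m.groups() == ('7', '12')
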
592 class patchfile(object):
592 class patchfile(object):
593 def __init__(self, ui, gp, backend, store, eolmode='strict'):
593 def __init__(self, ui, gp, backend, store, eolmode='strict'):
594 self.fname = gp.path
594 self.fname = gp.path
595 self.eolmode = eolmode
595 self.eolmode = eolmode
596 self.eol = None
596 self.eol = None
597 self.backend = backend
597 self.backend = backend
598 self.ui = ui
598 self.ui = ui
599 self.lines = []
599 self.lines = []
600 self.exists = False
600 self.exists = False
601 self.missing = True
601 self.missing = True
602 self.mode = gp.mode
602 self.mode = gp.mode
603 self.copysource = gp.oldpath
603 self.copysource = gp.oldpath
604 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
604 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
605 self.remove = gp.op == 'DELETE'
605 self.remove = gp.op == 'DELETE'
606 if self.copysource is None:
606 if self.copysource is None:
607 data, mode = backend.getfile(self.fname)
607 data, mode = backend.getfile(self.fname)
608 else:
608 else:
609 data, mode = store.getfile(self.copysource)[:2]
609 data, mode = store.getfile(self.copysource)[:2]
610 if data is not None:
610 if data is not None:
611 self.exists = self.copysource is None or backend.exists(self.fname)
611 self.exists = self.copysource is None or backend.exists(self.fname)
612 self.missing = False
612 self.missing = False
613 if data:
613 if data:
614 self.lines = mdiff.splitnewlines(data)
614 self.lines = mdiff.splitnewlines(data)
615 if self.mode is None:
615 if self.mode is None:
616 self.mode = mode
616 self.mode = mode
617 if self.lines:
617 if self.lines:
618 # Normalize line endings
618 # Normalize line endings
619 if self.lines[0].endswith('\r\n'):
619 if self.lines[0].endswith('\r\n'):
620 self.eol = '\r\n'
620 self.eol = '\r\n'
621 elif self.lines[0].endswith('\n'):
621 elif self.lines[0].endswith('\n'):
622 self.eol = '\n'
622 self.eol = '\n'
623 if eolmode != 'strict':
623 if eolmode != 'strict':
624 nlines = []
624 nlines = []
625 for l in self.lines:
625 for l in self.lines:
626 if l.endswith('\r\n'):
626 if l.endswith('\r\n'):
627 l = l[:-2] + '\n'
627 l = l[:-2] + '\n'
628 nlines.append(l)
628 nlines.append(l)
629 self.lines = nlines
629 self.lines = nlines
630 else:
630 else:
631 if self.create:
631 if self.create:
632 self.missing = False
632 self.missing = False
633 if self.mode is None:
633 if self.mode is None:
634 self.mode = (False, False)
634 self.mode = (False, False)
635 if self.missing:
635 if self.missing:
636 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
636 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
637
637
638 self.hash = {}
638 self.hash = {}
639 self.dirty = 0
639 self.dirty = 0
640 self.offset = 0
640 self.offset = 0
641 self.skew = 0
641 self.skew = 0
642 self.rej = []
642 self.rej = []
643 self.fileprinted = False
643 self.fileprinted = False
644 self.printfile(False)
644 self.printfile(False)
645 self.hunks = 0
645 self.hunks = 0
646
646
647 def writelines(self, fname, lines, mode):
647 def writelines(self, fname, lines, mode):
648 if self.eolmode == 'auto':
648 if self.eolmode == 'auto':
649 eol = self.eol
649 eol = self.eol
650 elif self.eolmode == 'crlf':
650 elif self.eolmode == 'crlf':
651 eol = '\r\n'
651 eol = '\r\n'
652 else:
652 else:
653 eol = '\n'
653 eol = '\n'
654
654
655 if self.eolmode != 'strict' and eol and eol != '\n':
655 if self.eolmode != 'strict' and eol and eol != '\n':
656 rawlines = []
656 rawlines = []
657 for l in lines:
657 for l in lines:
658 if l and l[-1] == '\n':
658 if l and l[-1] == '\n':
659 l = l[:-1] + eol
659 l = l[:-1] + eol
660 rawlines.append(l)
660 rawlines.append(l)
661 lines = rawlines
661 lines = rawlines
662
662
663 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
663 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
664
664
665 def printfile(self, warn):
665 def printfile(self, warn):
666 if self.fileprinted:
666 if self.fileprinted:
667 return
667 return
668 if warn or self.ui.verbose:
668 if warn or self.ui.verbose:
669 self.fileprinted = True
669 self.fileprinted = True
670 s = _("patching file %s\n") % self.fname
670 s = _("patching file %s\n") % self.fname
671 if warn:
671 if warn:
672 self.ui.warn(s)
672 self.ui.warn(s)
673 else:
673 else:
674 self.ui.note(s)
674 self.ui.note(s)
675
675
676
676
677 def findlines(self, l, linenum):
677 def findlines(self, l, linenum):
678 # looks through the hash and finds candidate lines. The
678 # looks through the hash and finds candidate lines. The
679 # result is a list of line numbers sorted based on distance
679 # result is a list of line numbers sorted based on distance
680 # from linenum
680 # from linenum
681
681
682 cand = self.hash.get(l, [])
682 cand = self.hash.get(l, [])
683 if len(cand) > 1:
683 if len(cand) > 1:
684 # resort our list of potentials forward then back.
684 # resort our list of potentials forward then back.
685 cand.sort(key=lambda x: abs(x - linenum))
685 cand.sort(key=lambda x: abs(x - linenum))
686 return cand
686 return cand
687
687
688 def write_rej(self):
688 def write_rej(self):
689 # our rejects are a little different from patch(1). This always
689 # our rejects are a little different from patch(1). This always
690 # creates rejects in the same form as the original patch. A file
690 # creates rejects in the same form as the original patch. A file
691 # header is inserted so that you can run the reject through patch again
691 # header is inserted so that you can run the reject through patch again
692 # without having to type the filename.
692 # without having to type the filename.
693 if not self.rej:
693 if not self.rej:
694 return
694 return
695 base = os.path.basename(self.fname)
695 base = os.path.basename(self.fname)
696 lines = ["--- %s\n+++ %s\n" % (base, base)]
696 lines = ["--- %s\n+++ %s\n" % (base, base)]
697 for x in self.rej:
697 for x in self.rej:
698 for l in x.hunk:
698 for l in x.hunk:
699 lines.append(l)
699 lines.append(l)
700 if l[-1] != '\n':
700 if l[-1] != '\n':
701 lines.append("\n\ No newline at end of file\n")
701 lines.append("\n\ No newline at end of file\n")
702 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
702 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
703
703
704 def apply(self, h):
704 def apply(self, h):
705 if not h.complete():
705 if not h.complete():
706 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
706 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
707 (h.number, h.desc, len(h.a), h.lena, len(h.b),
707 (h.number, h.desc, len(h.a), h.lena, len(h.b),
708 h.lenb))
708 h.lenb))
709
709
710 self.hunks += 1
710 self.hunks += 1
711
711
712 if self.missing:
712 if self.missing:
713 self.rej.append(h)
713 self.rej.append(h)
714 return -1
714 return -1
715
715
716 if self.exists and self.create:
716 if self.exists and self.create:
717 if self.copysource:
717 if self.copysource:
718 self.ui.warn(_("cannot create %s: destination already "
718 self.ui.warn(_("cannot create %s: destination already "
719 "exists\n") % self.fname)
719 "exists\n") % self.fname)
720 else:
720 else:
721 self.ui.warn(_("file %s already exists\n") % self.fname)
721 self.ui.warn(_("file %s already exists\n") % self.fname)
722 self.rej.append(h)
722 self.rej.append(h)
723 return -1
723 return -1
724
724
725 if isinstance(h, binhunk):
725 if isinstance(h, binhunk):
726 if self.remove:
726 if self.remove:
727 self.backend.unlink(self.fname)
727 self.backend.unlink(self.fname)
728 else:
728 else:
729 l = h.new(self.lines)
729 l = h.new(self.lines)
730 self.lines[:] = l
730 self.lines[:] = l
731 self.offset += len(l)
731 self.offset += len(l)
732 self.dirty = True
732 self.dirty = True
733 return 0
733 return 0
734
734
735 horig = h
735 horig = h
736 if (self.eolmode in ('crlf', 'lf')
736 if (self.eolmode in ('crlf', 'lf')
737 or self.eolmode == 'auto' and self.eol):
737 or self.eolmode == 'auto' and self.eol):
738 # If new eols are going to be normalized, then normalize
738 # If new eols are going to be normalized, then normalize
739 # hunk data before patching. Otherwise, preserve input
739 # hunk data before patching. Otherwise, preserve input
740 # line-endings.
740 # line-endings.
741 h = h.getnormalized()
741 h = h.getnormalized()
742
742
743 # fast case first, no offsets, no fuzz
743 # fast case first, no offsets, no fuzz
744 old, oldstart, new, newstart = h.fuzzit(0, False)
744 old, oldstart, new, newstart = h.fuzzit(0, False)
745 oldstart += self.offset
745 oldstart += self.offset
746 orig_start = oldstart
746 orig_start = oldstart
747 # if there's skew we want to emit the "(offset %d lines)" even
747 # if there's skew we want to emit the "(offset %d lines)" even
748 # when the hunk cleanly applies at start + skew, so skip the
748 # when the hunk cleanly applies at start + skew, so skip the
749 # fast case code
749 # fast case code
750 if (self.skew == 0 and
750 if (self.skew == 0 and
751 diffhelpers.testhunk(old, self.lines, oldstart) == 0):
751 diffhelpers.testhunk(old, self.lines, oldstart) == 0):
752 if self.remove:
752 if self.remove:
753 self.backend.unlink(self.fname)
753 self.backend.unlink(self.fname)
754 else:
754 else:
755 self.lines[oldstart:oldstart + len(old)] = new
755 self.lines[oldstart:oldstart + len(old)] = new
756 self.offset += len(new) - len(old)
756 self.offset += len(new) - len(old)
757 self.dirty = True
757 self.dirty = True
758 return 0
758 return 0
759
759
760 # ok, we couldn't match the hunk. Let's look for offsets and fuzz it
760 # ok, we couldn't match the hunk. Let's look for offsets and fuzz it
761 self.hash = {}
761 self.hash = {}
762 for x, s in enumerate(self.lines):
762 for x, s in enumerate(self.lines):
763 self.hash.setdefault(s, []).append(x)
763 self.hash.setdefault(s, []).append(x)
764
764
765 for fuzzlen in xrange(3):
765 for fuzzlen in xrange(3):
766 for toponly in [True, False]:
766 for toponly in [True, False]:
767 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
767 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
768 oldstart = oldstart + self.offset + self.skew
768 oldstart = oldstart + self.offset + self.skew
769 oldstart = min(oldstart, len(self.lines))
769 oldstart = min(oldstart, len(self.lines))
770 if old:
770 if old:
771 cand = self.findlines(old[0][1:], oldstart)
771 cand = self.findlines(old[0][1:], oldstart)
772 else:
772 else:
773 # Only adding lines with no or fuzzed context, just
773 # Only adding lines with no or fuzzed context, just
774 # take the skew into account
774 # take the skew into account
775 cand = [oldstart]
775 cand = [oldstart]
776
776
777 for l in cand:
777 for l in cand:
778 if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
778 if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
779 self.lines[l : l + len(old)] = new
779 self.lines[l : l + len(old)] = new
780 self.offset += len(new) - len(old)
780 self.offset += len(new) - len(old)
781 self.skew = l - orig_start
781 self.skew = l - orig_start
782 self.dirty = True
782 self.dirty = True
783 offset = l - orig_start - fuzzlen
783 offset = l - orig_start - fuzzlen
784 if fuzzlen:
784 if fuzzlen:
785 msg = _("Hunk #%d succeeded at %d "
785 msg = _("Hunk #%d succeeded at %d "
786 "with fuzz %d "
786 "with fuzz %d "
787 "(offset %d lines).\n")
787 "(offset %d lines).\n")
788 self.printfile(True)
788 self.printfile(True)
789 self.ui.warn(msg %
789 self.ui.warn(msg %
790 (h.number, l + 1, fuzzlen, offset))
790 (h.number, l + 1, fuzzlen, offset))
791 else:
791 else:
792 msg = _("Hunk #%d succeeded at %d "
792 msg = _("Hunk #%d succeeded at %d "
793 "(offset %d lines).\n")
793 "(offset %d lines).\n")
794 self.ui.note(msg % (h.number, l + 1, offset))
794 self.ui.note(msg % (h.number, l + 1, offset))
795 return fuzzlen
795 return fuzzlen
796 self.printfile(True)
796 self.printfile(True)
797 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
797 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
798 self.rej.append(horig)
798 self.rej.append(horig)
799 return -1
799 return -1
800
800
801 def close(self):
801 def close(self):
802 if self.dirty:
802 if self.dirty:
803 self.writelines(self.fname, self.lines, self.mode)
803 self.writelines(self.fname, self.lines, self.mode)
804 self.write_rej()
804 self.write_rej()
805 return len(self.rej)
805 return len(self.rej)
806
806
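An end-to-end sketch of the apply loop above: patch a two-line file on disk with a single hunk, using fsbackend and filestore from earlier and the hunk class defined below. All names and contents are fabricated for illustration, and mercurial (of roughly this vintage) is assumed to be importable:

import cStringIO, shutil, tempfile
from mercurial import ui as uimod
from mercurial import patch

tmpdir = tempfile.mkdtemp()
store = patch.filestore()
try:
    backend = patch.fsbackend(uimod.ui(), tmpdir)
    backend.setfile('greeting.txt', 'hello\nworld\n', (False, False), None)

    gp = patch.patchmeta('greeting.txt')          # default op is MODIFY
    pf = patch.patchfile(uimod.ui(), gp, backend, store)

    lr = patch.linereader(cStringIO.StringIO(' hello\n-world\n+there\n'))
    h = patch.hunk('@@ -1,2 +1,2 @@\n', 1, lr, False)

    assert pf.apply(h) == 0                       # hunk applied cleanly
    assert pf.close() == 0                        # no rejects, changes written back
    assert backend.getfile('greeting.txt')[0] == 'hello\nthere\n'
finally:
    store.close()
    shutil.rmtree(tmpdir)
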
807 class hunk(object):
807 class hunk(object):
808 def __init__(self, desc, num, lr, context):
808 def __init__(self, desc, num, lr, context):
809 self.number = num
809 self.number = num
810 self.desc = desc
810 self.desc = desc
811 self.hunk = [desc]
811 self.hunk = [desc]
812 self.a = []
812 self.a = []
813 self.b = []
813 self.b = []
814 self.starta = self.lena = None
814 self.starta = self.lena = None
815 self.startb = self.lenb = None
815 self.startb = self.lenb = None
816 if lr is not None:
816 if lr is not None:
817 if context:
817 if context:
818 self.read_context_hunk(lr)
818 self.read_context_hunk(lr)
819 else:
819 else:
820 self.read_unified_hunk(lr)
820 self.read_unified_hunk(lr)
821
821
822 def getnormalized(self):
822 def getnormalized(self):
823 """Return a copy with line endings normalized to LF."""
823 """Return a copy with line endings normalized to LF."""
824
824
825 def normalize(lines):
825 def normalize(lines):
826 nlines = []
826 nlines = []
827 for line in lines:
827 for line in lines:
828 if line.endswith('\r\n'):
828 if line.endswith('\r\n'):
829 line = line[:-2] + '\n'
829 line = line[:-2] + '\n'
830 nlines.append(line)
830 nlines.append(line)
831 return nlines
831 return nlines
832
832
833 # Dummy object, it is rebuilt manually
833 # Dummy object, it is rebuilt manually
834 nh = hunk(self.desc, self.number, None, None)
834 nh = hunk(self.desc, self.number, None, None)
835 nh.number = self.number
835 nh.number = self.number
836 nh.desc = self.desc
836 nh.desc = self.desc
837 nh.hunk = self.hunk
837 nh.hunk = self.hunk
838 nh.a = normalize(self.a)
838 nh.a = normalize(self.a)
839 nh.b = normalize(self.b)
839 nh.b = normalize(self.b)
840 nh.starta = self.starta
840 nh.starta = self.starta
841 nh.startb = self.startb
841 nh.startb = self.startb
842 nh.lena = self.lena
842 nh.lena = self.lena
843 nh.lenb = self.lenb
843 nh.lenb = self.lenb
844 return nh
844 return nh
845
845
846 def read_unified_hunk(self, lr):
846 def read_unified_hunk(self, lr):
847 m = unidesc.match(self.desc)
847 m = unidesc.match(self.desc)
848 if not m:
848 if not m:
849 raise PatchError(_("bad hunk #%d") % self.number)
849 raise PatchError(_("bad hunk #%d") % self.number)
850 self.starta, self.lena, self.startb, self.lenb = m.groups()
850 self.starta, self.lena, self.startb, self.lenb = m.groups()
851 if self.lena is None:
851 if self.lena is None:
852 self.lena = 1
852 self.lena = 1
853 else:
853 else:
854 self.lena = int(self.lena)
854 self.lena = int(self.lena)
855 if self.lenb is None:
855 if self.lenb is None:
856 self.lenb = 1
856 self.lenb = 1
857 else:
857 else:
858 self.lenb = int(self.lenb)
858 self.lenb = int(self.lenb)
859 self.starta = int(self.starta)
859 self.starta = int(self.starta)
860 self.startb = int(self.startb)
860 self.startb = int(self.startb)
861 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
861 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
862 self.b)
862 self.b)
863 # if we hit eof before finishing out the hunk, the last line will
863 # if we hit eof before finishing out the hunk, the last line will
864 # be zero length. Let's try to fix it up.
864 # be zero length. Let's try to fix it up.
865 while len(self.hunk[-1]) == 0:
865 while len(self.hunk[-1]) == 0:
866 del self.hunk[-1]
866 del self.hunk[-1]
867 del self.a[-1]
867 del self.a[-1]
868 del self.b[-1]
868 del self.b[-1]
869 self.lena -= 1
869 self.lena -= 1
870 self.lenb -= 1
870 self.lenb -= 1
871 self._fixnewline(lr)
871 self._fixnewline(lr)
872
872
873 def read_context_hunk(self, lr):
873 def read_context_hunk(self, lr):
874 self.desc = lr.readline()
874 self.desc = lr.readline()
875 m = contextdesc.match(self.desc)
875 m = contextdesc.match(self.desc)
876 if not m:
876 if not m:
877 raise PatchError(_("bad hunk #%d") % self.number)
877 raise PatchError(_("bad hunk #%d") % self.number)
878 self.starta, aend = m.groups()
878 self.starta, aend = m.groups()
879 self.starta = int(self.starta)
879 self.starta = int(self.starta)
880 if aend is None:
880 if aend is None:
881 aend = self.starta
881 aend = self.starta
882 self.lena = int(aend) - self.starta
882 self.lena = int(aend) - self.starta
883 if self.starta:
883 if self.starta:
884 self.lena += 1
884 self.lena += 1
885 for x in xrange(self.lena):
885 for x in xrange(self.lena):
886 l = lr.readline()
886 l = lr.readline()
887 if l.startswith('---'):
887 if l.startswith('---'):
888 # lines addition, old block is empty
888 # lines addition, old block is empty
889 lr.push(l)
889 lr.push(l)
890 break
890 break
891 s = l[2:]
891 s = l[2:]
892 if l.startswith('- ') or l.startswith('! '):
892 if l.startswith('- ') or l.startswith('! '):
893 u = '-' + s
893 u = '-' + s
894 elif l.startswith(' '):
894 elif l.startswith(' '):
895 u = ' ' + s
895 u = ' ' + s
896 else:
896 else:
897 raise PatchError(_("bad hunk #%d old text line %d") %
897 raise PatchError(_("bad hunk #%d old text line %d") %
898 (self.number, x))
898 (self.number, x))
899 self.a.append(u)
899 self.a.append(u)
900 self.hunk.append(u)
900 self.hunk.append(u)
901
901
902 l = lr.readline()
902 l = lr.readline()
903 if l.startswith('\ '):
903 if l.startswith('\ '):
904 s = self.a[-1][:-1]
904 s = self.a[-1][:-1]
905 self.a[-1] = s
905 self.a[-1] = s
906 self.hunk[-1] = s
906 self.hunk[-1] = s
907 l = lr.readline()
907 l = lr.readline()
908 m = contextdesc.match(l)
908 m = contextdesc.match(l)
909 if not m:
909 if not m:
910 raise PatchError(_("bad hunk #%d") % self.number)
910 raise PatchError(_("bad hunk #%d") % self.number)
911 self.startb, bend = m.groups()
911 self.startb, bend = m.groups()
912 self.startb = int(self.startb)
912 self.startb = int(self.startb)
913 if bend is None:
913 if bend is None:
914 bend = self.startb
914 bend = self.startb
915 self.lenb = int(bend) - self.startb
915 self.lenb = int(bend) - self.startb
916 if self.startb:
916 if self.startb:
917 self.lenb += 1
917 self.lenb += 1
918 hunki = 1
918 hunki = 1
919 for x in xrange(self.lenb):
919 for x in xrange(self.lenb):
920 l = lr.readline()
920 l = lr.readline()
921 if l.startswith('\ '):
921 if l.startswith('\ '):
922 # XXX: the only way to hit this is with an invalid line range.
922 # XXX: the only way to hit this is with an invalid line range.
923 # The no-eol marker is not counted in the line range, but I
923 # The no-eol marker is not counted in the line range, but I
924 # guess some diff(1) implementations out there behave differently.
924 # guess some diff(1) implementations out there behave differently.
925 s = self.b[-1][:-1]
925 s = self.b[-1][:-1]
926 self.b[-1] = s
926 self.b[-1] = s
927 self.hunk[hunki - 1] = s
927 self.hunk[hunki - 1] = s
928 continue
928 continue
929 if not l:
929 if not l:
930 # line deletions, new block is empty and we hit EOF
930 # line deletions, new block is empty and we hit EOF
931 lr.push(l)
931 lr.push(l)
932 break
932 break
933 s = l[2:]
933 s = l[2:]
934 if l.startswith('+ ') or l.startswith('! '):
934 if l.startswith('+ ') or l.startswith('! '):
935 u = '+' + s
935 u = '+' + s
936 elif l.startswith(' '):
936 elif l.startswith(' '):
937 u = ' ' + s
937 u = ' ' + s
938 elif len(self.b) == 0:
938 elif len(self.b) == 0:
939 # line deletions, new block is empty
939 # line deletions, new block is empty
940 lr.push(l)
940 lr.push(l)
941 break
941 break
942 else:
942 else:
943 raise PatchError(_("bad hunk #%d old text line %d") %
943 raise PatchError(_("bad hunk #%d old text line %d") %
944 (self.number, x))
944 (self.number, x))
945 self.b.append(s)
945 self.b.append(s)
946 while True:
946 while True:
947 if hunki >= len(self.hunk):
947 if hunki >= len(self.hunk):
948 h = ""
948 h = ""
949 else:
949 else:
950 h = self.hunk[hunki]
950 h = self.hunk[hunki]
951 hunki += 1
951 hunki += 1
952 if h == u:
952 if h == u:
953 break
953 break
954 elif h.startswith('-'):
954 elif h.startswith('-'):
955 continue
955 continue
956 else:
956 else:
957 self.hunk.insert(hunki - 1, u)
957 self.hunk.insert(hunki - 1, u)
958 break
958 break
959
959
960 if not self.a:
960 if not self.a:
961 # this happens when lines were only added to the hunk
961 # this happens when lines were only added to the hunk
962 for x in self.hunk:
962 for x in self.hunk:
963 if x.startswith('-') or x.startswith(' '):
963 if x.startswith('-') or x.startswith(' '):
964 self.a.append(x)
964 self.a.append(x)
965 if not self.b:
965 if not self.b:
966 # this happens when lines were only deleted from the hunk
966 # this happens when lines were only deleted from the hunk
967 for x in self.hunk:
967 for x in self.hunk:
968 if x.startswith('+') or x.startswith(' '):
968 if x.startswith('+') or x.startswith(' '):
969 self.b.append(x[1:])
969 self.b.append(x[1:])
970 # @@ -start,len +start,len @@
970 # @@ -start,len +start,len @@
971 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
971 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
972 self.startb, self.lenb)
972 self.startb, self.lenb)
973 self.hunk[0] = self.desc
973 self.hunk[0] = self.desc
974 self._fixnewline(lr)
974 self._fixnewline(lr)
975
975
976 def _fixnewline(self, lr):
976 def _fixnewline(self, lr):
977 l = lr.readline()
977 l = lr.readline()
978 if l.startswith('\ '):
978 if l.startswith('\ '):
979 diffhelpers.fix_newline(self.hunk, self.a, self.b)
979 diffhelpers.fix_newline(self.hunk, self.a, self.b)
980 else:
980 else:
981 lr.push(l)
981 lr.push(l)
982
982
983 def complete(self):
983 def complete(self):
984 return len(self.a) == self.lena and len(self.b) == self.lenb
984 return len(self.a) == self.lena and len(self.b) == self.lenb
985
985
986 def _fuzzit(self, old, new, fuzz, toponly):
986 def _fuzzit(self, old, new, fuzz, toponly):
987 # this removes context lines from the top and bottom of the hunk. It
987 # this removes context lines from the top and bottom of the hunk. It
988 # checks the hunk to make sure only context lines are removed, and then
988 # checks the hunk to make sure only context lines are removed, and then
989 # returns a new shortened list of lines.
989 # returns a new shortened list of lines.
990 fuzz = min(fuzz, len(old))
990 fuzz = min(fuzz, len(old))
991 if fuzz:
991 if fuzz:
992 top = 0
992 top = 0
993 bot = 0
993 bot = 0
994 hlen = len(self.hunk)
994 hlen = len(self.hunk)
995 for x in xrange(hlen - 1):
995 for x in xrange(hlen - 1):
996 # the hunk starts with the @@ line, so use x+1
996 # the hunk starts with the @@ line, so use x+1
997 if self.hunk[x + 1][0] == ' ':
997 if self.hunk[x + 1][0] == ' ':
998 top += 1
998 top += 1
999 else:
999 else:
1000 break
1000 break
1001 if not toponly:
1001 if not toponly:
1002 for x in xrange(hlen - 1):
1002 for x in xrange(hlen - 1):
1003 if self.hunk[hlen - bot - 1][0] == ' ':
1003 if self.hunk[hlen - bot - 1][0] == ' ':
1004 bot += 1
1004 bot += 1
1005 else:
1005 else:
1006 break
1006 break
1007
1007
1008 bot = min(fuzz, bot)
1008 bot = min(fuzz, bot)
1009 top = min(fuzz, top)
1009 top = min(fuzz, top)
1010 return old[top:len(old) - bot], new[top:len(new) - bot], top
1010 return old[top:len(old) - bot], new[top:len(new) - bot], top
1011 return old, new, 0
1011 return old, new, 0
1012
1012
1013 def fuzzit(self, fuzz, toponly):
1013 def fuzzit(self, fuzz, toponly):
1014 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1014 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1015 oldstart = self.starta + top
1015 oldstart = self.starta + top
1016 newstart = self.startb + top
1016 newstart = self.startb + top
1017 # zero length hunk ranges already have their start decremented
1017 # zero length hunk ranges already have their start decremented
1018 if self.lena and oldstart > 0:
1018 if self.lena and oldstart > 0:
1019 oldstart -= 1
1019 oldstart -= 1
1020 if self.lenb and newstart > 0:
1020 if self.lenb and newstart > 0:
1021 newstart -= 1
1021 newstart -= 1
1022 return old, oldstart, new, newstart
1022 return old, oldstart, new, newstart
1023
1023
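# Editorial sketch (not part of patch.py): the slice arithmetic _fuzzit
# relies on.  With fuzz=1 and one pure-context line at each end of the
# hunk, only the changed core is kept for matching.  The toy lists below
# merely stand in for the hunk's old/new sides.
old = [' context-a\n', '-removed\n', ' context-b\n']
new = [' context-a\n', '+added\n', ' context-b\n']
top = bot = 1                      # min(fuzz, leading/trailing context)
assert old[top:len(old) - bot] == ['-removed\n']
assert new[top:len(new) - bot] == ['+added\n']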
1024 class binhunk(object):
1024 class binhunk(object):
1025 'A binary patch file.'
1025 'A binary patch file.'
1026 def __init__(self, lr, fname):
1026 def __init__(self, lr, fname):
1027 self.text = None
1027 self.text = None
1028 self.delta = False
1028 self.delta = False
1029 self.hunk = ['GIT binary patch\n']
1029 self.hunk = ['GIT binary patch\n']
1030 self._fname = fname
1030 self._fname = fname
1031 self._read(lr)
1031 self._read(lr)
1032
1032
1033 def complete(self):
1033 def complete(self):
1034 return self.text is not None
1034 return self.text is not None
1035
1035
1036 def new(self, lines):
1036 def new(self, lines):
1037 if self.delta:
1037 if self.delta:
1038 return [applybindelta(self.text, ''.join(lines))]
1038 return [applybindelta(self.text, ''.join(lines))]
1039 return [self.text]
1039 return [self.text]
1040
1040
1041 def _read(self, lr):
1041 def _read(self, lr):
1042 def getline(lr, hunk):
1042 def getline(lr, hunk):
1043 l = lr.readline()
1043 l = lr.readline()
1044 hunk.append(l)
1044 hunk.append(l)
1045 return l.rstrip('\r\n')
1045 return l.rstrip('\r\n')
1046
1046
1047 size = 0
1047 size = 0
1048 while True:
1048 while True:
1049 line = getline(lr, self.hunk)
1049 line = getline(lr, self.hunk)
1050 if not line:
1050 if not line:
1051 raise PatchError(_('could not extract "%s" binary data')
1051 raise PatchError(_('could not extract "%s" binary data')
1052 % self._fname)
1052 % self._fname)
1053 if line.startswith('literal '):
1053 if line.startswith('literal '):
1054 size = int(line[8:].rstrip())
1054 size = int(line[8:].rstrip())
1055 break
1055 break
1056 if line.startswith('delta '):
1056 if line.startswith('delta '):
1057 size = int(line[6:].rstrip())
1057 size = int(line[6:].rstrip())
1058 self.delta = True
1058 self.delta = True
1059 break
1059 break
1060 dec = []
1060 dec = []
1061 line = getline(lr, self.hunk)
1061 line = getline(lr, self.hunk)
1062 while len(line) > 1:
1062 while len(line) > 1:
1063 l = line[0]
1063 l = line[0]
1064 if l <= 'Z' and l >= 'A':
1064 if l <= 'Z' and l >= 'A':
1065 l = ord(l) - ord('A') + 1
1065 l = ord(l) - ord('A') + 1
1066 else:
1066 else:
1067 l = ord(l) - ord('a') + 27
1067 l = ord(l) - ord('a') + 27
1068 try:
1068 try:
1069 dec.append(base85.b85decode(line[1:])[:l])
1069 dec.append(base85.b85decode(line[1:])[:l])
1070 except ValueError, e:
1070 except ValueError, e:
1071 raise PatchError(_('could not decode "%s" binary patch: %s')
1071 raise PatchError(_('could not decode "%s" binary patch: %s')
1072 % (self._fname, str(e)))
1072 % (self._fname, str(e)))
1073 line = getline(lr, self.hunk)
1073 line = getline(lr, self.hunk)
1074 text = zlib.decompress(''.join(dec))
1074 text = zlib.decompress(''.join(dec))
1075 if len(text) != size:
1075 if len(text) != size:
1076 raise PatchError(_('"%s" length is %d bytes, should be %d')
1076 raise PatchError(_('"%s" length is %d bytes, should be %d')
1077 % (self._fname, len(text), size))
1077 % (self._fname, len(text), size))
1078 self.text = text
1078 self.text = text
1079
1079
1080 def parsefilename(str):
1080 def parsefilename(str):
1081 # --- filename \t|space stuff
1081 # --- filename \t|space stuff
1082 s = str[4:].rstrip('\r\n')
1082 s = str[4:].rstrip('\r\n')
1083 i = s.find('\t')
1083 i = s.find('\t')
1084 if i < 0:
1084 if i < 0:
1085 i = s.find(' ')
1085 i = s.find(' ')
1086 if i < 0:
1086 if i < 0:
1087 return s
1087 return s
1088 return s[:i]
1088 return s[:i]
1089
1089
1090 def pathtransform(path, strip):
1090 def pathtransform(path, strip):
1091 '''turn a path from a patch into a path suitable for the repository
1092
1093 Returns (stripped components, path in repository).
1094
1095 >>> pathtransform('a/b/c', 0)
1096 ('', 'a/b/c')
1097 >>> pathtransform(' a/b/c ', 0)
1098 ('', ' a/b/c')
1099 >>> pathtransform(' a/b/c ', 2)
1100 ('a/b/', 'c')
1101 >>> pathtransform(' a//b/c ', 2)
1102 ('a//b/', 'c')
1103 >>> pathtransform('a/b/c', 3)
1104 Traceback (most recent call last):
1105 PatchError: unable to strip away 1 of 3 dirs from a/b/c
1106 '''
1091 pathlen = len(path)
1107 pathlen = len(path)
1092 i = 0
1108 i = 0
1093 if strip == 0:
1109 if strip == 0:
1094 return '', path.rstrip()
1110 return '', path.rstrip()
1095 count = strip
1111 count = strip
1096 while count > 0:
1112 while count > 0:
1097 i = path.find('/', i)
1113 i = path.find('/', i)
1098 if i == -1:
1114 if i == -1:
1099 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1115 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1100 (count, strip, path))
1116 (count, strip, path))
1101 i += 1
1117 i += 1
1102 # consume '//' in the path
1118 # consume '//' in the path
1103 while i < pathlen - 1 and path[i] == '/':
1119 while i < pathlen - 1 and path[i] == '/':
1104 i += 1
1120 i += 1
1105 count -= 1
1121 count -= 1
1106 return path[:i].lstrip(), path[i:].rstrip()
1122 return path[:i].lstrip(), path[i:].rstrip()
1107
1123
1108 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip):
1124 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip):
1109 nulla = afile_orig == "/dev/null"
1125 nulla = afile_orig == "/dev/null"
1110 nullb = bfile_orig == "/dev/null"
1126 nullb = bfile_orig == "/dev/null"
1111 create = nulla and hunk.starta == 0 and hunk.lena == 0
1127 create = nulla and hunk.starta == 0 and hunk.lena == 0
1112 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1128 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1113 abase, afile = pathtransform(afile_orig, strip)
1129 abase, afile = pathtransform(afile_orig, strip)
1114 gooda = not nulla and backend.exists(afile)
1130 gooda = not nulla and backend.exists(afile)
1115 bbase, bfile = pathtransform(bfile_orig, strip)
1131 bbase, bfile = pathtransform(bfile_orig, strip)
1116 if afile == bfile:
1132 if afile == bfile:
1117 goodb = gooda
1133 goodb = gooda
1118 else:
1134 else:
1119 goodb = not nullb and backend.exists(bfile)
1135 goodb = not nullb and backend.exists(bfile)
1120 missing = not goodb and not gooda and not create
1136 missing = not goodb and not gooda and not create
1121
1137
1122 # some diff programs apparently produce patches where the afile is
1138 # some diff programs apparently produce patches where the afile is
1123 # not /dev/null, but afile starts with bfile
1139 # not /dev/null, but afile starts with bfile
1124 abasedir = afile[:afile.rfind('/') + 1]
1140 abasedir = afile[:afile.rfind('/') + 1]
1125 bbasedir = bfile[:bfile.rfind('/') + 1]
1141 bbasedir = bfile[:bfile.rfind('/') + 1]
1126 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1142 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1127 and hunk.starta == 0 and hunk.lena == 0):
1143 and hunk.starta == 0 and hunk.lena == 0):
1128 create = True
1144 create = True
1129 missing = False
1145 missing = False
1130
1146
1131 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1147 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1132 # diff is between a file and its backup. In this case, the original
1148 # diff is between a file and its backup. In this case, the original
1133 # file should be patched (see original mpatch code).
1149 # file should be patched (see original mpatch code).
1134 isbackup = (abase == bbase and bfile.startswith(afile))
1150 isbackup = (abase == bbase and bfile.startswith(afile))
1135 fname = None
1151 fname = None
1136 if not missing:
1152 if not missing:
1137 if gooda and goodb:
1153 if gooda and goodb:
1138 fname = isbackup and afile or bfile
1154 fname = isbackup and afile or bfile
1139 elif gooda:
1155 elif gooda:
1140 fname = afile
1156 fname = afile
1141
1157
1142 if not fname:
1158 if not fname:
1143 if not nullb:
1159 if not nullb:
1144 fname = isbackup and afile or bfile
1160 fname = isbackup and afile or bfile
1145 elif not nulla:
1161 elif not nulla:
1146 fname = afile
1162 fname = afile
1147 else:
1163 else:
1148 raise PatchError(_("undefined source and destination files"))
1164 raise PatchError(_("undefined source and destination files"))
1149
1165
1150 gp = patchmeta(fname)
1166 gp = patchmeta(fname)
1151 if create:
1167 if create:
1152 gp.op = 'ADD'
1168 gp.op = 'ADD'
1153 elif remove:
1169 elif remove:
1154 gp.op = 'DELETE'
1170 gp.op = 'DELETE'
1155 return gp
1171 return gp
1156
1172
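# Editorial sketch (not part of patch.py): the header-only test used above
# to classify a hunk -- a /dev/null source together with an "@@ -0,0 ..."
# range marks a file creation (gp.op == 'ADD'); the mirror-image test on
# the b-side marks a deletion.  Values below are illustrative.
afile_orig, starta, lena = '/dev/null', 0, 0
create = (afile_orig == '/dev/null') and starta == 0 and lena == 0
assert create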
1157 def scangitpatch(lr, firstline):
1173 def scangitpatch(lr, firstline):
1158 """
1174 """
1159 Git patches can emit:
1175 Git patches can emit:
1160 - rename a to b
1176 - rename a to b
1161 - change b
1177 - change b
1162 - copy a to c
1178 - copy a to c
1163 - change c
1179 - change c
1164
1180
1165 We cannot apply this sequence as-is: the renamed 'a' could not be
1181 We cannot apply this sequence as-is: the renamed 'a' could not be
1166 found, since it would have been renamed already. And we cannot copy
1182 found, since it would have been renamed already. And we cannot copy
1167 from 'b' instead because 'b' would have been changed already. So
1183 from 'b' instead because 'b' would have been changed already. So
1168 we scan the git patch for copy and rename commands so we can
1184 we scan the git patch for copy and rename commands so we can
1169 perform the copies ahead of time.
1185 perform the copies ahead of time.
1170 """
1186 """
1171 pos = 0
1187 pos = 0
1172 try:
1188 try:
1173 pos = lr.fp.tell()
1189 pos = lr.fp.tell()
1174 fp = lr.fp
1190 fp = lr.fp
1175 except IOError:
1191 except IOError:
1176 fp = cStringIO.StringIO(lr.fp.read())
1192 fp = cStringIO.StringIO(lr.fp.read())
1177 gitlr = linereader(fp)
1193 gitlr = linereader(fp)
1178 gitlr.push(firstline)
1194 gitlr.push(firstline)
1179 gitpatches = readgitpatch(gitlr)
1195 gitpatches = readgitpatch(gitlr)
1180 fp.seek(pos)
1196 fp.seek(pos)
1181 return gitpatches
1197 return gitpatches
1182
1198
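# Editorial sketch (not part of patch.py): why the copy/rename records are
# collected up front.  Replaying the operations strictly in patch order on
# a toy dict "filesystem" loses the copy source before a later "copy a to
# c" could read it, which is exactly the situation the docstring describes.
fs = {'a': 'v1'}
fs['b'] = fs.pop('a')      # rename a -> b
fs['b'] += ' changed'      # change b
assert 'a' not in fs       # a later "copy a to c" has nothing left to read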
1183 def iterhunks(fp):
1199 def iterhunks(fp):
1184 """Read a patch and yield the following events:
1200 """Read a patch and yield the following events:
1185 - ("file", afile, bfile, firsthunk): select a new target file.
1201 - ("file", afile, bfile, firsthunk): select a new target file.
1186 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1202 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1187 "file" event.
1203 "file" event.
1188 - ("git", gitchanges): current diff is in git format, gitchanges
1204 - ("git", gitchanges): current diff is in git format, gitchanges
1189 maps filenames to gitpatch records. Unique event.
1205 maps filenames to gitpatch records. Unique event.
1190 """
1206 """
1191 afile = ""
1207 afile = ""
1192 bfile = ""
1208 bfile = ""
1193 state = None
1209 state = None
1194 hunknum = 0
1210 hunknum = 0
1195 emitfile = newfile = False
1211 emitfile = newfile = False
1196 gitpatches = None
1212 gitpatches = None
1197
1213
1198 # our states
1214 # our states
1199 BFILE = 1
1215 BFILE = 1
1200 context = None
1216 context = None
1201 lr = linereader(fp)
1217 lr = linereader(fp)
1202
1218
1203 while True:
1219 while True:
1204 x = lr.readline()
1220 x = lr.readline()
1205 if not x:
1221 if not x:
1206 break
1222 break
1207 if state == BFILE and (
1223 if state == BFILE and (
1208 (not context and x[0] == '@')
1224 (not context and x[0] == '@')
1209 or (context is not False and x.startswith('***************'))
1225 or (context is not False and x.startswith('***************'))
1210 or x.startswith('GIT binary patch')):
1226 or x.startswith('GIT binary patch')):
1211 gp = None
1227 gp = None
1212 if (gitpatches and
1228 if (gitpatches and
1213 gitpatches[-1].ispatching(afile, bfile)):
1229 gitpatches[-1].ispatching(afile, bfile)):
1214 gp = gitpatches.pop()
1230 gp = gitpatches.pop()
1215 if x.startswith('GIT binary patch'):
1231 if x.startswith('GIT binary patch'):
1216 h = binhunk(lr, gp.path)
1232 h = binhunk(lr, gp.path)
1217 else:
1233 else:
1218 if context is None and x.startswith('***************'):
1234 if context is None and x.startswith('***************'):
1219 context = True
1235 context = True
1220 h = hunk(x, hunknum + 1, lr, context)
1236 h = hunk(x, hunknum + 1, lr, context)
1221 hunknum += 1
1237 hunknum += 1
1222 if emitfile:
1238 if emitfile:
1223 emitfile = False
1239 emitfile = False
1224 yield 'file', (afile, bfile, h, gp and gp.copy() or None)
1240 yield 'file', (afile, bfile, h, gp and gp.copy() or None)
1225 yield 'hunk', h
1241 yield 'hunk', h
1226 elif x.startswith('diff --git a/'):
1242 elif x.startswith('diff --git a/'):
1227 m = gitre.match(x.rstrip(' \r\n'))
1243 m = gitre.match(x.rstrip(' \r\n'))
1228 if not m:
1244 if not m:
1229 continue
1245 continue
1230 if gitpatches is None:
1246 if gitpatches is None:
1231 # scan whole input for git metadata
1247 # scan whole input for git metadata
1232 gitpatches = scangitpatch(lr, x)
1248 gitpatches = scangitpatch(lr, x)
1233 yield 'git', [g.copy() for g in gitpatches
1249 yield 'git', [g.copy() for g in gitpatches
1234 if g.op in ('COPY', 'RENAME')]
1250 if g.op in ('COPY', 'RENAME')]
1235 gitpatches.reverse()
1251 gitpatches.reverse()
1236 afile = 'a/' + m.group(1)
1252 afile = 'a/' + m.group(1)
1237 bfile = 'b/' + m.group(2)
1253 bfile = 'b/' + m.group(2)
1238 while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
1254 while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
1239 gp = gitpatches.pop()
1255 gp = gitpatches.pop()
1240 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1256 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1241 if not gitpatches:
1257 if not gitpatches:
1242 raise PatchError(_('failed to synchronize metadata for "%s"')
1258 raise PatchError(_('failed to synchronize metadata for "%s"')
1243 % afile[2:])
1259 % afile[2:])
1244 gp = gitpatches[-1]
1260 gp = gitpatches[-1]
1245 newfile = True
1261 newfile = True
1246 elif x.startswith('---'):
1262 elif x.startswith('---'):
1247 # check for a unified diff
1263 # check for a unified diff
1248 l2 = lr.readline()
1264 l2 = lr.readline()
1249 if not l2.startswith('+++'):
1265 if not l2.startswith('+++'):
1250 lr.push(l2)
1266 lr.push(l2)
1251 continue
1267 continue
1252 newfile = True
1268 newfile = True
1253 context = False
1269 context = False
1254 afile = parsefilename(x)
1270 afile = parsefilename(x)
1255 bfile = parsefilename(l2)
1271 bfile = parsefilename(l2)
1256 elif x.startswith('***'):
1272 elif x.startswith('***'):
1257 # check for a context diff
1273 # check for a context diff
1258 l2 = lr.readline()
1274 l2 = lr.readline()
1259 if not l2.startswith('---'):
1275 if not l2.startswith('---'):
1260 lr.push(l2)
1276 lr.push(l2)
1261 continue
1277 continue
1262 l3 = lr.readline()
1278 l3 = lr.readline()
1263 lr.push(l3)
1279 lr.push(l3)
1264 if not l3.startswith("***************"):
1280 if not l3.startswith("***************"):
1265 lr.push(l2)
1281 lr.push(l2)
1266 continue
1282 continue
1267 newfile = True
1283 newfile = True
1268 context = True
1284 context = True
1269 afile = parsefilename(x)
1285 afile = parsefilename(x)
1270 bfile = parsefilename(l2)
1286 bfile = parsefilename(l2)
1271
1287
1272 if newfile:
1288 if newfile:
1273 newfile = False
1289 newfile = False
1274 emitfile = True
1290 emitfile = True
1275 state = BFILE
1291 state = BFILE
1276 hunknum = 0
1292 hunknum = 0
1277
1293
1278 while gitpatches:
1294 while gitpatches:
1279 gp = gitpatches.pop()
1295 gp = gitpatches.pop()
1280 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1296 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1281
1297
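# Editorial sketch (not part of patch.py): the event stream iterhunks is
# expected to produce for a minimal unified diff, assuming this module is
# importable as mercurial.patch (Python 2, matching the code above).
import cStringIO
from mercurial import patch as patchmod
text = ('--- a/foo\n'
        '+++ b/foo\n'
        '@@ -1,1 +1,1 @@\n'
        '-old\n'
        '+new\n')
states = [s for s, v in patchmod.iterhunks(cStringIO.StringIO(text))]
assert states == ['file', 'hunk']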
1282 def applybindelta(binchunk, data):
1298 def applybindelta(binchunk, data):
1283 """Apply a binary delta hunk
1299 """Apply a binary delta hunk
1284 The algorithm used is the algorithm from git's patch-delta.c
1300 The algorithm used is the algorithm from git's patch-delta.c
1285 """
1301 """
1286 def deltahead(binchunk):
1302 def deltahead(binchunk):
1287 i = 0
1303 i = 0
1288 for c in binchunk:
1304 for c in binchunk:
1289 i += 1
1305 i += 1
1290 if not (ord(c) & 0x80):
1306 if not (ord(c) & 0x80):
1291 return i
1307 return i
1292 return i
1308 return i
1293 out = ""
1309 out = ""
1294 s = deltahead(binchunk)
1310 s = deltahead(binchunk)
1295 binchunk = binchunk[s:]
1311 binchunk = binchunk[s:]
1296 s = deltahead(binchunk)
1312 s = deltahead(binchunk)
1297 binchunk = binchunk[s:]
1313 binchunk = binchunk[s:]
1298 i = 0
1314 i = 0
1299 while i < len(binchunk):
1315 while i < len(binchunk):
1300 cmd = ord(binchunk[i])
1316 cmd = ord(binchunk[i])
1301 i += 1
1317 i += 1
1302 if (cmd & 0x80):
1318 if (cmd & 0x80):
1303 offset = 0
1319 offset = 0
1304 size = 0
1320 size = 0
1305 if (cmd & 0x01):
1321 if (cmd & 0x01):
1306 offset = ord(binchunk[i])
1322 offset = ord(binchunk[i])
1307 i += 1
1323 i += 1
1308 if (cmd & 0x02):
1324 if (cmd & 0x02):
1309 offset |= ord(binchunk[i]) << 8
1325 offset |= ord(binchunk[i]) << 8
1310 i += 1
1326 i += 1
1311 if (cmd & 0x04):
1327 if (cmd & 0x04):
1312 offset |= ord(binchunk[i]) << 16
1328 offset |= ord(binchunk[i]) << 16
1313 i += 1
1329 i += 1
1314 if (cmd & 0x08):
1330 if (cmd & 0x08):
1315 offset |= ord(binchunk[i]) << 24
1331 offset |= ord(binchunk[i]) << 24
1316 i += 1
1332 i += 1
1317 if (cmd & 0x10):
1333 if (cmd & 0x10):
1318 size = ord(binchunk[i])
1334 size = ord(binchunk[i])
1319 i += 1
1335 i += 1
1320 if (cmd & 0x20):
1336 if (cmd & 0x20):
1321 size |= ord(binchunk[i]) << 8
1337 size |= ord(binchunk[i]) << 8
1322 i += 1
1338 i += 1
1323 if (cmd & 0x40):
1339 if (cmd & 0x40):
1324 size |= ord(binchunk[i]) << 16
1340 size |= ord(binchunk[i]) << 16
1325 i += 1
1341 i += 1
1326 if size == 0:
1342 if size == 0:
1327 size = 0x10000
1343 size = 0x10000
1328 offset_end = offset + size
1344 offset_end = offset + size
1329 out += data[offset:offset_end]
1345 out += data[offset:offset_end]
1330 elif cmd != 0:
1346 elif cmd != 0:
1331 offset_end = i + cmd
1347 offset_end = i + cmd
1332 out += binchunk[i:offset_end]
1348 out += binchunk[i:offset_end]
1333 i += cmd
1349 i += cmd
1334 else:
1350 else:
1335 raise PatchError(_('unexpected delta opcode 0'))
1351 raise PatchError(_('unexpected delta opcode 0'))
1336 return out
1352 return out
1337
1353
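# Editorial sketch (not part of patch.py): hand-decoding the two kinds of
# delta opcode handled above.  0x91 == 0x80|0x10|0x01 is a copy command
# with a one-byte offset and a one-byte size following it; a small opcode
# such as 0x03 inserts that many literal bytes taken from the delta itself.
# The two leading size headers (skipped by deltahead) are '\x0a' and '\x06'.
source = 'abcdefghij'
delta = '\x0a\x06' + '\x91\x05\x03' + '\x03XYZ'
expected = source[5:5 + 3] + 'XYZ'   # copy 3 bytes at offset 5, then insert XYZ
assert expected == 'fghXYZ'
# assuming mercurial.patch is importable, the function above should agree:
#     applybindelta(delta, source) == 'fghXYZ'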
1338 def applydiff(ui, fp, backend, store, strip=1, eolmode='strict'):
1354 def applydiff(ui, fp, backend, store, strip=1, eolmode='strict'):
1339 """Reads a patch from fp and tries to apply it.
1355 """Reads a patch from fp and tries to apply it.
1340
1356
1341 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
1357 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
1342 there was any fuzz.
1358 there was any fuzz.
1343
1359
1344 If 'eolmode' is 'strict', the patch content and patched file are
1360 If 'eolmode' is 'strict', the patch content and patched file are
1345 read in binary mode. Otherwise, line endings are ignored when
1361 read in binary mode. Otherwise, line endings are ignored when
1346 patching then normalized according to 'eolmode'.
1362 patching then normalized according to 'eolmode'.
1347 """
1363 """
1348 return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
1364 return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
1349 eolmode=eolmode)
1365 eolmode=eolmode)
1350
1366
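# Editorial sketch (not part of patch.py): _applydiff's pstrip (below) uses
# strip - 1 because git patchmeta paths carry no 'a/' or 'b/' prefix, so
# one component fewer must be stripped than for '--- a/...' header paths.
# Assuming mercurial.patch is importable:
from mercurial.patch import pathtransform
assert pathtransform('a/dir/file.c', 1)[1] == 'dir/file.c'   # header path
assert pathtransform('dir/file.c', 0)[1] == 'dir/file.c'     # patchmeta path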
1351 def _applydiff(ui, fp, patcher, backend, store, strip=1,
1367 def _applydiff(ui, fp, patcher, backend, store, strip=1,
1352 eolmode='strict'):
1368 eolmode='strict'):
1353
1369
1354 def pstrip(p):
1370 def pstrip(p):
1355 return pathtransform(p, strip - 1)[1]
1371 return pathtransform(p, strip - 1)[1]
1356
1372
1357 rejects = 0
1373 rejects = 0
1358 err = 0
1374 err = 0
1359 current_file = None
1375 current_file = None
1360
1376
1361 for state, values in iterhunks(fp):
1377 for state, values in iterhunks(fp):
1362 if state == 'hunk':
1378 if state == 'hunk':
1363 if not current_file:
1379 if not current_file:
1364 continue
1380 continue
1365 ret = current_file.apply(values)
1381 ret = current_file.apply(values)
1366 if ret > 0:
1382 if ret > 0:
1367 err = 1
1383 err = 1
1368 elif state == 'file':
1384 elif state == 'file':
1369 if current_file:
1385 if current_file:
1370 rejects += current_file.close()
1386 rejects += current_file.close()
1371 current_file = None
1387 current_file = None
1372 afile, bfile, first_hunk, gp = values
1388 afile, bfile, first_hunk, gp = values
1373 if gp:
1389 if gp:
1374 gp.path = pstrip(gp.path)
1390 gp.path = pstrip(gp.path)
1375 if gp.oldpath:
1391 if gp.oldpath:
1376 gp.oldpath = pstrip(gp.oldpath)
1392 gp.oldpath = pstrip(gp.oldpath)
1377 else:
1393 else:
1378 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip)
1394 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip)
1379 if gp.op == 'RENAME':
1395 if gp.op == 'RENAME':
1380 backend.unlink(gp.oldpath)
1396 backend.unlink(gp.oldpath)
1381 if not first_hunk:
1397 if not first_hunk:
1382 if gp.op == 'DELETE':
1398 if gp.op == 'DELETE':
1383 backend.unlink(gp.path)
1399 backend.unlink(gp.path)
1384 continue
1400 continue
1385 data, mode = None, None
1401 data, mode = None, None
1386 if gp.op in ('RENAME', 'COPY'):
1402 if gp.op in ('RENAME', 'COPY'):
1387 data, mode = store.getfile(gp.oldpath)[:2]
1403 data, mode = store.getfile(gp.oldpath)[:2]
1388 # FIXME: failing getfile has never been handled here
1404 # FIXME: failing getfile has never been handled here
1389 assert data is not None
1405 assert data is not None
1390 if gp.mode:
1406 if gp.mode:
1391 mode = gp.mode
1407 mode = gp.mode
1392 if gp.op == 'ADD':
1408 if gp.op == 'ADD':
1393 # Added files without content have no hunk and
1409 # Added files without content have no hunk and
1394 # must be created
1410 # must be created
1395 data = ''
1411 data = ''
1396 if data or mode:
1412 if data or mode:
1397 if (gp.op in ('ADD', 'RENAME', 'COPY')
1413 if (gp.op in ('ADD', 'RENAME', 'COPY')
1398 and backend.exists(gp.path)):
1414 and backend.exists(gp.path)):
1399 raise PatchError(_("cannot create %s: destination "
1415 raise PatchError(_("cannot create %s: destination "
1400 "already exists") % gp.path)
1416 "already exists") % gp.path)
1401 backend.setfile(gp.path, data, mode, gp.oldpath)
1417 backend.setfile(gp.path, data, mode, gp.oldpath)
1402 continue
1418 continue
1403 try:
1419 try:
1404 current_file = patcher(ui, gp, backend, store,
1420 current_file = patcher(ui, gp, backend, store,
1405 eolmode=eolmode)
1421 eolmode=eolmode)
1406 except PatchError, inst:
1422 except PatchError, inst:
1407 ui.warn(str(inst) + '\n')
1423 ui.warn(str(inst) + '\n')
1408 current_file = None
1424 current_file = None
1409 rejects += 1
1425 rejects += 1
1410 continue
1426 continue
1411 elif state == 'git':
1427 elif state == 'git':
1412 for gp in values:
1428 for gp in values:
1413 path = pstrip(gp.oldpath)
1429 path = pstrip(gp.oldpath)
1414 data, mode = backend.getfile(path)
1430 data, mode = backend.getfile(path)
1415 if data is None:
1431 if data is None:
1416 # The error ignored here will trigger a getfile()
1432 # The error ignored here will trigger a getfile()
1417 # error in a place more appropriate for error
1433 # error in a place more appropriate for error
1418 # handling, and will not interrupt the patching
1434 # handling, and will not interrupt the patching
1419 # process.
1435 # process.
1420 pass
1436 pass
1421 else:
1437 else:
1422 store.setfile(path, data, mode)
1438 store.setfile(path, data, mode)
1423 else:
1439 else:
1424 raise util.Abort(_('unsupported parser state: %s') % state)
1440 raise util.Abort(_('unsupported parser state: %s') % state)
1425
1441
1426 if current_file:
1442 if current_file:
1427 rejects += current_file.close()
1443 rejects += current_file.close()
1428
1444
1429 if rejects:
1445 if rejects:
1430 return -1
1446 return -1
1431 return err
1447 return err
1432
1448
1433 def _externalpatch(ui, repo, patcher, patchname, strip, files,
1449 def _externalpatch(ui, repo, patcher, patchname, strip, files,
1434 similarity):
1450 similarity):
1435 """use <patcher> to apply <patchname> to the working directory.
1451 """use <patcher> to apply <patchname> to the working directory.
1436 returns whether patch was applied with fuzz factor."""
1452 returns whether patch was applied with fuzz factor."""
1437
1453
1438 fuzz = False
1454 fuzz = False
1439 args = []
1455 args = []
1440 cwd = repo.root
1456 cwd = repo.root
1441 if cwd:
1457 if cwd:
1442 args.append('-d %s' % util.shellquote(cwd))
1458 args.append('-d %s' % util.shellquote(cwd))
1443 fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
1459 fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
1444 util.shellquote(patchname)))
1460 util.shellquote(patchname)))
1445 try:
1461 try:
1446 for line in fp:
1462 for line in fp:
1447 line = line.rstrip()
1463 line = line.rstrip()
1448 ui.note(line + '\n')
1464 ui.note(line + '\n')
1449 if line.startswith('patching file '):
1465 if line.startswith('patching file '):
1450 pf = util.parsepatchoutput(line)
1466 pf = util.parsepatchoutput(line)
1451 printed_file = False
1467 printed_file = False
1452 files.add(pf)
1468 files.add(pf)
1453 elif line.find('with fuzz') >= 0:
1469 elif line.find('with fuzz') >= 0:
1454 fuzz = True
1470 fuzz = True
1455 if not printed_file:
1471 if not printed_file:
1456 ui.warn(pf + '\n')
1472 ui.warn(pf + '\n')
1457 printed_file = True
1473 printed_file = True
1458 ui.warn(line + '\n')
1474 ui.warn(line + '\n')
1459 elif line.find('saving rejects to file') >= 0:
1475 elif line.find('saving rejects to file') >= 0:
1460 ui.warn(line + '\n')
1476 ui.warn(line + '\n')
1461 elif line.find('FAILED') >= 0:
1477 elif line.find('FAILED') >= 0:
1462 if not printed_file:
1478 if not printed_file:
1463 ui.warn(pf + '\n')
1479 ui.warn(pf + '\n')
1464 printed_file = True
1480 printed_file = True
1465 ui.warn(line + '\n')
1481 ui.warn(line + '\n')
1466 finally:
1482 finally:
1467 if files:
1483 if files:
1468 scmutil.marktouched(repo, files, similarity)
1484 scmutil.marktouched(repo, files, similarity)
1469 code = fp.close()
1485 code = fp.close()
1470 if code:
1486 if code:
1471 raise PatchError(_("patch command failed: %s") %
1487 raise PatchError(_("patch command failed: %s") %
1472 util.explainexit(code)[0])
1488 util.explainexit(code)[0])
1473 return fuzz
1489 return fuzz
1474
1490
1475 def patchbackend(ui, backend, patchobj, strip, files=None, eolmode='strict'):
1491 def patchbackend(ui, backend, patchobj, strip, files=None, eolmode='strict'):
1476 if files is None:
1492 if files is None:
1477 files = set()
1493 files = set()
1478 if eolmode is None:
1494 if eolmode is None:
1479 eolmode = ui.config('patch', 'eol', 'strict')
1495 eolmode = ui.config('patch', 'eol', 'strict')
1480 if eolmode.lower() not in eolmodes:
1496 if eolmode.lower() not in eolmodes:
1481 raise util.Abort(_('unsupported line endings type: %s') % eolmode)
1497 raise util.Abort(_('unsupported line endings type: %s') % eolmode)
1482 eolmode = eolmode.lower()
1498 eolmode = eolmode.lower()
1483
1499
1484 store = filestore()
1500 store = filestore()
1485 try:
1501 try:
1486 fp = open(patchobj, 'rb')
1502 fp = open(patchobj, 'rb')
1487 except TypeError:
1503 except TypeError:
1488 fp = patchobj
1504 fp = patchobj
1489 try:
1505 try:
1490 ret = applydiff(ui, fp, backend, store, strip=strip,
1506 ret = applydiff(ui, fp, backend, store, strip=strip,
1491 eolmode=eolmode)
1507 eolmode=eolmode)
1492 finally:
1508 finally:
1493 if fp != patchobj:
1509 if fp != patchobj:
1494 fp.close()
1510 fp.close()
1495 files.update(backend.close())
1511 files.update(backend.close())
1496 store.close()
1512 store.close()
1497 if ret < 0:
1513 if ret < 0:
1498 raise PatchError(_('patch failed to apply'))
1514 raise PatchError(_('patch failed to apply'))
1499 return ret > 0
1515 return ret > 0
1500
1516
1501 def internalpatch(ui, repo, patchobj, strip, files=None, eolmode='strict',
1517 def internalpatch(ui, repo, patchobj, strip, files=None, eolmode='strict',
1502 similarity=0):
1518 similarity=0):
1503 """use builtin patch to apply <patchobj> to the working directory.
1519 """use builtin patch to apply <patchobj> to the working directory.
1504 returns whether patch was applied with fuzz factor."""
1520 returns whether patch was applied with fuzz factor."""
1505 backend = workingbackend(ui, repo, similarity)
1521 backend = workingbackend(ui, repo, similarity)
1506 return patchbackend(ui, backend, patchobj, strip, files, eolmode)
1522 return patchbackend(ui, backend, patchobj, strip, files, eolmode)
1507
1523
1508 def patchrepo(ui, repo, ctx, store, patchobj, strip, files=None,
1524 def patchrepo(ui, repo, ctx, store, patchobj, strip, files=None,
1509 eolmode='strict'):
1525 eolmode='strict'):
1510 backend = repobackend(ui, repo, ctx, store)
1526 backend = repobackend(ui, repo, ctx, store)
1511 return patchbackend(ui, backend, patchobj, strip, files, eolmode)
1527 return patchbackend(ui, backend, patchobj, strip, files, eolmode)
1512
1528
1513 def patch(ui, repo, patchname, strip=1, files=None, eolmode='strict',
1529 def patch(ui, repo, patchname, strip=1, files=None, eolmode='strict',
1514 similarity=0):
1530 similarity=0):
1515 """Apply <patchname> to the working directory.
1531 """Apply <patchname> to the working directory.
1516
1532
1517 'eolmode' specifies how end of lines should be handled. It can be:
1533 'eolmode' specifies how end of lines should be handled. It can be:
1518 - 'strict': inputs are read in binary mode, EOLs are preserved
1534 - 'strict': inputs are read in binary mode, EOLs are preserved
1519 - 'crlf': EOLs are ignored when patching and reset to CRLF
1535 - 'crlf': EOLs are ignored when patching and reset to CRLF
1520 - 'lf': EOLs are ignored when patching and reset to LF
1536 - 'lf': EOLs are ignored when patching and reset to LF
1521 - None: get it from user settings, default to 'strict'
1537 - None: get it from user settings, default to 'strict'
1522 'eolmode' is ignored when using an external patcher program.
1538 'eolmode' is ignored when using an external patcher program.
1523
1539
1524 Returns whether patch was applied with fuzz factor.
1540 Returns whether patch was applied with fuzz factor.
1525 """
1541 """
1526 patcher = ui.config('ui', 'patch')
1542 patcher = ui.config('ui', 'patch')
1527 if files is None:
1543 if files is None:
1528 files = set()
1544 files = set()
1529 if patcher:
1545 if patcher:
1530 return _externalpatch(ui, repo, patcher, patchname, strip,
1546 return _externalpatch(ui, repo, patcher, patchname, strip,
1531 files, similarity)
1547 files, similarity)
1532 return internalpatch(ui, repo, patchname, strip, files, eolmode,
1548 return internalpatch(ui, repo, patchname, strip, files, eolmode,
1533 similarity)
1549 similarity)
1534
1550
1535 def changedfiles(ui, repo, patchpath, strip=1):
1551 def changedfiles(ui, repo, patchpath, strip=1):
1536 backend = fsbackend(ui, repo.root)
1552 backend = fsbackend(ui, repo.root)
1537 fp = open(patchpath, 'rb')
1553 fp = open(patchpath, 'rb')
1538 try:
1554 try:
1539 changed = set()
1555 changed = set()
1540 for state, values in iterhunks(fp):
1556 for state, values in iterhunks(fp):
1541 if state == 'file':
1557 if state == 'file':
1542 afile, bfile, first_hunk, gp = values
1558 afile, bfile, first_hunk, gp = values
1543 if gp:
1559 if gp:
1544 gp.path = pathtransform(gp.path, strip - 1)[1]
1560 gp.path = pathtransform(gp.path, strip - 1)[1]
1545 if gp.oldpath:
1561 if gp.oldpath:
1546 gp.oldpath = pathtransform(gp.oldpath, strip - 1)[1]
1562 gp.oldpath = pathtransform(gp.oldpath, strip - 1)[1]
1547 else:
1563 else:
1548 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip)
1564 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip)
1549 changed.add(gp.path)
1565 changed.add(gp.path)
1550 if gp.op == 'RENAME':
1566 if gp.op == 'RENAME':
1551 changed.add(gp.oldpath)
1567 changed.add(gp.oldpath)
1552 elif state not in ('hunk', 'git'):
1568 elif state not in ('hunk', 'git'):
1553 raise util.Abort(_('unsupported parser state: %s') % state)
1569 raise util.Abort(_('unsupported parser state: %s') % state)
1554 return changed
1570 return changed
1555 finally:
1571 finally:
1556 fp.close()
1572 fp.close()
1557
1573
1558 class GitDiffRequired(Exception):
1574 class GitDiffRequired(Exception):
1559 pass
1575 pass
1560
1576
1561 def diffallopts(ui, opts=None, untrusted=False, section='diff'):
1577 def diffallopts(ui, opts=None, untrusted=False, section='diff'):
1562 '''return diffopts with all features supported and parsed'''
1578 '''return diffopts with all features supported and parsed'''
1563 return difffeatureopts(ui, opts=opts, untrusted=untrusted, section=section,
1579 return difffeatureopts(ui, opts=opts, untrusted=untrusted, section=section,
1564 git=True, whitespace=True, formatchanging=True)
1580 git=True, whitespace=True, formatchanging=True)
1565
1581
1566 diffopts = diffallopts
1582 diffopts = diffallopts
1567
1583
1568 def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
1584 def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
1569 whitespace=False, formatchanging=False):
1585 whitespace=False, formatchanging=False):
1570 '''return diffopts with only opted-in features parsed
1586 '''return diffopts with only opted-in features parsed
1571
1587
1572 Features:
1588 Features:
1573 - git: git-style diffs
1589 - git: git-style diffs
1574 - whitespace: whitespace options like ignoreblanklines and ignorews
1590 - whitespace: whitespace options like ignoreblanklines and ignorews
1575 - formatchanging: options that will likely break or cause correctness issues
1591 - formatchanging: options that will likely break or cause correctness issues
1576 with most diff parsers
1592 with most diff parsers
1577 '''
1593 '''
1578 def get(key, name=None, getter=ui.configbool, forceplain=None):
1594 def get(key, name=None, getter=ui.configbool, forceplain=None):
1579 if opts:
1595 if opts:
1580 v = opts.get(key)
1596 v = opts.get(key)
1581 if v:
1597 if v:
1582 return v
1598 return v
1583 if forceplain is not None and ui.plain():
1599 if forceplain is not None and ui.plain():
1584 return forceplain
1600 return forceplain
1585 return getter(section, name or key, None, untrusted=untrusted)
1601 return getter(section, name or key, None, untrusted=untrusted)
1586
1602
1587 # core options, expected to be understood by every diff parser
1603 # core options, expected to be understood by every diff parser
1588 buildopts = {
1604 buildopts = {
1589 'nodates': get('nodates'),
1605 'nodates': get('nodates'),
1590 'showfunc': get('show_function', 'showfunc'),
1606 'showfunc': get('show_function', 'showfunc'),
1591 'context': get('unified', getter=ui.config),
1607 'context': get('unified', getter=ui.config),
1592 }
1608 }
1593
1609
1594 if git:
1610 if git:
1595 buildopts['git'] = get('git')
1611 buildopts['git'] = get('git')
1596 if whitespace:
1612 if whitespace:
1597 buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
1613 buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
1598 buildopts['ignorewsamount'] = get('ignore_space_change',
1614 buildopts['ignorewsamount'] = get('ignore_space_change',
1599 'ignorewsamount')
1615 'ignorewsamount')
1600 buildopts['ignoreblanklines'] = get('ignore_blank_lines',
1616 buildopts['ignoreblanklines'] = get('ignore_blank_lines',
1601 'ignoreblanklines')
1617 'ignoreblanklines')
1602 if formatchanging:
1618 if formatchanging:
1603 buildopts['text'] = opts and opts.get('text')
1619 buildopts['text'] = opts and opts.get('text')
1604 buildopts['nobinary'] = get('nobinary')
1620 buildopts['nobinary'] = get('nobinary')
1605 buildopts['noprefix'] = get('noprefix', forceplain=False)
1621 buildopts['noprefix'] = get('noprefix', forceplain=False)
1606
1622
1607 return mdiff.diffopts(**buildopts)
1623 return mdiff.diffopts(**buildopts)
1608
1624
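# Editorial sketch (not part of patch.py): the precedence implemented by
# get() above (the ui.plain() special case aside), shown with plain dicts
# standing in for opts and ui.config -- an explicit command-line value
# wins, otherwise the [diff] section of the configuration is consulted.
cmdopts = {'ignorews': True}
configured = {('diff', 'ignorews'): False, ('diff', 'ignoreblanklines'): True}
def lookup(key):
    v = cmdopts.get(key)
    if v:
        return v
    return configured.get(('diff', key))
assert lookup('ignorews') is True            # command line wins
assert lookup('ignoreblanklines') is True    # falls back to config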
1609 def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
1625 def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
1610 losedatafn=None, prefix=''):
1626 losedatafn=None, prefix=''):
1611 '''yields diff of changes to files between two nodes, or node and
1627 '''yields diff of changes to files between two nodes, or node and
1612 working directory.
1628 working directory.
1613
1629
1614 if node1 is None, use first dirstate parent instead.
1630 if node1 is None, use first dirstate parent instead.
1615 if node2 is None, compare node1 with working directory.
1631 if node2 is None, compare node1 with working directory.
1616
1632
1617 losedatafn(**kwargs) is a callable run when opts.upgrade=True and
1633 losedatafn(**kwargs) is a callable run when opts.upgrade=True and
1618 every time some change cannot be represented with the current
1634 every time some change cannot be represented with the current
1619 patch format. Return False to upgrade to git patch format, True to
1635 patch format. Return False to upgrade to git patch format, True to
1620 accept the loss or raise an exception to abort the diff. It is
1636 accept the loss or raise an exception to abort the diff. It is
1621 called with the name of current file being diffed as 'fn'. If set
1637 called with the name of current file being diffed as 'fn'. If set
1622 to None, patches will always be upgraded to git format when
1638 to None, patches will always be upgraded to git format when
1623 necessary.
1639 necessary.
1624
1640
1625 prefix is a filename prefix that is prepended to all filenames on
1641 prefix is a filename prefix that is prepended to all filenames on
1626 display (used for subrepos).
1642 display (used for subrepos).
1627 '''
1643 '''
1628
1644
1629 if opts is None:
1645 if opts is None:
1630 opts = mdiff.defaultopts
1646 opts = mdiff.defaultopts
1631
1647
1632 if not node1 and not node2:
1648 if not node1 and not node2:
1633 node1 = repo.dirstate.p1()
1649 node1 = repo.dirstate.p1()
1634
1650
1635 def lrugetfilectx():
1651 def lrugetfilectx():
1636 cache = {}
1652 cache = {}
1637 order = util.deque()
1653 order = util.deque()
1638 def getfilectx(f, ctx):
1654 def getfilectx(f, ctx):
1639 fctx = ctx.filectx(f, filelog=cache.get(f))
1655 fctx = ctx.filectx(f, filelog=cache.get(f))
1640 if f not in cache:
1656 if f not in cache:
1641 if len(cache) > 20:
1657 if len(cache) > 20:
1642 del cache[order.popleft()]
1658 del cache[order.popleft()]
1643 cache[f] = fctx.filelog()
1659 cache[f] = fctx.filelog()
1644 else:
1660 else:
1645 order.remove(f)
1661 order.remove(f)
1646 order.append(f)
1662 order.append(f)
1647 return fctx
1663 return fctx
1648 return getfilectx
1664 return getfilectx
1649 getfilectx = lrugetfilectx()
1665 getfilectx = lrugetfilectx()
1650
1666
1651 ctx1 = repo[node1]
1667 ctx1 = repo[node1]
1652 ctx2 = repo[node2]
1668 ctx2 = repo[node2]
1653
1669
1654 if not changes:
1670 if not changes:
1655 changes = repo.status(ctx1, ctx2, match=match)
1671 changes = repo.status(ctx1, ctx2, match=match)
1656 modified, added, removed = changes[:3]
1672 modified, added, removed = changes[:3]
1657
1673
1658 if not modified and not added and not removed:
1674 if not modified and not added and not removed:
1659 return []
1675 return []
1660
1676
1661 hexfunc = repo.ui.debugflag and hex or short
1677 hexfunc = repo.ui.debugflag and hex or short
1662 revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]
1678 revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]
1663
1679
1664 copy = {}
1680 copy = {}
1665 if opts.git or opts.upgrade:
1681 if opts.git or opts.upgrade:
1666 copy = copies.pathcopies(ctx1, ctx2)
1682 copy = copies.pathcopies(ctx1, ctx2)
1667
1683
1668 def difffn(opts, losedata):
1684 def difffn(opts, losedata):
1669 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
1685 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
1670 copy, getfilectx, opts, losedata, prefix)
1686 copy, getfilectx, opts, losedata, prefix)
1671 if opts.upgrade and not opts.git:
1687 if opts.upgrade and not opts.git:
1672 try:
1688 try:
1673 def losedata(fn):
1689 def losedata(fn):
1674 if not losedatafn or not losedatafn(fn=fn):
1690 if not losedatafn or not losedatafn(fn=fn):
1675 raise GitDiffRequired
1691 raise GitDiffRequired
1676 # Buffer the whole output until we are sure it can be generated
1692 # Buffer the whole output until we are sure it can be generated
1677 return list(difffn(opts.copy(git=False), losedata))
1693 return list(difffn(opts.copy(git=False), losedata))
1678 except GitDiffRequired:
1694 except GitDiffRequired:
1679 return difffn(opts.copy(git=True), None)
1695 return difffn(opts.copy(git=True), None)
1680 else:
1696 else:
1681 return difffn(opts, None)
1697 return difffn(opts, None)
1682
1698
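# Editorial sketch (not part of patch.py): a losedatafn honouring the
# contract in the docstring above -- return False to request an upgrade to
# git format, True to accept the lossy plain diff (illustrative policy,
# keyed on file extension only; the name is ours).
def mylosedatafn(fn=None):
    if fn and fn.endswith('.png'):
        return False        # binaries: ask diff() to switch to git format
    return True             # everything else: accept the loss
assert mylosedatafn(fn='logo.png') is False
assert mylosedatafn(fn='readme.txt') is True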
1683 def difflabel(func, *args, **kw):
1699 def difflabel(func, *args, **kw):
1684 '''yields 2-tuples of (output, label) based on the output of func()'''
1700 '''yields 2-tuples of (output, label) based on the output of func()'''
1685 headprefixes = [('diff', 'diff.diffline'),
1701 headprefixes = [('diff', 'diff.diffline'),
1686 ('copy', 'diff.extended'),
1702 ('copy', 'diff.extended'),
1687 ('rename', 'diff.extended'),
1703 ('rename', 'diff.extended'),
1688 ('old', 'diff.extended'),
1704 ('old', 'diff.extended'),
1689 ('new', 'diff.extended'),
1705 ('new', 'diff.extended'),
1690 ('deleted', 'diff.extended'),
1706 ('deleted', 'diff.extended'),
1691 ('---', 'diff.file_a'),
1707 ('---', 'diff.file_a'),
1692 ('+++', 'diff.file_b')]
1708 ('+++', 'diff.file_b')]
1693 textprefixes = [('@', 'diff.hunk'),
1709 textprefixes = [('@', 'diff.hunk'),
1694 ('-', 'diff.deleted'),
1710 ('-', 'diff.deleted'),
1695 ('+', 'diff.inserted')]
1711 ('+', 'diff.inserted')]
1696 head = False
1712 head = False
1697 for chunk in func(*args, **kw):
1713 for chunk in func(*args, **kw):
1698 lines = chunk.split('\n')
1714 lines = chunk.split('\n')
1699 for i, line in enumerate(lines):
1715 for i, line in enumerate(lines):
1700 if i != 0:
1716 if i != 0:
1701 yield ('\n', '')
1717 yield ('\n', '')
1702 if head:
1718 if head:
1703 if line.startswith('@'):
1719 if line.startswith('@'):
1704 head = False
1720 head = False
1705 else:
1721 else:
1706 if line and line[0] not in ' +-@\\':
1722 if line and line[0] not in ' +-@\\':
1707 head = True
1723 head = True
1708 stripline = line
1724 stripline = line
1709 diffline = False
1725 diffline = False
1710 if not head and line and line[0] in '+-':
1726 if not head and line and line[0] in '+-':
1711 # highlight tabs and trailing whitespace, but only in
1727 # highlight tabs and trailing whitespace, but only in
1712 # changed lines
1728 # changed lines
1713 stripline = line.rstrip()
1729 stripline = line.rstrip()
1714 diffline = True
1730 diffline = True
1715
1731
1716 prefixes = textprefixes
1732 prefixes = textprefixes
1717 if head:
1733 if head:
1718 prefixes = headprefixes
1734 prefixes = headprefixes
1719 for prefix, label in prefixes:
1735 for prefix, label in prefixes:
1720 if stripline.startswith(prefix):
1736 if stripline.startswith(prefix):
1721 if diffline:
1737 if diffline:
1722 for token in tabsplitter.findall(stripline):
1738 for token in tabsplitter.findall(stripline):
1723 if '\t' == token[0]:
1739 if '\t' == token[0]:
1724 yield (token, 'diff.tab')
1740 yield (token, 'diff.tab')
1725 else:
1741 else:
1726 yield (token, label)
1742 yield (token, label)
1727 else:
1743 else:
1728 yield (stripline, label)
1744 yield (stripline, label)
1729 break
1745 break
1730 else:
1746 else:
1731 yield (line, '')
1747 yield (line, '')
1732 if line != stripline:
1748 if line != stripline:
1733 yield (line[len(stripline):], 'diff.trailingwhitespace')
1749 yield (line[len(stripline):], 'diff.trailingwhitespace')
1734
1750
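# Editorial sketch (not part of patch.py): how one added line with trailing
# whitespace comes out of the loop above -- the rstripped text is labelled
# 'diff.inserted' and the stripped-off tail 'diff.trailingwhitespace'
# (newline separators between lines omitted here).
line = '+new code   '
stripline = line.rstrip()
labelled = [(stripline, 'diff.inserted'),
            (line[len(stripline):], 'diff.trailingwhitespace')]
assert labelled == [('+new code', 'diff.inserted'),
                    ('   ', 'diff.trailingwhitespace')]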
1735 def diffui(*args, **kw):
1751 def diffui(*args, **kw):
1736 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
1752 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
1737 return difflabel(diff, *args, **kw)
1753 return difflabel(diff, *args, **kw)
1738
1754
1739 def _filepairs(ctx1, modified, added, removed, copy, opts):
1755 def _filepairs(ctx1, modified, added, removed, copy, opts):
1740 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
1756 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
1741 before and f2 is the name after. For added files, f1 will be None,
1757 before and f2 is the name after. For added files, f1 will be None,
1742 and for removed files, f2 will be None. copyop may be set to None, 'copy'
1758 and for removed files, f2 will be None. copyop may be set to None, 'copy'
1743 or 'rename' (the latter two only if opts.git is set).'''
1759 or 'rename' (the latter two only if opts.git is set).'''
1744 gone = set()
1760 gone = set()
1745
1761
1746 copyto = dict([(v, k) for k, v in copy.items()])
1762 copyto = dict([(v, k) for k, v in copy.items()])
1747
1763
1748 addedset, removedset = set(added), set(removed)
1764 addedset, removedset = set(added), set(removed)
1749 # Fix up added, since merged-in additions appear as
1765 # Fix up added, since merged-in additions appear as
1750 # modifications during merges
1766 # modifications during merges
1751 for f in modified:
1767 for f in modified:
1752 if f not in ctx1:
1768 if f not in ctx1:
1753 addedset.add(f)
1769 addedset.add(f)
1754
1770
1755 for f in sorted(modified + added + removed):
1771 for f in sorted(modified + added + removed):
1756 copyop = None
1772 copyop = None
1757 f1, f2 = f, f
1773 f1, f2 = f, f
1758 if f in addedset:
1774 if f in addedset:
1759 f1 = None
1775 f1 = None
1760 if f in copy:
1776 if f in copy:
1761 if opts.git:
1777 if opts.git:
1762 f1 = copy[f]
1778 f1 = copy[f]
1763 if f1 in removedset and f1 not in gone:
1779 if f1 in removedset and f1 not in gone:
1764 copyop = 'rename'
1780 copyop = 'rename'
1765 gone.add(f1)
1781 gone.add(f1)
1766 else:
1782 else:
1767 copyop = 'copy'
1783 copyop = 'copy'
1768 elif f in removedset:
1784 elif f in removedset:
1769 f2 = None
1785 f2 = None
1770 if opts.git:
1786 if opts.git:
1771 # have we already reported a copy above?
1787 # have we already reported a copy above?
1772 if (f in copyto and copyto[f] in addedset
1788 if (f in copyto and copyto[f] in addedset
1773 and copy[copyto[f]] == f):
1789 and copy[copyto[f]] == f):
1774 continue
1790 continue
1775 yield f1, f2, copyop
1791 yield f1, f2, copyop
1776
1792
1777 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
1793 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
1778 copy, getfilectx, opts, losedatafn, prefix):
1794 copy, getfilectx, opts, losedatafn, prefix):
1779
1795
1780 def gitindex(text):
1796 def gitindex(text):
1781 if not text:
1797 if not text:
1782 text = ""
1798 text = ""
1783 l = len(text)
1799 l = len(text)
1784 s = util.sha1('blob %d\0' % l)
1800 s = util.sha1('blob %d\0' % l)
1785 s.update(text)
1801 s.update(text)
1786 return s.hexdigest()
1802 return s.hexdigest()
1787
1803
1788 if opts.noprefix:
1804 if opts.noprefix:
1789 aprefix = bprefix = ''
1805 aprefix = bprefix = ''
1790 else:
1806 else:
1791 aprefix = 'a/'
1807 aprefix = 'a/'
1792 bprefix = 'b/'
1808 bprefix = 'b/'
1793
1809
1794 def diffline(f, revs):
1810 def diffline(f, revs):
1795 revinfo = ' '.join(["-r %s" % rev for rev in revs])
1811 revinfo = ' '.join(["-r %s" % rev for rev in revs])
1796 return 'diff %s %s' % (revinfo, f)
1812 return 'diff %s %s' % (revinfo, f)
1797
1813
1798 date1 = util.datestr(ctx1.date())
1814 date1 = util.datestr(ctx1.date())
1799 date2 = util.datestr(ctx2.date())
1815 date2 = util.datestr(ctx2.date())
1800
1816
1801 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
1817 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
1802
1818
1803 for f1, f2, copyop in _filepairs(
1819 for f1, f2, copyop in _filepairs(
1804 ctx1, modified, added, removed, copy, opts):
1820 ctx1, modified, added, removed, copy, opts):
1805 content1 = None
1821 content1 = None
1806 content2 = None
1822 content2 = None
1807 flag1 = None
1823 flag1 = None
1808 flag2 = None
1824 flag2 = None
1809 if f1:
1825 if f1:
1810 content1 = getfilectx(f1, ctx1).data()
1826 content1 = getfilectx(f1, ctx1).data()
1811 if opts.git or losedatafn:
1827 if opts.git or losedatafn:
1812 flag1 = ctx1.flags(f1)
1828 flag1 = ctx1.flags(f1)
1813 if f2:
1829 if f2:
1814 content2 = getfilectx(f2, ctx2).data()
1830 content2 = getfilectx(f2, ctx2).data()
1815 if opts.git or losedatafn:
1831 if opts.git or losedatafn:
1816 flag2 = ctx2.flags(f2)
1832 flag2 = ctx2.flags(f2)
1817 binary = False
1833 binary = False
1818 if opts.git or losedatafn:
1834 if opts.git or losedatafn:
1819 binary = util.binary(content1) or util.binary(content2)
1835 binary = util.binary(content1) or util.binary(content2)
1820
1836
1821 if losedatafn and not opts.git:
1837 if losedatafn and not opts.git:
1822 if (binary or
1838 if (binary or
1823 # copy/rename
1839 # copy/rename
1824 f2 in copy or
1840 f2 in copy or
1825 # empty file creation
1841 # empty file creation
1826 (not f1 and not content2) or
1842 (not f1 and not content2) or
1827 # empty file deletion
1843 # empty file deletion
1828 (not content1 and not f2) or
1844 (not content1 and not f2) or
1829 # create with flags
1845 # create with flags
1830 (not f1 and flag2) or
1846 (not f1 and flag2) or
1831 # change flags
1847 # change flags
1832 (f1 and f2 and flag1 != flag2)):
1848 (f1 and f2 and flag1 != flag2)):
1833 losedatafn(f2 or f1)
1849 losedatafn(f2 or f1)
1834
1850
1835 path1 = posixpath.join(prefix, f1 or f2)
1851 path1 = posixpath.join(prefix, f1 or f2)
1836 path2 = posixpath.join(prefix, f2 or f1)
1852 path2 = posixpath.join(prefix, f2 or f1)
1837 header = []
1853 header = []
1838 if opts.git:
1854 if opts.git:
1839 header.append('diff --git %s%s %s%s' %
1855 header.append('diff --git %s%s %s%s' %
1840 (aprefix, path1, bprefix, path2))
1856 (aprefix, path1, bprefix, path2))
1841 if not f1: # added
1857 if not f1: # added
1842 header.append('new file mode %s' % gitmode[flag2])
1858 header.append('new file mode %s' % gitmode[flag2])
1843 elif not f2: # removed
1859 elif not f2: # removed
1844 header.append('deleted file mode %s' % gitmode[flag1])
1860 header.append('deleted file mode %s' % gitmode[flag1])
1845 else: # modified/copied/renamed
1861 else: # modified/copied/renamed
1846 mode1, mode2 = gitmode[flag1], gitmode[flag2]
1862 mode1, mode2 = gitmode[flag1], gitmode[flag2]
1847 if mode1 != mode2:
1863 if mode1 != mode2:
1848 header.append('old mode %s' % mode1)
1864 header.append('old mode %s' % mode1)
1849 header.append('new mode %s' % mode2)
1865 header.append('new mode %s' % mode2)
1850 if copyop is not None:
1866 if copyop is not None:
1851 header.append('%s from %s' % (copyop, path1))
1867 header.append('%s from %s' % (copyop, path1))
1852 header.append('%s to %s' % (copyop, path2))
1868 header.append('%s to %s' % (copyop, path2))
1853 elif revs and not repo.ui.quiet:
1869 elif revs and not repo.ui.quiet:
1854 header.append(diffline(path1, revs))
1870 header.append(diffline(path1, revs))
1855
1871
1856 if binary and opts.git and not opts.nobinary:
1872 if binary and opts.git and not opts.nobinary:
1857 text = mdiff.b85diff(content1, content2)
1873 text = mdiff.b85diff(content1, content2)
1858 if text:
1874 if text:
1859 header.append('index %s..%s' %
1875 header.append('index %s..%s' %
1860 (gitindex(content1), gitindex(content2)))
1876 (gitindex(content1), gitindex(content2)))
1861 else:
1877 else:
1862 text = mdiff.unidiff(content1, date1,
1878 text = mdiff.unidiff(content1, date1,
1863 content2, date2,
1879 content2, date2,
1864 path1, path2, opts=opts)
1880 path1, path2, opts=opts)
1865 if header and (text or len(header) > 1):
1881 if header and (text or len(header) > 1):
1866 yield '\n'.join(header) + '\n'
1882 yield '\n'.join(header) + '\n'
1867 if text:
1883 if text:
1868 yield text
1884 yield text
1869
1885
1870 def diffstatsum(stats):
1886 def diffstatsum(stats):
1871 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
1887 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
1872 for f, a, r, b in stats:
1888 for f, a, r, b in stats:
1873 maxfile = max(maxfile, encoding.colwidth(f))
1889 maxfile = max(maxfile, encoding.colwidth(f))
1874 maxtotal = max(maxtotal, a + r)
1890 maxtotal = max(maxtotal, a + r)
1875 addtotal += a
1891 addtotal += a
1876 removetotal += r
1892 removetotal += r
1877 binary = binary or b
1893 binary = binary or b
1878
1894
1879 return maxfile, maxtotal, addtotal, removetotal, binary
1895 return maxfile, maxtotal, addtotal, removetotal, binary
1880
1896
1881 def diffstatdata(lines):
1897 def diffstatdata(lines):
1882 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
1898 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
1883
1899
1884 results = []
1900 results = []
1885 filename, adds, removes, isbinary = None, 0, 0, False
1901 filename, adds, removes, isbinary = None, 0, 0, False
1886
1902
1887 def addresult():
1903 def addresult():
1888 if filename:
1904 if filename:
1889 results.append((filename, adds, removes, isbinary))
1905 results.append((filename, adds, removes, isbinary))
1890
1906
1891 for line in lines:
1907 for line in lines:
1892 if line.startswith('diff'):
1908 if line.startswith('diff'):
1893 addresult()
1909 addresult()
1894 # set numbers to 0 anyway when starting new file
1910 # set numbers to 0 anyway when starting new file
1895 adds, removes, isbinary = 0, 0, False
1911 adds, removes, isbinary = 0, 0, False
1896 if line.startswith('diff --git a/'):
1912 if line.startswith('diff --git a/'):
1897 filename = gitre.search(line).group(2)
1913 filename = gitre.search(line).group(2)
1898 elif line.startswith('diff -r'):
1914 elif line.startswith('diff -r'):
1899 # format: "diff -r ... -r ... filename"
1915 # format: "diff -r ... -r ... filename"
1900 filename = diffre.search(line).group(1)
1916 filename = diffre.search(line).group(1)
1901 elif line.startswith('+') and not line.startswith('+++ '):
1917 elif line.startswith('+') and not line.startswith('+++ '):
1902 adds += 1
1918 adds += 1
1903 elif line.startswith('-') and not line.startswith('--- '):
1919 elif line.startswith('-') and not line.startswith('--- '):
1904 removes += 1
1920 removes += 1
1905 elif (line.startswith('GIT binary patch') or
1921 elif (line.startswith('GIT binary patch') or
1906 line.startswith('Binary file')):
1922 line.startswith('Binary file')):
1907 isbinary = True
1923 isbinary = True
1908 addresult()
1924 addresult()
1909 return results
1925 return results
1910
1926
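# Editorial sketch (not part of patch.py): turning rendered diff lines back
# into per-file counters, assuming this module is importable as
# mercurial.patch.  Header lines ('--- ', '+++ ') are not counted.
from mercurial.patch import diffstatdata
lines = ['diff -r 000000000000 -r 111111111111 foo.c\n',
         '--- a/foo.c\n',
         '+++ b/foo.c\n',
         '@@ -1,1 +1,1 @@\n',
         '-old line\n',
         '+new line\n']
assert diffstatdata(lines) == [('foo.c', 1, 1, False)]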
1911 def diffstat(lines, width=80, git=False):
1927 def diffstat(lines, width=80, git=False):
1912 output = []
1928 output = []
1913 stats = diffstatdata(lines)
1929 stats = diffstatdata(lines)
1914 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
1930 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
1915
1931
1916 countwidth = len(str(maxtotal))
1932 countwidth = len(str(maxtotal))
1917 if hasbinary and countwidth < 3:
1933 if hasbinary and countwidth < 3:
1918 countwidth = 3
1934 countwidth = 3
1919 graphwidth = width - countwidth - maxname - 6
1935 graphwidth = width - countwidth - maxname - 6
1920 if graphwidth < 10:
1936 if graphwidth < 10:
1921 graphwidth = 10
1937 graphwidth = 10
1922
1938
1923 def scale(i):
1939 def scale(i):
1924 if maxtotal <= graphwidth:
1940 if maxtotal <= graphwidth:
1925 return i
1941 return i
1926 # If diffstat runs out of room it doesn't print anything,
1942 # If diffstat runs out of room it doesn't print anything,
1927 # which isn't very useful, so always print at least one + or -
1943 # which isn't very useful, so always print at least one + or -
1928 # if there were at least some changes.
1944 # if there were at least some changes.
1929 return max(i * graphwidth // maxtotal, int(bool(i)))
1945 return max(i * graphwidth // maxtotal, int(bool(i)))
1930
1946
1931 for filename, adds, removes, isbinary in stats:
1947 for filename, adds, removes, isbinary in stats:
1932 if isbinary:
1948 if isbinary:
1933 count = 'Bin'
1949 count = 'Bin'
1934 else:
1950 else:
1935 count = adds + removes
1951 count = adds + removes
1936 pluses = '+' * scale(adds)
1952 pluses = '+' * scale(adds)
1937 minuses = '-' * scale(removes)
1953 minuses = '-' * scale(removes)
1938 output.append(' %s%s | %*s %s%s\n' %
1954 output.append(' %s%s | %*s %s%s\n' %
1939 (filename, ' ' * (maxname - encoding.colwidth(filename)),
1955 (filename, ' ' * (maxname - encoding.colwidth(filename)),
1940 countwidth, count, pluses, minuses))
1956 countwidth, count, pluses, minuses))
1941
1957
1942 if stats:
1958 if stats:
1943 output.append(_(' %d files changed, %d insertions(+), '
1959 output.append(_(' %d files changed, %d insertions(+), '
1944 '%d deletions(-)\n')
1960 '%d deletions(-)\n')
1945 % (len(stats), totaladds, totalremoves))
1961 % (len(stats), totaladds, totalremoves))
1946
1962
1947 return ''.join(output)
1963 return ''.join(output)
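
Continuing the same hypothetical sample from above, diffstat() folds the per-file tuples into the familiar histogram text. The exact column widths depend on the width argument and the longest file name, so the output shown in the comment is only approximate.

    print patch.diffstat(sample, width=40)
    # roughly:
    #  foo.txt | 2 +-
    #  1 files changed, 1 insertions(+), 1 deletions(-)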

def diffstatui(*args, **kw):
    '''like diffstat(), but yields 2-tuples of (output, label) for
    ui.write()
    '''

    for line in diffstat(*args, **kw).splitlines():
        if line and line[-1] in '+-':
            name, graph = line.rsplit(' ', 1)
            yield (name + ' ', '')
            m = re.search(r'\++', graph)
            if m:
                yield (m.group(0), 'diffstat.inserted')
            m = re.search(r'-+', graph)
            if m:
                yield (m.group(0), 'diffstat.deleted')
        else:
            yield (line, '')
        yield ('\n', '')
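
And a sketch of consuming diffstatui(): it emits (text, label) pairs so a caller can colorize the runs of '+' and '-'. Here the labelled pieces for the earlier made-up sample are simply printed, again assuming the mercurial.patch import from the first sketch.

    for text, label in patch.diffstatui(sample, width=40):
        if label:
            print '%r -> %s' % (text, label)
    # '+' -> diffstat.inserted
    # '-' -> diffstat.deleted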
@@ -1,34 +1,35 @@
 # this is hack to make sure no escape characters are inserted into the output
 import os, sys
 if 'TERM' in os.environ:
     del os.environ['TERM']
 import doctest

 def testmod(name, optionflags=0, testtarget=None):
     __import__(name)
     mod = sys.modules[name]
     if testtarget is not None:
         mod = getattr(mod, testtarget)
     doctest.testmod(mod, optionflags=optionflags)

 testmod('mercurial.changelog')
 testmod('mercurial.dagparser', optionflags=doctest.NORMALIZE_WHITESPACE)
 testmod('mercurial.dispatch')
 testmod('mercurial.encoding')
 testmod('mercurial.hg')
 testmod('mercurial.hgweb.hgwebdir_mod')
 testmod('mercurial.match')
 testmod('mercurial.minirst')
+testmod('mercurial.patch')
 testmod('mercurial.pathutil')
 testmod('mercurial.revset')
 testmod('mercurial.store')
 testmod('mercurial.subrepo')
 testmod('mercurial.templatefilters')
 testmod('mercurial.ui')
 testmod('mercurial.url')
 testmod('mercurial.util')
 testmod('mercurial.util', testtarget='platform')
 testmod('hgext.convert.cvsps')
 testmod('hgext.convert.filemap')
 testmod('hgext.convert.subversion')
 testmod('hgext.mq')