patch: do not patch unknown files (issue752)
Patrick Mezard
r14453:ea3d5481 default
@@ -0,0 +1,67 @@
1 $ cat <<EOF >> $HGRCPATH
2 > [extensions]
3 > purge =
4 > EOF
5
6 $ hg init test
7 $ cd test
8 $ echo a > changed
9 $ echo a > removed
10 $ echo a > source
11 $ hg ci -Am addfiles
12 adding changed
13 adding removed
14 adding source
15 $ echo a >> changed
16 $ echo a > added
17 $ hg add added
18 $ hg rm removed
19 $ hg cp source copied
20 $ hg diff --git > ../unknown.diff
21
22 Test adding on top of an unknown file
23
24 $ hg up -qC 0
25 $ hg purge
26 $ echo a > added
27 $ hg import --no-commit ../unknown.diff
28 applying ../unknown.diff
29 file added already exists
30 1 out of 1 hunks FAILED -- saving rejects to file added.rej
31 abort: patch failed to apply
32 [255]
33
34 Test modifying an unknown file
35
36 $ hg revert -aq
37 $ hg purge
38 $ hg rm changed
39 $ hg ci -m removechanged
40 $ echo a > changed
41 $ hg import --no-commit ../unknown.diff
42 applying ../unknown.diff
43 abort: cannot patch changed: file is not tracked
44 [255]
45
46 Test removing an unknown file
47
48 $ hg up -qC 0
49 $ hg purge
50 $ hg rm removed
51 $ hg ci -m removeremoved
52 created new head
53 $ echo a > removed
54 $ hg import --no-commit ../unknown.diff
55 applying ../unknown.diff
56 abort: cannot patch removed: file is not tracked
57 [255]
58
59 Test copying onto an unknown file
60
61 $ hg up -qC 0
62 $ hg purge
63 $ echo a > copied
64 $ hg import --no-commit ../unknown.diff
65 applying ../unknown.diff
66 abort: cannot create copied: destination already exists
67 [255]
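
The tests above exercise the guard this changeset adds to mercurial/patch.py: workingbackend grows a _checkknown helper (new lines 448-451 in the diff below) which setfile and unlink call before touching the working directory, so importing a patch that would modify or remove a file present on disk but unknown to the dirstate now aborts with "cannot patch ...: file is not tracked" instead of clobbering the file. A minimal sketch of the check, using a hypothetical stand-in object instead of a real repo and backend (here dirstate is just a dict of status characters, '?' meaning untracked):

class PatchError(Exception):
    pass

class fakebackend(object):
    # Hypothetical stand-in: the real code consults repo.dirstate and the
    # filesystem; here 'dirstate' is a plain dict and 'ondisk' a set.
    def __init__(self, dirstate, ondisk):
        self.dirstate = dirstate
        self.ondisk = set(ondisk)

    def exists(self, fname):
        return fname in self.ondisk

    def checkknown(self, fname):
        # Mirrors workingbackend._checkknown: refuse to patch a file that
        # exists in the working directory but is not tracked ('?').
        if self.dirstate.get(fname, '?') == '?' and self.exists(fname):
            raise PatchError('cannot patch %s: file is not tracked' % fname)

backend = fakebackend({'changed': '?'}, ['changed'])
try:
    backend.checkknown('changed')
except PatchError as inst:
    print(inst)  # cannot patch changed: file is not tracked

In the real change the message is wrapped in _() for translation and the status comes from self.repo.dirstate, as shown in the patch.py diff that follows.
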
@@ -1,1780 +1,1786 @@
1 # patch.py - patch file parsing routines
1 # patch.py - patch file parsing routines
2 #
2 #
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 import cStringIO, email.Parser, os, errno, re
9 import cStringIO, email.Parser, os, errno, re
10 import tempfile, zlib, shutil
10 import tempfile, zlib, shutil
11
11
12 from i18n import _
12 from i18n import _
13 from node import hex, nullid, short
13 from node import hex, nullid, short
14 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding
14 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding
15
15
16 gitre = re.compile('diff --git a/(.*) b/(.*)')
16 gitre = re.compile('diff --git a/(.*) b/(.*)')
17
17
18 class PatchError(Exception):
18 class PatchError(Exception):
19 pass
19 pass
20
20
21
21
22 # public functions
22 # public functions
23
23
24 def split(stream):
24 def split(stream):
25 '''return an iterator of individual patches from a stream'''
25 '''return an iterator of individual patches from a stream'''
26 def isheader(line, inheader):
26 def isheader(line, inheader):
27 if inheader and line[0] in (' ', '\t'):
27 if inheader and line[0] in (' ', '\t'):
28 # continuation
28 # continuation
29 return True
29 return True
30 if line[0] in (' ', '-', '+'):
30 if line[0] in (' ', '-', '+'):
31 # diff line - don't check for header pattern in there
31 # diff line - don't check for header pattern in there
32 return False
32 return False
33 l = line.split(': ', 1)
33 l = line.split(': ', 1)
34 return len(l) == 2 and ' ' not in l[0]
34 return len(l) == 2 and ' ' not in l[0]
35
35
36 def chunk(lines):
36 def chunk(lines):
37 return cStringIO.StringIO(''.join(lines))
37 return cStringIO.StringIO(''.join(lines))
38
38
39 def hgsplit(stream, cur):
39 def hgsplit(stream, cur):
40 inheader = True
40 inheader = True
41
41
42 for line in stream:
42 for line in stream:
43 if not line.strip():
43 if not line.strip():
44 inheader = False
44 inheader = False
45 if not inheader and line.startswith('# HG changeset patch'):
45 if not inheader and line.startswith('# HG changeset patch'):
46 yield chunk(cur)
46 yield chunk(cur)
47 cur = []
47 cur = []
48 inheader = True
48 inheader = True
49
49
50 cur.append(line)
50 cur.append(line)
51
51
52 if cur:
52 if cur:
53 yield chunk(cur)
53 yield chunk(cur)
54
54
55 def mboxsplit(stream, cur):
55 def mboxsplit(stream, cur):
56 for line in stream:
56 for line in stream:
57 if line.startswith('From '):
57 if line.startswith('From '):
58 for c in split(chunk(cur[1:])):
58 for c in split(chunk(cur[1:])):
59 yield c
59 yield c
60 cur = []
60 cur = []
61
61
62 cur.append(line)
62 cur.append(line)
63
63
64 if cur:
64 if cur:
65 for c in split(chunk(cur[1:])):
65 for c in split(chunk(cur[1:])):
66 yield c
66 yield c
67
67
68 def mimesplit(stream, cur):
68 def mimesplit(stream, cur):
69 def msgfp(m):
69 def msgfp(m):
70 fp = cStringIO.StringIO()
70 fp = cStringIO.StringIO()
71 g = email.Generator.Generator(fp, mangle_from_=False)
71 g = email.Generator.Generator(fp, mangle_from_=False)
72 g.flatten(m)
72 g.flatten(m)
73 fp.seek(0)
73 fp.seek(0)
74 return fp
74 return fp
75
75
76 for line in stream:
76 for line in stream:
77 cur.append(line)
77 cur.append(line)
78 c = chunk(cur)
78 c = chunk(cur)
79
79
80 m = email.Parser.Parser().parse(c)
80 m = email.Parser.Parser().parse(c)
81 if not m.is_multipart():
81 if not m.is_multipart():
82 yield msgfp(m)
82 yield msgfp(m)
83 else:
83 else:
84 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
84 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
85 for part in m.walk():
85 for part in m.walk():
86 ct = part.get_content_type()
86 ct = part.get_content_type()
87 if ct not in ok_types:
87 if ct not in ok_types:
88 continue
88 continue
89 yield msgfp(part)
89 yield msgfp(part)
90
90
91 def headersplit(stream, cur):
91 def headersplit(stream, cur):
92 inheader = False
92 inheader = False
93
93
94 for line in stream:
94 for line in stream:
95 if not inheader and isheader(line, inheader):
95 if not inheader and isheader(line, inheader):
96 yield chunk(cur)
96 yield chunk(cur)
97 cur = []
97 cur = []
98 inheader = True
98 inheader = True
99 if inheader and not isheader(line, inheader):
99 if inheader and not isheader(line, inheader):
100 inheader = False
100 inheader = False
101
101
102 cur.append(line)
102 cur.append(line)
103
103
104 if cur:
104 if cur:
105 yield chunk(cur)
105 yield chunk(cur)
106
106
107 def remainder(cur):
107 def remainder(cur):
108 yield chunk(cur)
108 yield chunk(cur)
109
109
110 class fiter(object):
110 class fiter(object):
111 def __init__(self, fp):
111 def __init__(self, fp):
112 self.fp = fp
112 self.fp = fp
113
113
114 def __iter__(self):
114 def __iter__(self):
115 return self
115 return self
116
116
117 def next(self):
117 def next(self):
118 l = self.fp.readline()
118 l = self.fp.readline()
119 if not l:
119 if not l:
120 raise StopIteration
120 raise StopIteration
121 return l
121 return l
122
122
123 inheader = False
123 inheader = False
124 cur = []
124 cur = []
125
125
126 mimeheaders = ['content-type']
126 mimeheaders = ['content-type']
127
127
128 if not hasattr(stream, 'next'):
128 if not hasattr(stream, 'next'):
129 # http responses, for example, have readline but not next
129 # http responses, for example, have readline but not next
130 stream = fiter(stream)
130 stream = fiter(stream)
131
131
132 for line in stream:
132 for line in stream:
133 cur.append(line)
133 cur.append(line)
134 if line.startswith('# HG changeset patch'):
134 if line.startswith('# HG changeset patch'):
135 return hgsplit(stream, cur)
135 return hgsplit(stream, cur)
136 elif line.startswith('From '):
136 elif line.startswith('From '):
137 return mboxsplit(stream, cur)
137 return mboxsplit(stream, cur)
138 elif isheader(line, inheader):
138 elif isheader(line, inheader):
139 inheader = True
139 inheader = True
140 if line.split(':', 1)[0].lower() in mimeheaders:
140 if line.split(':', 1)[0].lower() in mimeheaders:
141 # let email parser handle this
141 # let email parser handle this
142 return mimesplit(stream, cur)
142 return mimesplit(stream, cur)
143 elif line.startswith('--- ') and inheader:
143 elif line.startswith('--- ') and inheader:
144 # No evil headers seen by diff start, split by hand
144 # No evil headers seen by diff start, split by hand
145 return headersplit(stream, cur)
145 return headersplit(stream, cur)
146 # Not enough info, keep reading
146 # Not enough info, keep reading
147
147
148 # if we are here, we have a very plain patch
148 # if we are here, we have a very plain patch
149 return remainder(cur)
149 return remainder(cur)
150
150
151 def extract(ui, fileobj):
151 def extract(ui, fileobj):
152 '''extract patch from data read from fileobj.
152 '''extract patch from data read from fileobj.
153
153
154 patch can be a normal patch or contained in an email message.
154 patch can be a normal patch or contained in an email message.
155
155
156 return tuple (filename, message, user, date, branch, node, p1, p2).
156 return tuple (filename, message, user, date, branch, node, p1, p2).
157 Any item in the returned tuple can be None. If filename is None,
157 Any item in the returned tuple can be None. If filename is None,
158 fileobj did not contain a patch. Caller must unlink filename when done.'''
158 fileobj did not contain a patch. Caller must unlink filename when done.'''
159
159
160 # attempt to detect the start of a patch
160 # attempt to detect the start of a patch
161 # (this heuristic is borrowed from quilt)
161 # (this heuristic is borrowed from quilt)
162 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
162 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
163 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
163 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
164 r'---[ \t].*?^\+\+\+[ \t]|'
164 r'---[ \t].*?^\+\+\+[ \t]|'
165 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
165 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
166
166
167 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
167 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
168 tmpfp = os.fdopen(fd, 'w')
168 tmpfp = os.fdopen(fd, 'w')
169 try:
169 try:
170 msg = email.Parser.Parser().parse(fileobj)
170 msg = email.Parser.Parser().parse(fileobj)
171
171
172 subject = msg['Subject']
172 subject = msg['Subject']
173 user = msg['From']
173 user = msg['From']
174 if not subject and not user:
174 if not subject and not user:
175 # Not an email, restore parsed headers if any
175 # Not an email, restore parsed headers if any
176 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
176 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
177
177
178 gitsendmail = 'git-send-email' in msg.get('X-Mailer', '')
178 gitsendmail = 'git-send-email' in msg.get('X-Mailer', '')
179 # should try to parse msg['Date']
179 # should try to parse msg['Date']
180 date = None
180 date = None
181 nodeid = None
181 nodeid = None
182 branch = None
182 branch = None
183 parents = []
183 parents = []
184
184
185 if subject:
185 if subject:
186 if subject.startswith('[PATCH'):
186 if subject.startswith('[PATCH'):
187 pend = subject.find(']')
187 pend = subject.find(']')
188 if pend >= 0:
188 if pend >= 0:
189 subject = subject[pend + 1:].lstrip()
189 subject = subject[pend + 1:].lstrip()
190 subject = subject.replace('\n\t', ' ')
190 subject = subject.replace('\n\t', ' ')
191 ui.debug('Subject: %s\n' % subject)
191 ui.debug('Subject: %s\n' % subject)
192 if user:
192 if user:
193 ui.debug('From: %s\n' % user)
193 ui.debug('From: %s\n' % user)
194 diffs_seen = 0
194 diffs_seen = 0
195 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
195 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
196 message = ''
196 message = ''
197 for part in msg.walk():
197 for part in msg.walk():
198 content_type = part.get_content_type()
198 content_type = part.get_content_type()
199 ui.debug('Content-Type: %s\n' % content_type)
199 ui.debug('Content-Type: %s\n' % content_type)
200 if content_type not in ok_types:
200 if content_type not in ok_types:
201 continue
201 continue
202 payload = part.get_payload(decode=True)
202 payload = part.get_payload(decode=True)
203 m = diffre.search(payload)
203 m = diffre.search(payload)
204 if m:
204 if m:
205 hgpatch = False
205 hgpatch = False
206 hgpatchheader = False
206 hgpatchheader = False
207 ignoretext = False
207 ignoretext = False
208
208
209 ui.debug('found patch at byte %d\n' % m.start(0))
209 ui.debug('found patch at byte %d\n' % m.start(0))
210 diffs_seen += 1
210 diffs_seen += 1
211 cfp = cStringIO.StringIO()
211 cfp = cStringIO.StringIO()
212 for line in payload[:m.start(0)].splitlines():
212 for line in payload[:m.start(0)].splitlines():
213 if line.startswith('# HG changeset patch') and not hgpatch:
213 if line.startswith('# HG changeset patch') and not hgpatch:
214 ui.debug('patch generated by hg export\n')
214 ui.debug('patch generated by hg export\n')
215 hgpatch = True
215 hgpatch = True
216 hgpatchheader = True
216 hgpatchheader = True
217 # drop earlier commit message content
217 # drop earlier commit message content
218 cfp.seek(0)
218 cfp.seek(0)
219 cfp.truncate()
219 cfp.truncate()
220 subject = None
220 subject = None
221 elif hgpatchheader:
221 elif hgpatchheader:
222 if line.startswith('# User '):
222 if line.startswith('# User '):
223 user = line[7:]
223 user = line[7:]
224 ui.debug('From: %s\n' % user)
224 ui.debug('From: %s\n' % user)
225 elif line.startswith("# Date "):
225 elif line.startswith("# Date "):
226 date = line[7:]
226 date = line[7:]
227 elif line.startswith("# Branch "):
227 elif line.startswith("# Branch "):
228 branch = line[9:]
228 branch = line[9:]
229 elif line.startswith("# Node ID "):
229 elif line.startswith("# Node ID "):
230 nodeid = line[10:]
230 nodeid = line[10:]
231 elif line.startswith("# Parent "):
231 elif line.startswith("# Parent "):
232 parents.append(line[10:])
232 parents.append(line[10:])
233 elif not line.startswith("# "):
233 elif not line.startswith("# "):
234 hgpatchheader = False
234 hgpatchheader = False
235 elif line == '---' and gitsendmail:
235 elif line == '---' and gitsendmail:
236 ignoretext = True
236 ignoretext = True
237 if not hgpatchheader and not ignoretext:
237 if not hgpatchheader and not ignoretext:
238 cfp.write(line)
238 cfp.write(line)
239 cfp.write('\n')
239 cfp.write('\n')
240 message = cfp.getvalue()
240 message = cfp.getvalue()
241 if tmpfp:
241 if tmpfp:
242 tmpfp.write(payload)
242 tmpfp.write(payload)
243 if not payload.endswith('\n'):
243 if not payload.endswith('\n'):
244 tmpfp.write('\n')
244 tmpfp.write('\n')
245 elif not diffs_seen and message and content_type == 'text/plain':
245 elif not diffs_seen and message and content_type == 'text/plain':
246 message += '\n' + payload
246 message += '\n' + payload
247 except:
247 except:
248 tmpfp.close()
248 tmpfp.close()
249 os.unlink(tmpname)
249 os.unlink(tmpname)
250 raise
250 raise
251
251
252 if subject and not message.startswith(subject):
252 if subject and not message.startswith(subject):
253 message = '%s\n%s' % (subject, message)
253 message = '%s\n%s' % (subject, message)
254 tmpfp.close()
254 tmpfp.close()
255 if not diffs_seen:
255 if not diffs_seen:
256 os.unlink(tmpname)
256 os.unlink(tmpname)
257 return None, message, user, date, branch, None, None, None
257 return None, message, user, date, branch, None, None, None
258 p1 = parents and parents.pop(0) or None
258 p1 = parents and parents.pop(0) or None
259 p2 = parents and parents.pop(0) or None
259 p2 = parents and parents.pop(0) or None
260 return tmpname, message, user, date, branch, nodeid, p1, p2
260 return tmpname, message, user, date, branch, nodeid, p1, p2
261
261
262 class patchmeta(object):
262 class patchmeta(object):
263 """Patched file metadata
263 """Patched file metadata
264
264
265 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
265 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
266 or COPY. 'path' is patched file path. 'oldpath' is set to the
266 or COPY. 'path' is patched file path. 'oldpath' is set to the
267 origin file when 'op' is either COPY or RENAME, None otherwise. If
267 origin file when 'op' is either COPY or RENAME, None otherwise. If
268 file mode is changed, 'mode' is a tuple (islink, isexec) where
268 file mode is changed, 'mode' is a tuple (islink, isexec) where
269 'islink' is True if the file is a symlink and 'isexec' is True if
269 'islink' is True if the file is a symlink and 'isexec' is True if
270 the file is executable. Otherwise, 'mode' is None.
270 the file is executable. Otherwise, 'mode' is None.
271 """
271 """
272 def __init__(self, path):
272 def __init__(self, path):
273 self.path = path
273 self.path = path
274 self.oldpath = None
274 self.oldpath = None
275 self.mode = None
275 self.mode = None
276 self.op = 'MODIFY'
276 self.op = 'MODIFY'
277 self.binary = False
277 self.binary = False
278
278
279 def setmode(self, mode):
279 def setmode(self, mode):
280 islink = mode & 020000
280 islink = mode & 020000
281 isexec = mode & 0100
281 isexec = mode & 0100
282 self.mode = (islink, isexec)
282 self.mode = (islink, isexec)
283
283
284 def __repr__(self):
284 def __repr__(self):
285 return "<patchmeta %s %r>" % (self.op, self.path)
285 return "<patchmeta %s %r>" % (self.op, self.path)
286
286
287 def readgitpatch(lr):
287 def readgitpatch(lr):
288 """extract git-style metadata about patches from <patchname>"""
288 """extract git-style metadata about patches from <patchname>"""
289
289
290 # Filter patch for git information
290 # Filter patch for git information
291 gp = None
291 gp = None
292 gitpatches = []
292 gitpatches = []
293 for line in lr:
293 for line in lr:
294 line = line.rstrip(' \r\n')
294 line = line.rstrip(' \r\n')
295 if line.startswith('diff --git'):
295 if line.startswith('diff --git'):
296 m = gitre.match(line)
296 m = gitre.match(line)
297 if m:
297 if m:
298 if gp:
298 if gp:
299 gitpatches.append(gp)
299 gitpatches.append(gp)
300 dst = m.group(2)
300 dst = m.group(2)
301 gp = patchmeta(dst)
301 gp = patchmeta(dst)
302 elif gp:
302 elif gp:
303 if line.startswith('--- '):
303 if line.startswith('--- '):
304 gitpatches.append(gp)
304 gitpatches.append(gp)
305 gp = None
305 gp = None
306 continue
306 continue
307 if line.startswith('rename from '):
307 if line.startswith('rename from '):
308 gp.op = 'RENAME'
308 gp.op = 'RENAME'
309 gp.oldpath = line[12:]
309 gp.oldpath = line[12:]
310 elif line.startswith('rename to '):
310 elif line.startswith('rename to '):
311 gp.path = line[10:]
311 gp.path = line[10:]
312 elif line.startswith('copy from '):
312 elif line.startswith('copy from '):
313 gp.op = 'COPY'
313 gp.op = 'COPY'
314 gp.oldpath = line[10:]
314 gp.oldpath = line[10:]
315 elif line.startswith('copy to '):
315 elif line.startswith('copy to '):
316 gp.path = line[8:]
316 gp.path = line[8:]
317 elif line.startswith('deleted file'):
317 elif line.startswith('deleted file'):
318 gp.op = 'DELETE'
318 gp.op = 'DELETE'
319 elif line.startswith('new file mode '):
319 elif line.startswith('new file mode '):
320 gp.op = 'ADD'
320 gp.op = 'ADD'
321 gp.setmode(int(line[-6:], 8))
321 gp.setmode(int(line[-6:], 8))
322 elif line.startswith('new mode '):
322 elif line.startswith('new mode '):
323 gp.setmode(int(line[-6:], 8))
323 gp.setmode(int(line[-6:], 8))
324 elif line.startswith('GIT binary patch'):
324 elif line.startswith('GIT binary patch'):
325 gp.binary = True
325 gp.binary = True
326 if gp:
326 if gp:
327 gitpatches.append(gp)
327 gitpatches.append(gp)
328
328
329 return gitpatches
329 return gitpatches
330
330
331 class linereader(object):
331 class linereader(object):
332 # simple class to allow pushing lines back into the input stream
332 # simple class to allow pushing lines back into the input stream
333 def __init__(self, fp):
333 def __init__(self, fp):
334 self.fp = fp
334 self.fp = fp
335 self.buf = []
335 self.buf = []
336
336
337 def push(self, line):
337 def push(self, line):
338 if line is not None:
338 if line is not None:
339 self.buf.append(line)
339 self.buf.append(line)
340
340
341 def readline(self):
341 def readline(self):
342 if self.buf:
342 if self.buf:
343 l = self.buf[0]
343 l = self.buf[0]
344 del self.buf[0]
344 del self.buf[0]
345 return l
345 return l
346 return self.fp.readline()
346 return self.fp.readline()
347
347
348 def __iter__(self):
348 def __iter__(self):
349 while 1:
349 while 1:
350 l = self.readline()
350 l = self.readline()
351 if not l:
351 if not l:
352 break
352 break
353 yield l
353 yield l
354
354
355 class abstractbackend(object):
355 class abstractbackend(object):
356 def __init__(self, ui):
356 def __init__(self, ui):
357 self.ui = ui
357 self.ui = ui
358
358
359 def getfile(self, fname):
359 def getfile(self, fname):
360 """Return target file data and flags as a (data, (islink,
360 """Return target file data and flags as a (data, (islink,
361 isexec)) tuple.
361 isexec)) tuple.
362 """
362 """
363 raise NotImplementedError
363 raise NotImplementedError
364
364
365 def setfile(self, fname, data, mode, copysource):
365 def setfile(self, fname, data, mode, copysource):
366 """Write data to target file fname and set its mode. mode is a
366 """Write data to target file fname and set its mode. mode is a
367 (islink, isexec) tuple. If data is None, the file content should
367 (islink, isexec) tuple. If data is None, the file content should
368 be left unchanged. If the file is modified after being copied,
368 be left unchanged. If the file is modified after being copied,
369 copysource is set to the original file name.
369 copysource is set to the original file name.
370 """
370 """
371 raise NotImplementedError
371 raise NotImplementedError
372
372
373 def unlink(self, fname):
373 def unlink(self, fname):
374 """Unlink target file."""
374 """Unlink target file."""
375 raise NotImplementedError
375 raise NotImplementedError
376
376
377 def writerej(self, fname, failed, total, lines):
377 def writerej(self, fname, failed, total, lines):
378 """Write rejected lines for fname. total is the number of hunks
378 """Write rejected lines for fname. total is the number of hunks
379 which failed to apply and total the total number of hunks for this
379 which failed to apply and total the total number of hunks for this
380 files.
380 files.
381 """
381 """
382 pass
382 pass
383
383
384 def exists(self, fname):
384 def exists(self, fname):
385 raise NotImplementedError
385 raise NotImplementedError
386
386
387 class fsbackend(abstractbackend):
387 class fsbackend(abstractbackend):
388 def __init__(self, ui, basedir):
388 def __init__(self, ui, basedir):
389 super(fsbackend, self).__init__(ui)
389 super(fsbackend, self).__init__(ui)
390 self.opener = scmutil.opener(basedir)
390 self.opener = scmutil.opener(basedir)
391
391
392 def _join(self, f):
392 def _join(self, f):
393 return os.path.join(self.opener.base, f)
393 return os.path.join(self.opener.base, f)
394
394
395 def getfile(self, fname):
395 def getfile(self, fname):
396 path = self._join(fname)
396 path = self._join(fname)
397 if os.path.islink(path):
397 if os.path.islink(path):
398 return (os.readlink(path), (True, False))
398 return (os.readlink(path), (True, False))
399 isexec, islink = False, False
399 isexec, islink = False, False
400 try:
400 try:
401 isexec = os.lstat(path).st_mode & 0100 != 0
401 isexec = os.lstat(path).st_mode & 0100 != 0
402 islink = os.path.islink(path)
402 islink = os.path.islink(path)
403 except OSError, e:
403 except OSError, e:
404 if e.errno != errno.ENOENT:
404 if e.errno != errno.ENOENT:
405 raise
405 raise
406 return (self.opener.read(fname), (islink, isexec))
406 return (self.opener.read(fname), (islink, isexec))
407
407
408 def setfile(self, fname, data, mode, copysource):
408 def setfile(self, fname, data, mode, copysource):
409 islink, isexec = mode
409 islink, isexec = mode
410 if data is None:
410 if data is None:
411 util.setflags(self._join(fname), islink, isexec)
411 util.setflags(self._join(fname), islink, isexec)
412 return
412 return
413 if islink:
413 if islink:
414 self.opener.symlink(data, fname)
414 self.opener.symlink(data, fname)
415 else:
415 else:
416 self.opener.write(fname, data)
416 self.opener.write(fname, data)
417 if isexec:
417 if isexec:
418 util.setflags(self._join(fname), False, True)
418 util.setflags(self._join(fname), False, True)
419
419
420 def unlink(self, fname):
420 def unlink(self, fname):
421 try:
421 try:
422 util.unlinkpath(self._join(fname))
422 util.unlinkpath(self._join(fname))
423 except OSError, inst:
423 except OSError, inst:
424 if inst.errno != errno.ENOENT:
424 if inst.errno != errno.ENOENT:
425 raise
425 raise
426
426
427 def writerej(self, fname, failed, total, lines):
427 def writerej(self, fname, failed, total, lines):
428 fname = fname + ".rej"
428 fname = fname + ".rej"
429 self.ui.warn(
429 self.ui.warn(
430 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
430 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
431 (failed, total, fname))
431 (failed, total, fname))
432 fp = self.opener(fname, 'w')
432 fp = self.opener(fname, 'w')
433 fp.writelines(lines)
433 fp.writelines(lines)
434 fp.close()
434 fp.close()
435
435
436 def exists(self, fname):
436 def exists(self, fname):
437 return os.path.lexists(self._join(fname))
437 return os.path.lexists(self._join(fname))
438
438
439 class workingbackend(fsbackend):
440 def __init__(self, ui, repo, similarity):
441 super(workingbackend, self).__init__(ui, repo.root)
442 self.repo = repo
443 self.similarity = similarity
444 self.removed = set()
445 self.changed = set()
446 self.copied = []
447
448 def _checkknown(self, fname):
449 if self.repo.dirstate[fname] == '?' and self.exists(fname):
450 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
451
452 def setfile(self, fname, data, mode, copysource):
453 self._checkknown(fname)
454 super(workingbackend, self).setfile(fname, data, mode, copysource)
455 if copysource is not None:
456 self.copied.append((copysource, fname))
457 self.changed.add(fname)
458
459 def unlink(self, fname):
460 self._checkknown(fname)
461 super(workingbackend, self).unlink(fname)
462 self.removed.add(fname)
463 self.changed.add(fname)
464
465 def close(self):
466 wctx = self.repo[None]
467 addremoved = set(self.changed)
468 for src, dst in self.copied:
469 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
470 addremoved.discard(src)
471 if (not self.similarity) and self.removed:
472 wctx.forget(sorted(self.removed))
473 if addremoved:
474 cwd = self.repo.getcwd()
475 if cwd:
476 addremoved = [util.pathto(self.repo.root, cwd, f)
477 for f in addremoved]
478 scmutil.addremove(self.repo, addremoved, similarity=self.similarity)
479 return sorted(self.changed)
480
475 class filestore(object):
481 class filestore(object):
476 def __init__(self):
482 def __init__(self):
477 self.opener = None
483 self.opener = None
478 self.files = {}
484 self.files = {}
479 self.created = 0
485 self.created = 0
480
486
481 def setfile(self, fname, data, mode):
487 def setfile(self, fname, data, mode):
482 if self.opener is None:
488 if self.opener is None:
483 root = tempfile.mkdtemp(prefix='hg-patch-')
489 root = tempfile.mkdtemp(prefix='hg-patch-')
484 self.opener = scmutil.opener(root)
490 self.opener = scmutil.opener(root)
485 # Avoid filename issues with these simple names
491 # Avoid filename issues with these simple names
486 fn = str(self.created)
492 fn = str(self.created)
487 self.opener.write(fn, data)
493 self.opener.write(fn, data)
488 self.created += 1
494 self.created += 1
489 self.files[fname] = (fn, mode)
495 self.files[fname] = (fn, mode)
490
496
491 def getfile(self, fname):
497 def getfile(self, fname):
492 if fname not in self.files:
498 if fname not in self.files:
493 raise IOError()
499 raise IOError()
494 fn, mode = self.files[fname]
500 fn, mode = self.files[fname]
495 return self.opener.read(fn), mode
501 return self.opener.read(fn), mode
496
502
497 def close(self):
503 def close(self):
498 if self.opener:
504 if self.opener:
499 shutil.rmtree(self.opener.base)
505 shutil.rmtree(self.opener.base)
500
506
501 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
507 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
502 unidesc = re.compile('@@ -(\d+)(,(\d+))? \+(\d+)(,(\d+))? @@')
508 unidesc = re.compile('@@ -(\d+)(,(\d+))? \+(\d+)(,(\d+))? @@')
503 contextdesc = re.compile('(---|\*\*\*) (\d+)(,(\d+))? (---|\*\*\*)')
509 contextdesc = re.compile('(---|\*\*\*) (\d+)(,(\d+))? (---|\*\*\*)')
504 eolmodes = ['strict', 'crlf', 'lf', 'auto']
510 eolmodes = ['strict', 'crlf', 'lf', 'auto']
505
511
506 class patchfile(object):
512 class patchfile(object):
507 def __init__(self, ui, fname, backend, store, mode, create, remove,
513 def __init__(self, ui, fname, backend, store, mode, create, remove,
508 eolmode='strict', copysource=None):
514 eolmode='strict', copysource=None):
509 self.fname = fname
515 self.fname = fname
510 self.eolmode = eolmode
516 self.eolmode = eolmode
511 self.eol = None
517 self.eol = None
512 self.backend = backend
518 self.backend = backend
513 self.ui = ui
519 self.ui = ui
514 self.lines = []
520 self.lines = []
515 self.exists = False
521 self.exists = False
516 self.missing = True
522 self.missing = True
517 self.mode = mode
523 self.mode = mode
518 self.copysource = copysource
524 self.copysource = copysource
519 self.create = create
525 self.create = create
520 self.remove = remove
526 self.remove = remove
521 try:
527 try:
522 if copysource is None:
528 if copysource is None:
523 data, mode = backend.getfile(fname)
529 data, mode = backend.getfile(fname)
524 self.exists = True
530 self.exists = True
525 else:
531 else:
526 data, mode = store.getfile(copysource)
532 data, mode = store.getfile(copysource)
527 self.exists = backend.exists(fname)
533 self.exists = backend.exists(fname)
528 self.missing = False
534 self.missing = False
529 if data:
535 if data:
530 self.lines = data.splitlines(True)
536 self.lines = data.splitlines(True)
531 if self.mode is None:
537 if self.mode is None:
532 self.mode = mode
538 self.mode = mode
533 if self.lines:
539 if self.lines:
534 # Normalize line endings
540 # Normalize line endings
535 if self.lines[0].endswith('\r\n'):
541 if self.lines[0].endswith('\r\n'):
536 self.eol = '\r\n'
542 self.eol = '\r\n'
537 elif self.lines[0].endswith('\n'):
543 elif self.lines[0].endswith('\n'):
538 self.eol = '\n'
544 self.eol = '\n'
539 if eolmode != 'strict':
545 if eolmode != 'strict':
540 nlines = []
546 nlines = []
541 for l in self.lines:
547 for l in self.lines:
542 if l.endswith('\r\n'):
548 if l.endswith('\r\n'):
543 l = l[:-2] + '\n'
549 l = l[:-2] + '\n'
544 nlines.append(l)
550 nlines.append(l)
545 self.lines = nlines
551 self.lines = nlines
546 except IOError:
552 except IOError:
547 if create:
553 if create:
548 self.missing = False
554 self.missing = False
549 if self.mode is None:
555 if self.mode is None:
550 self.mode = (False, False)
556 self.mode = (False, False)
551 if self.missing:
557 if self.missing:
552 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
558 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
553
559
554 self.hash = {}
560 self.hash = {}
555 self.dirty = 0
561 self.dirty = 0
556 self.offset = 0
562 self.offset = 0
557 self.skew = 0
563 self.skew = 0
558 self.rej = []
564 self.rej = []
559 self.fileprinted = False
565 self.fileprinted = False
560 self.printfile(False)
566 self.printfile(False)
561 self.hunks = 0
567 self.hunks = 0
562
568
563 def writelines(self, fname, lines, mode):
569 def writelines(self, fname, lines, mode):
564 if self.eolmode == 'auto':
570 if self.eolmode == 'auto':
565 eol = self.eol
571 eol = self.eol
566 elif self.eolmode == 'crlf':
572 elif self.eolmode == 'crlf':
567 eol = '\r\n'
573 eol = '\r\n'
568 else:
574 else:
569 eol = '\n'
575 eol = '\n'
570
576
571 if self.eolmode != 'strict' and eol and eol != '\n':
577 if self.eolmode != 'strict' and eol and eol != '\n':
572 rawlines = []
578 rawlines = []
573 for l in lines:
579 for l in lines:
574 if l and l[-1] == '\n':
580 if l and l[-1] == '\n':
575 l = l[:-1] + eol
581 l = l[:-1] + eol
576 rawlines.append(l)
582 rawlines.append(l)
577 lines = rawlines
583 lines = rawlines
578
584
579 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
585 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
580
586
581 def printfile(self, warn):
587 def printfile(self, warn):
582 if self.fileprinted:
588 if self.fileprinted:
583 return
589 return
584 if warn or self.ui.verbose:
590 if warn or self.ui.verbose:
585 self.fileprinted = True
591 self.fileprinted = True
586 s = _("patching file %s\n") % self.fname
592 s = _("patching file %s\n") % self.fname
587 if warn:
593 if warn:
588 self.ui.warn(s)
594 self.ui.warn(s)
589 else:
595 else:
590 self.ui.note(s)
596 self.ui.note(s)
591
597
592
598
593 def findlines(self, l, linenum):
599 def findlines(self, l, linenum):
594 # looks through the hash and finds candidate lines. The
600 # looks through the hash and finds candidate lines. The
595 # result is a list of line numbers sorted based on distance
601 # result is a list of line numbers sorted based on distance
596 # from linenum
602 # from linenum
597
603
598 cand = self.hash.get(l, [])
604 cand = self.hash.get(l, [])
599 if len(cand) > 1:
605 if len(cand) > 1:
600 # resort our list of potentials forward then back.
606 # resort our list of potentials forward then back.
601 cand.sort(key=lambda x: abs(x - linenum))
607 cand.sort(key=lambda x: abs(x - linenum))
602 return cand
608 return cand
603
609
604 def write_rej(self):
610 def write_rej(self):
605 # our rejects are a little different from patch(1). This always
611 # our rejects are a little different from patch(1). This always
606 # creates rejects in the same form as the original patch. A file
612 # creates rejects in the same form as the original patch. A file
607 # header is inserted so that you can run the reject through patch again
613 # header is inserted so that you can run the reject through patch again
608 # without having to type the filename.
614 # without having to type the filename.
609 if not self.rej:
615 if not self.rej:
610 return
616 return
611 base = os.path.basename(self.fname)
617 base = os.path.basename(self.fname)
612 lines = ["--- %s\n+++ %s\n" % (base, base)]
618 lines = ["--- %s\n+++ %s\n" % (base, base)]
613 for x in self.rej:
619 for x in self.rej:
614 for l in x.hunk:
620 for l in x.hunk:
615 lines.append(l)
621 lines.append(l)
616 if l[-1] != '\n':
622 if l[-1] != '\n':
617 lines.append("\n\ No newline at end of file\n")
623 lines.append("\n\ No newline at end of file\n")
618 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
624 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
619
625
620 def apply(self, h):
626 def apply(self, h):
621 if not h.complete():
627 if not h.complete():
622 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
628 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
623 (h.number, h.desc, len(h.a), h.lena, len(h.b),
629 (h.number, h.desc, len(h.a), h.lena, len(h.b),
624 h.lenb))
630 h.lenb))
625
631
626 self.hunks += 1
632 self.hunks += 1
627
633
628 if self.missing:
634 if self.missing:
629 self.rej.append(h)
635 self.rej.append(h)
630 return -1
636 return -1
631
637
632 if self.exists and self.create:
638 if self.exists and self.create:
633 if self.copysource:
639 if self.copysource:
634 self.ui.warn(_("cannot create %s: destination already "
640 self.ui.warn(_("cannot create %s: destination already "
635 "exists\n" % self.fname))
641 "exists\n" % self.fname))
636 else:
642 else:
637 self.ui.warn(_("file %s already exists\n") % self.fname)
643 self.ui.warn(_("file %s already exists\n") % self.fname)
638 self.rej.append(h)
644 self.rej.append(h)
639 return -1
645 return -1
640
646
641 if isinstance(h, binhunk):
647 if isinstance(h, binhunk):
642 if self.remove:
648 if self.remove:
643 self.backend.unlink(self.fname)
649 self.backend.unlink(self.fname)
644 else:
650 else:
645 self.lines[:] = h.new()
651 self.lines[:] = h.new()
646 self.offset += len(h.new())
652 self.offset += len(h.new())
647 self.dirty = True
653 self.dirty = True
648 return 0
654 return 0
649
655
650 horig = h
656 horig = h
651 if (self.eolmode in ('crlf', 'lf')
657 if (self.eolmode in ('crlf', 'lf')
652 or self.eolmode == 'auto' and self.eol):
658 or self.eolmode == 'auto' and self.eol):
653 # If new eols are going to be normalized, then normalize
659 # If new eols are going to be normalized, then normalize
654 # hunk data before patching. Otherwise, preserve input
660 # hunk data before patching. Otherwise, preserve input
655 # line-endings.
661 # line-endings.
656 h = h.getnormalized()
662 h = h.getnormalized()
657
663
658 # fast case first, no offsets, no fuzz
664 # fast case first, no offsets, no fuzz
659 old = h.old()
665 old = h.old()
660 # patch starts counting at 1 unless we are adding the file
666 # patch starts counting at 1 unless we are adding the file
661 if h.starta == 0:
667 if h.starta == 0:
662 start = 0
668 start = 0
663 else:
669 else:
664 start = h.starta + self.offset - 1
670 start = h.starta + self.offset - 1
665 orig_start = start
671 orig_start = start
666 # if there's skew we want to emit the "(offset %d lines)" even
672 # if there's skew we want to emit the "(offset %d lines)" even
667 # when the hunk cleanly applies at start + skew, so skip the
673 # when the hunk cleanly applies at start + skew, so skip the
668 # fast case code
674 # fast case code
669 if self.skew == 0 and diffhelpers.testhunk(old, self.lines, start) == 0:
675 if self.skew == 0 and diffhelpers.testhunk(old, self.lines, start) == 0:
670 if self.remove:
676 if self.remove:
671 self.backend.unlink(self.fname)
677 self.backend.unlink(self.fname)
672 else:
678 else:
673 self.lines[start : start + h.lena] = h.new()
679 self.lines[start : start + h.lena] = h.new()
674 self.offset += h.lenb - h.lena
680 self.offset += h.lenb - h.lena
675 self.dirty = True
681 self.dirty = True
676 return 0
682 return 0
677
683
678 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
684 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
679 self.hash = {}
685 self.hash = {}
680 for x, s in enumerate(self.lines):
686 for x, s in enumerate(self.lines):
681 self.hash.setdefault(s, []).append(x)
687 self.hash.setdefault(s, []).append(x)
682 if h.hunk[-1][0] != ' ':
688 if h.hunk[-1][0] != ' ':
683 # if the hunk tried to put something at the bottom of the file
689 # if the hunk tried to put something at the bottom of the file
684 # override the start line and use eof here
690 # override the start line and use eof here
685 search_start = len(self.lines)
691 search_start = len(self.lines)
686 else:
692 else:
687 search_start = orig_start + self.skew
693 search_start = orig_start + self.skew
688
694
689 for fuzzlen in xrange(3):
695 for fuzzlen in xrange(3):
690 for toponly in [True, False]:
696 for toponly in [True, False]:
691 old = h.old(fuzzlen, toponly)
697 old = h.old(fuzzlen, toponly)
692
698
693 cand = self.findlines(old[0][1:], search_start)
699 cand = self.findlines(old[0][1:], search_start)
694 for l in cand:
700 for l in cand:
695 if diffhelpers.testhunk(old, self.lines, l) == 0:
701 if diffhelpers.testhunk(old, self.lines, l) == 0:
696 newlines = h.new(fuzzlen, toponly)
702 newlines = h.new(fuzzlen, toponly)
697 self.lines[l : l + len(old)] = newlines
703 self.lines[l : l + len(old)] = newlines
698 self.offset += len(newlines) - len(old)
704 self.offset += len(newlines) - len(old)
699 self.skew = l - orig_start
705 self.skew = l - orig_start
700 self.dirty = True
706 self.dirty = True
701 offset = l - orig_start - fuzzlen
707 offset = l - orig_start - fuzzlen
702 if fuzzlen:
708 if fuzzlen:
703 msg = _("Hunk #%d succeeded at %d "
709 msg = _("Hunk #%d succeeded at %d "
704 "with fuzz %d "
710 "with fuzz %d "
705 "(offset %d lines).\n")
711 "(offset %d lines).\n")
706 self.printfile(True)
712 self.printfile(True)
707 self.ui.warn(msg %
713 self.ui.warn(msg %
708 (h.number, l + 1, fuzzlen, offset))
714 (h.number, l + 1, fuzzlen, offset))
709 else:
715 else:
710 msg = _("Hunk #%d succeeded at %d "
716 msg = _("Hunk #%d succeeded at %d "
711 "(offset %d lines).\n")
717 "(offset %d lines).\n")
712 self.ui.note(msg % (h.number, l + 1, offset))
718 self.ui.note(msg % (h.number, l + 1, offset))
713 return fuzzlen
719 return fuzzlen
714 self.printfile(True)
720 self.printfile(True)
715 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
721 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
716 self.rej.append(horig)
722 self.rej.append(horig)
717 return -1
723 return -1
718
724
719 def close(self):
725 def close(self):
720 if self.dirty:
726 if self.dirty:
721 self.writelines(self.fname, self.lines, self.mode)
727 self.writelines(self.fname, self.lines, self.mode)
722 self.write_rej()
728 self.write_rej()
723 return len(self.rej)
729 return len(self.rej)
724
730
725 class hunk(object):
731 class hunk(object):
726 def __init__(self, desc, num, lr, context):
732 def __init__(self, desc, num, lr, context):
727 self.number = num
733 self.number = num
728 self.desc = desc
734 self.desc = desc
729 self.hunk = [desc]
735 self.hunk = [desc]
730 self.a = []
736 self.a = []
731 self.b = []
737 self.b = []
732 self.starta = self.lena = None
738 self.starta = self.lena = None
733 self.startb = self.lenb = None
739 self.startb = self.lenb = None
734 if lr is not None:
740 if lr is not None:
735 if context:
741 if context:
736 self.read_context_hunk(lr)
742 self.read_context_hunk(lr)
737 else:
743 else:
738 self.read_unified_hunk(lr)
744 self.read_unified_hunk(lr)
739
745
740 def getnormalized(self):
746 def getnormalized(self):
741 """Return a copy with line endings normalized to LF."""
747 """Return a copy with line endings normalized to LF."""
742
748
743 def normalize(lines):
749 def normalize(lines):
744 nlines = []
750 nlines = []
745 for line in lines:
751 for line in lines:
746 if line.endswith('\r\n'):
752 if line.endswith('\r\n'):
747 line = line[:-2] + '\n'
753 line = line[:-2] + '\n'
748 nlines.append(line)
754 nlines.append(line)
749 return nlines
755 return nlines
750
756
751 # Dummy object, it is rebuilt manually
757 # Dummy object, it is rebuilt manually
752 nh = hunk(self.desc, self.number, None, None)
758 nh = hunk(self.desc, self.number, None, None)
753 nh.number = self.number
759 nh.number = self.number
754 nh.desc = self.desc
760 nh.desc = self.desc
755 nh.hunk = self.hunk
761 nh.hunk = self.hunk
756 nh.a = normalize(self.a)
762 nh.a = normalize(self.a)
757 nh.b = normalize(self.b)
763 nh.b = normalize(self.b)
758 nh.starta = self.starta
764 nh.starta = self.starta
759 nh.startb = self.startb
765 nh.startb = self.startb
760 nh.lena = self.lena
766 nh.lena = self.lena
761 nh.lenb = self.lenb
767 nh.lenb = self.lenb
762 return nh
768 return nh
763
769
764 def read_unified_hunk(self, lr):
770 def read_unified_hunk(self, lr):
765 m = unidesc.match(self.desc)
771 m = unidesc.match(self.desc)
766 if not m:
772 if not m:
767 raise PatchError(_("bad hunk #%d") % self.number)
773 raise PatchError(_("bad hunk #%d") % self.number)
768 self.starta, foo, self.lena, self.startb, foo2, self.lenb = m.groups()
774 self.starta, foo, self.lena, self.startb, foo2, self.lenb = m.groups()
769 if self.lena is None:
775 if self.lena is None:
770 self.lena = 1
776 self.lena = 1
771 else:
777 else:
772 self.lena = int(self.lena)
778 self.lena = int(self.lena)
773 if self.lenb is None:
779 if self.lenb is None:
774 self.lenb = 1
780 self.lenb = 1
775 else:
781 else:
776 self.lenb = int(self.lenb)
782 self.lenb = int(self.lenb)
777 self.starta = int(self.starta)
783 self.starta = int(self.starta)
778 self.startb = int(self.startb)
784 self.startb = int(self.startb)
779 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a, self.b)
785 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a, self.b)
780 # if we hit eof before finishing out the hunk, the last line will
786 # if we hit eof before finishing out the hunk, the last line will
781 # be zero length. Lets try to fix it up.
787 # be zero length. Lets try to fix it up.
782 while len(self.hunk[-1]) == 0:
788 while len(self.hunk[-1]) == 0:
783 del self.hunk[-1]
789 del self.hunk[-1]
784 del self.a[-1]
790 del self.a[-1]
785 del self.b[-1]
791 del self.b[-1]
786 self.lena -= 1
792 self.lena -= 1
787 self.lenb -= 1
793 self.lenb -= 1
788 self._fixnewline(lr)
794 self._fixnewline(lr)
789
795
790 def read_context_hunk(self, lr):
796 def read_context_hunk(self, lr):
791 self.desc = lr.readline()
797 self.desc = lr.readline()
792 m = contextdesc.match(self.desc)
798 m = contextdesc.match(self.desc)
793 if not m:
799 if not m:
794 raise PatchError(_("bad hunk #%d") % self.number)
800 raise PatchError(_("bad hunk #%d") % self.number)
795 foo, self.starta, foo2, aend, foo3 = m.groups()
801 foo, self.starta, foo2, aend, foo3 = m.groups()
796 self.starta = int(self.starta)
802 self.starta = int(self.starta)
797 if aend is None:
803 if aend is None:
798 aend = self.starta
804 aend = self.starta
799 self.lena = int(aend) - self.starta
805 self.lena = int(aend) - self.starta
800 if self.starta:
806 if self.starta:
801 self.lena += 1
807 self.lena += 1
802 for x in xrange(self.lena):
808 for x in xrange(self.lena):
803 l = lr.readline()
809 l = lr.readline()
804 if l.startswith('---'):
810 if l.startswith('---'):
805 # lines addition, old block is empty
811 # lines addition, old block is empty
806 lr.push(l)
812 lr.push(l)
807 break
813 break
808 s = l[2:]
814 s = l[2:]
809 if l.startswith('- ') or l.startswith('! '):
815 if l.startswith('- ') or l.startswith('! '):
810 u = '-' + s
816 u = '-' + s
811 elif l.startswith(' '):
817 elif l.startswith(' '):
812 u = ' ' + s
818 u = ' ' + s
813 else:
819 else:
814 raise PatchError(_("bad hunk #%d old text line %d") %
820 raise PatchError(_("bad hunk #%d old text line %d") %
815 (self.number, x))
821 (self.number, x))
816 self.a.append(u)
822 self.a.append(u)
817 self.hunk.append(u)
823 self.hunk.append(u)
818
824
819 l = lr.readline()
825 l = lr.readline()
820 if l.startswith('\ '):
826 if l.startswith('\ '):
821 s = self.a[-1][:-1]
827 s = self.a[-1][:-1]
822 self.a[-1] = s
828 self.a[-1] = s
823 self.hunk[-1] = s
829 self.hunk[-1] = s
824 l = lr.readline()
830 l = lr.readline()
825 m = contextdesc.match(l)
831 m = contextdesc.match(l)
826 if not m:
832 if not m:
827 raise PatchError(_("bad hunk #%d") % self.number)
833 raise PatchError(_("bad hunk #%d") % self.number)
828 foo, self.startb, foo2, bend, foo3 = m.groups()
834 foo, self.startb, foo2, bend, foo3 = m.groups()
829 self.startb = int(self.startb)
835 self.startb = int(self.startb)
830 if bend is None:
836 if bend is None:
831 bend = self.startb
837 bend = self.startb
832 self.lenb = int(bend) - self.startb
838 self.lenb = int(bend) - self.startb
833 if self.startb:
839 if self.startb:
834 self.lenb += 1
840 self.lenb += 1
835 hunki = 1
841 hunki = 1
836 for x in xrange(self.lenb):
842 for x in xrange(self.lenb):
837 l = lr.readline()
843 l = lr.readline()
838 if l.startswith('\ '):
844 if l.startswith('\ '):
839 # XXX: the only way to hit this is with an invalid line range.
845 # XXX: the only way to hit this is with an invalid line range.
840 # The no-eol marker is not counted in the line range, but I
846 # The no-eol marker is not counted in the line range, but I
841 # guess there are diff(1) out there which behave differently.
847 # guess there are diff(1) out there which behave differently.
842 s = self.b[-1][:-1]
848 s = self.b[-1][:-1]
843 self.b[-1] = s
849 self.b[-1] = s
844 self.hunk[hunki - 1] = s
850 self.hunk[hunki - 1] = s
845 continue
851 continue
846 if not l:
852 if not l:
847 # line deletions, new block is empty and we hit EOF
853 # line deletions, new block is empty and we hit EOF
848 lr.push(l)
854 lr.push(l)
849 break
855 break
850 s = l[2:]
856 s = l[2:]
851 if l.startswith('+ ') or l.startswith('! '):
857 if l.startswith('+ ') or l.startswith('! '):
852 u = '+' + s
858 u = '+' + s
853 elif l.startswith(' '):
859 elif l.startswith(' '):
854 u = ' ' + s
860 u = ' ' + s
855 elif len(self.b) == 0:
861 elif len(self.b) == 0:
856 # line deletions, new block is empty
862 # line deletions, new block is empty
857 lr.push(l)
863 lr.push(l)
858 break
864 break
859 else:
865 else:
860 raise PatchError(_("bad hunk #%d old text line %d") %
866 raise PatchError(_("bad hunk #%d old text line %d") %
861 (self.number, x))
867 (self.number, x))
862 self.b.append(s)
868 self.b.append(s)
863 while True:
869 while True:
864 if hunki >= len(self.hunk):
870 if hunki >= len(self.hunk):
865 h = ""
871 h = ""
866 else:
872 else:
867 h = self.hunk[hunki]
873 h = self.hunk[hunki]
868 hunki += 1
874 hunki += 1
869 if h == u:
875 if h == u:
870 break
876 break
871 elif h.startswith('-'):
877 elif h.startswith('-'):
872 continue
878 continue
873 else:
879 else:
874 self.hunk.insert(hunki - 1, u)
880 self.hunk.insert(hunki - 1, u)
875 break
881 break
876
882
877 if not self.a:
883 if not self.a:
878 # this happens when lines were only added to the hunk
884 # this happens when lines were only added to the hunk
879 for x in self.hunk:
885 for x in self.hunk:
880 if x.startswith('-') or x.startswith(' '):
886 if x.startswith('-') or x.startswith(' '):
881 self.a.append(x)
887 self.a.append(x)
882 if not self.b:
888 if not self.b:
883 # this happens when lines were only deleted from the hunk
889 # this happens when lines were only deleted from the hunk
884 for x in self.hunk:
890 for x in self.hunk:
885 if x.startswith('+') or x.startswith(' '):
891 if x.startswith('+') or x.startswith(' '):
886 self.b.append(x[1:])
892 self.b.append(x[1:])
887 # @@ -start,len +start,len @@
893 # @@ -start,len +start,len @@
888 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
894 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
889 self.startb, self.lenb)
895 self.startb, self.lenb)
890 self.hunk[0] = self.desc
896 self.hunk[0] = self.desc
891 self._fixnewline(lr)
897 self._fixnewline(lr)
892
898
893 def _fixnewline(self, lr):
899 def _fixnewline(self, lr):
894 l = lr.readline()
900 l = lr.readline()
895 if l.startswith('\ '):
901 if l.startswith('\ '):
896 diffhelpers.fix_newline(self.hunk, self.a, self.b)
902 diffhelpers.fix_newline(self.hunk, self.a, self.b)
897 else:
903 else:
898 lr.push(l)
904 lr.push(l)
899
905
900 def complete(self):
906 def complete(self):
901 return len(self.a) == self.lena and len(self.b) == self.lenb
907 return len(self.a) == self.lena and len(self.b) == self.lenb
902
908
903 def fuzzit(self, l, fuzz, toponly):
909 def fuzzit(self, l, fuzz, toponly):
904 # this removes context lines from the top and bottom of list 'l'. It
910 # this removes context lines from the top and bottom of list 'l'. It
905 # checks the hunk to make sure only context lines are removed, and then
911 # checks the hunk to make sure only context lines are removed, and then
906 # returns a new shortened list of lines.
912 # returns a new shortened list of lines.
907 fuzz = min(fuzz, len(l)-1)
913 fuzz = min(fuzz, len(l)-1)
908 if fuzz:
914 if fuzz:
909 top = 0
915 top = 0
910 bot = 0
916 bot = 0
911 hlen = len(self.hunk)
917 hlen = len(self.hunk)
912 for x in xrange(hlen - 1):
918 for x in xrange(hlen - 1):
913 # the hunk starts with the @@ line, so use x+1
919 # the hunk starts with the @@ line, so use x+1
914 if self.hunk[x + 1][0] == ' ':
920 if self.hunk[x + 1][0] == ' ':
915 top += 1
921 top += 1
916 else:
922 else:
917 break
923 break
918 if not toponly:
924 if not toponly:
919 for x in xrange(hlen - 1):
925 for x in xrange(hlen - 1):
920 if self.hunk[hlen - bot - 1][0] == ' ':
926 if self.hunk[hlen - bot - 1][0] == ' ':
921 bot += 1
927 bot += 1
922 else:
928 else:
923 break
929 break
924
930
925 # top and bot now count context in the hunk
931 # top and bot now count context in the hunk
926 # adjust them if either one is short
932 # adjust them if either one is short
927 context = max(top, bot, 3)
933 context = max(top, bot, 3)
928 if bot < context:
934 if bot < context:
929 bot = max(0, fuzz - (context - bot))
935 bot = max(0, fuzz - (context - bot))
930 else:
936 else:
931 bot = min(fuzz, bot)
937 bot = min(fuzz, bot)
932 if top < context:
938 if top < context:
933 top = max(0, fuzz - (context - top))
939 top = max(0, fuzz - (context - top))
934 else:
940 else:
935 top = min(fuzz, top)
941 top = min(fuzz, top)
936
942
937 return l[top:len(l)-bot]
943 return l[top:len(l)-bot]
938 return l
944 return l
939
945
940 def old(self, fuzz=0, toponly=False):
946 def old(self, fuzz=0, toponly=False):
941 return self.fuzzit(self.a, fuzz, toponly)
947 return self.fuzzit(self.a, fuzz, toponly)
942
948
943 def new(self, fuzz=0, toponly=False):
949 def new(self, fuzz=0, toponly=False):
944 return self.fuzzit(self.b, fuzz, toponly)
950 return self.fuzzit(self.b, fuzz, toponly)
945
951
946 class binhunk:
952 class binhunk:
947 'A binary patch file. Only understands literals so far.'
953 'A binary patch file. Only understands literals so far.'
948 def __init__(self, lr):
954 def __init__(self, lr):
949 self.text = None
955 self.text = None
950 self.hunk = ['GIT binary patch\n']
956 self.hunk = ['GIT binary patch\n']
951 self._read(lr)
957 self._read(lr)
952
958
953 def complete(self):
959 def complete(self):
954 return self.text is not None
960 return self.text is not None
955
961
956 def new(self):
962 def new(self):
957 return [self.text]
963 return [self.text]
958
964
959 def _read(self, lr):
965 def _read(self, lr):
960 line = lr.readline()
966 line = lr.readline()
961 self.hunk.append(line)
967 self.hunk.append(line)
962 while line and not line.startswith('literal '):
968 while line and not line.startswith('literal '):
963 line = lr.readline()
969 line = lr.readline()
964 self.hunk.append(line)
970 self.hunk.append(line)
965 if not line:
971 if not line:
966 raise PatchError(_('could not extract binary patch'))
972 raise PatchError(_('could not extract binary patch'))
967 size = int(line[8:].rstrip())
973 size = int(line[8:].rstrip())
968 dec = []
974 dec = []
969 line = lr.readline()
975 line = lr.readline()
970 self.hunk.append(line)
976 self.hunk.append(line)
971 while len(line) > 1:
977 while len(line) > 1:
972 l = line[0]
978 l = line[0]
973 if l <= 'Z' and l >= 'A':
979 if l <= 'Z' and l >= 'A':
974 l = ord(l) - ord('A') + 1
980 l = ord(l) - ord('A') + 1
975 else:
981 else:
976 l = ord(l) - ord('a') + 27
982 l = ord(l) - ord('a') + 27
977 dec.append(base85.b85decode(line[1:-1])[:l])
983 dec.append(base85.b85decode(line[1:-1])[:l])
978 line = lr.readline()
984 line = lr.readline()
979 self.hunk.append(line)
985 self.hunk.append(line)
980 text = zlib.decompress(''.join(dec))
986 text = zlib.decompress(''.join(dec))
981 if len(text) != size:
987 if len(text) != size:
982 raise PatchError(_('binary patch is %d bytes, not %d') %
988 raise PatchError(_('binary patch is %d bytes, not %d') %
983 len(text), size)
989 len(text), size)
984 self.text = text
990 self.text = text
985
991
986 def parsefilename(str):
992 def parsefilename(str):
987 # --- filename \t|space stuff
993 # --- filename \t|space stuff
988 s = str[4:].rstrip('\r\n')
994 s = str[4:].rstrip('\r\n')
989 i = s.find('\t')
995 i = s.find('\t')
990 if i < 0:
996 if i < 0:
991 i = s.find(' ')
997 i = s.find(' ')
992 if i < 0:
998 if i < 0:
993 return s
999 return s
994 return s[:i]
1000 return s[:i]
995
1001
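# A minimal sketch of how the "---"/"+++" header names above are parsed,
# assuming typical output from diff(1) or hg diff (tab- or space-separated
# timestamp suffix):
#
#   >>> parsefilename('--- a/hello.c\tThu Jan 01 00:00:00 1970 +0000\n')
#   'a/hello.c'
#   >>> parsefilename('+++ b/hello.c Mon Sep 17 00:00:00 2001\n')
#   'b/hello.c'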
996 def pathstrip(path, strip):
1002 def pathstrip(path, strip):
997 pathlen = len(path)
1003 pathlen = len(path)
998 i = 0
1004 i = 0
999 if strip == 0:
1005 if strip == 0:
1000 return '', path.rstrip()
1006 return '', path.rstrip()
1001 count = strip
1007 count = strip
1002 while count > 0:
1008 while count > 0:
1003 i = path.find('/', i)
1009 i = path.find('/', i)
1004 if i == -1:
1010 if i == -1:
1005 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1011 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1006 (count, strip, path))
1012 (count, strip, path))
1007 i += 1
1013 i += 1
1008 # consume '//' in the path
1014 # consume '//' in the path
1009 while i < pathlen - 1 and path[i] == '/':
1015 while i < pathlen - 1 and path[i] == '/':
1010 i += 1
1016 i += 1
1011 count -= 1
1017 count -= 1
1012 return path[:i].lstrip(), path[i:].rstrip()
1018 return path[:i].lstrip(), path[i:].rstrip()
1013
1019
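# A short illustration of pathstrip(), assuming forward-slash separated
# names as they appear in patch headers (strip plays the same role as the
# -p option of patch(1)):
#
#   >>> pathstrip('a/b/c.py', 0)
#   ('', 'a/b/c.py')
#   >>> pathstrip('a/b/c.py', 1)
#   ('a/', 'b/c.py')
#   >>> pathstrip('a/b/c.py', 2)
#   ('a/b/', 'c.py')
#
# Asking to strip more components than the path contains raises PatchError.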
1014 def selectfile(backend, afile_orig, bfile_orig, hunk, strip, gp):
1020 def selectfile(backend, afile_orig, bfile_orig, hunk, strip, gp):
1015 if gp:
1021 if gp:
1016 # Git patches do not play games. Excluding copies from the
1022 # Git patches do not play games. Excluding copies from the
1017 # following heuristic avoids a lot of confusion
1023 # following heuristic avoids a lot of confusion
1018 fname = pathstrip(gp.path, strip - 1)[1]
1024 fname = pathstrip(gp.path, strip - 1)[1]
1019 create = gp.op in ('ADD', 'COPY', 'RENAME')
1025 create = gp.op in ('ADD', 'COPY', 'RENAME')
1020 remove = gp.op == 'DELETE'
1026 remove = gp.op == 'DELETE'
1021 missing = not create and not backend.exists(fname)
1027 missing = not create and not backend.exists(fname)
1022 return fname, create, remove
1028 return fname, create, remove
1023 nulla = afile_orig == "/dev/null"
1029 nulla = afile_orig == "/dev/null"
1024 nullb = bfile_orig == "/dev/null"
1030 nullb = bfile_orig == "/dev/null"
1025 create = nulla and hunk.starta == 0 and hunk.lena == 0
1031 create = nulla and hunk.starta == 0 and hunk.lena == 0
1026 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1032 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1027 abase, afile = pathstrip(afile_orig, strip)
1033 abase, afile = pathstrip(afile_orig, strip)
1028 gooda = not nulla and backend.exists(afile)
1034 gooda = not nulla and backend.exists(afile)
1029 bbase, bfile = pathstrip(bfile_orig, strip)
1035 bbase, bfile = pathstrip(bfile_orig, strip)
1030 if afile == bfile:
1036 if afile == bfile:
1031 goodb = gooda
1037 goodb = gooda
1032 else:
1038 else:
1033 goodb = not nullb and backend.exists(bfile)
1039 goodb = not nullb and backend.exists(bfile)
1034 missing = not goodb and not gooda and not create
1040 missing = not goodb and not gooda and not create
1035
1041
1036 # some diff programs apparently produce patches where the afile is
1042 # some diff programs apparently produce patches where the afile is
1037 # not /dev/null, but afile starts with bfile
1043 # not /dev/null, but afile starts with bfile
1038 abasedir = afile[:afile.rfind('/') + 1]
1044 abasedir = afile[:afile.rfind('/') + 1]
1039 bbasedir = bfile[:bfile.rfind('/') + 1]
1045 bbasedir = bfile[:bfile.rfind('/') + 1]
1040 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1046 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1041 and hunk.starta == 0 and hunk.lena == 0):
1047 and hunk.starta == 0 and hunk.lena == 0):
1042 create = True
1048 create = True
1043 missing = False
1049 missing = False
1044
1050
1045 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1051 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1046 # diff is between a file and its backup. In this case, the original
1052 # diff is between a file and its backup. In this case, the original
1047 # file should be patched (see original mpatch code).
1053 # file should be patched (see original mpatch code).
1048 isbackup = (abase == bbase and bfile.startswith(afile))
1054 isbackup = (abase == bbase and bfile.startswith(afile))
1049 fname = None
1055 fname = None
1050 if not missing:
1056 if not missing:
1051 if gooda and goodb:
1057 if gooda and goodb:
1052 fname = isbackup and afile or bfile
1058 fname = isbackup and afile or bfile
1053 elif gooda:
1059 elif gooda:
1054 fname = afile
1060 fname = afile
1055
1061
1056 if not fname:
1062 if not fname:
1057 if not nullb:
1063 if not nullb:
1058 fname = isbackup and afile or bfile
1064 fname = isbackup and afile or bfile
1059 elif not nulla:
1065 elif not nulla:
1060 fname = afile
1066 fname = afile
1061 else:
1067 else:
1062 raise PatchError(_("undefined source and destination files"))
1068 raise PatchError(_("undefined source and destination files"))
1063
1069
1064 return fname, create, remove
1070 return fname, create, remove
1065
1071
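# A rough sketch of the backup-file heuristic above, using hypothetical
# stand-in backend and hunk objects (not part of this module):
#
#   >>> class fakebackend(object):
#   ...     def exists(self, fname):
#   ...         return fname == 'a/b/foo'
#   >>> class fakehunk(object):
#   ...     starta = lena = startb = lenb = 1
#   >>> selectfile(fakebackend(), 'a/b/foo', 'a/b/foo.orig', fakehunk(), 0, None)
#   ('a/b/foo', False, False)
#
# i.e. when the b-side looks like a backup of an existing a-side file, the
# original file is selected for patching and is neither created nor removed.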
1066 def scangitpatch(lr, firstline):
1072 def scangitpatch(lr, firstline):
1067 """
1073 """
1068 Git patches can emit:
1074 Git patches can emit:
1069 - rename a to b
1075 - rename a to b
1070 - change b
1076 - change b
1071 - copy a to c
1077 - copy a to c
1072 - change c
1078 - change c
1073
1079
1074 We cannot apply this sequence as-is: the renamed 'a' could not be
1080 We cannot apply this sequence as-is: the renamed 'a' could not be
1075 found, for it would have been renamed already. And we cannot copy
1081 found, for it would have been renamed already. And we cannot copy
1076 from 'b' instead because 'b' would have been changed already. So
1082 from 'b' instead because 'b' would have been changed already. So
1077 we scan the git patch for copy and rename commands so we can
1083 we scan the git patch for copy and rename commands so we can
1078 perform the copies ahead of time.
1084 perform the copies ahead of time.
1079 """
1085 """
1080 pos = 0
1086 pos = 0
1081 try:
1087 try:
1082 pos = lr.fp.tell()
1088 pos = lr.fp.tell()
1083 fp = lr.fp
1089 fp = lr.fp
1084 except IOError:
1090 except IOError:
1085 fp = cStringIO.StringIO(lr.fp.read())
1091 fp = cStringIO.StringIO(lr.fp.read())
1086 gitlr = linereader(fp)
1092 gitlr = linereader(fp)
1087 gitlr.push(firstline)
1093 gitlr.push(firstline)
1088 gitpatches = readgitpatch(gitlr)
1094 gitpatches = readgitpatch(gitlr)
1089 fp.seek(pos)
1095 fp.seek(pos)
1090 return gitpatches
1096 return gitpatches
1091
1097
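# For instance, a git patch shaped like the docstring example (abridged)
#
#   diff --git a/a b/b
#   rename from a
#   rename to b
#   @@ ... hunks touching b ...
#   diff --git a/a b/c
#   copy from a
#   copy to c
#   @@ ... hunks touching c ...
#
# cannot be replayed strictly in order: by the time "copy from a" is seen,
# 'a' has already been renamed away. The pre-scan above collects these
# COPY/RENAME records so _applydiff (below) can snapshot their sources into
# the file store before any hunk is applied.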
1092 def iterhunks(fp):
1098 def iterhunks(fp):
1093 """Read a patch and yield the following events:
1099 """Read a patch and yield the following events:
1094 - ("file", afile, bfile, firsthunk): select a new target file.
1100 - ("file", afile, bfile, firsthunk): select a new target file.
1095 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1101 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1096 "file" event.
1102 "file" event.
1097 - ("git", gitchanges): current diff is in git format, gitchanges
1103 - ("git", gitchanges): current diff is in git format, gitchanges
1098 is a list of gitpatch records for copied and renamed files. Unique event.
1104 is a list of gitpatch records for copied and renamed files. Unique event.
1099 """
1105 """
1100 afile = ""
1106 afile = ""
1101 bfile = ""
1107 bfile = ""
1102 state = None
1108 state = None
1103 hunknum = 0
1109 hunknum = 0
1104 emitfile = newfile = False
1110 emitfile = newfile = False
1105 gitpatches = None
1111 gitpatches = None
1106
1112
1107 # our states
1113 # our states
1108 BFILE = 1
1114 BFILE = 1
1109 context = None
1115 context = None
1110 lr = linereader(fp)
1116 lr = linereader(fp)
1111
1117
1112 while True:
1118 while True:
1113 x = lr.readline()
1119 x = lr.readline()
1114 if not x:
1120 if not x:
1115 break
1121 break
1116 if state == BFILE and (
1122 if state == BFILE and (
1117 (not context and x[0] == '@')
1123 (not context and x[0] == '@')
1118 or (context is not False and x.startswith('***************'))
1124 or (context is not False and x.startswith('***************'))
1119 or x.startswith('GIT binary patch')):
1125 or x.startswith('GIT binary patch')):
1120 gp = None
1126 gp = None
1121 if gitpatches and gitpatches[-1][0] == bfile:
1127 if gitpatches and gitpatches[-1][0] == bfile:
1122 gp = gitpatches.pop()[1]
1128 gp = gitpatches.pop()[1]
1123 if x.startswith('GIT binary patch'):
1129 if x.startswith('GIT binary patch'):
1124 h = binhunk(lr)
1130 h = binhunk(lr)
1125 else:
1131 else:
1126 if context is None and x.startswith('***************'):
1132 if context is None and x.startswith('***************'):
1127 context = True
1133 context = True
1128 h = hunk(x, hunknum + 1, lr, context)
1134 h = hunk(x, hunknum + 1, lr, context)
1129 hunknum += 1
1135 hunknum += 1
1130 if emitfile:
1136 if emitfile:
1131 emitfile = False
1137 emitfile = False
1132 yield 'file', (afile, bfile, h, gp)
1138 yield 'file', (afile, bfile, h, gp)
1133 yield 'hunk', h
1139 yield 'hunk', h
1134 elif x.startswith('diff --git'):
1140 elif x.startswith('diff --git'):
1135 m = gitre.match(x)
1141 m = gitre.match(x)
1136 if not m:
1142 if not m:
1137 continue
1143 continue
1138 if gitpatches is None:
1144 if gitpatches is None:
1139 # scan whole input for git metadata
1145 # scan whole input for git metadata
1140 gitpatches = [('b/' + gp.path, gp) for gp
1146 gitpatches = [('b/' + gp.path, gp) for gp
1141 in scangitpatch(lr, x)]
1147 in scangitpatch(lr, x)]
1142 yield 'git', [g[1] for g in gitpatches
1148 yield 'git', [g[1] for g in gitpatches
1143 if g[1].op in ('COPY', 'RENAME')]
1149 if g[1].op in ('COPY', 'RENAME')]
1144 gitpatches.reverse()
1150 gitpatches.reverse()
1145 afile = 'a/' + m.group(1)
1151 afile = 'a/' + m.group(1)
1146 bfile = 'b/' + m.group(2)
1152 bfile = 'b/' + m.group(2)
1147 while bfile != gitpatches[-1][0]:
1153 while bfile != gitpatches[-1][0]:
1148 gp = gitpatches.pop()[1]
1154 gp = gitpatches.pop()[1]
1149 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp)
1155 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp)
1150 gp = gitpatches[-1][1]
1156 gp = gitpatches[-1][1]
1151 # copy/rename + modify should modify target, not source
1157 # copy/rename + modify should modify target, not source
1152 if gp.op in ('COPY', 'DELETE', 'RENAME', 'ADD') or gp.mode:
1158 if gp.op in ('COPY', 'DELETE', 'RENAME', 'ADD') or gp.mode:
1153 afile = bfile
1159 afile = bfile
1154 newfile = True
1160 newfile = True
1155 elif x.startswith('---'):
1161 elif x.startswith('---'):
1156 # check for a unified diff
1162 # check for a unified diff
1157 l2 = lr.readline()
1163 l2 = lr.readline()
1158 if not l2.startswith('+++'):
1164 if not l2.startswith('+++'):
1159 lr.push(l2)
1165 lr.push(l2)
1160 continue
1166 continue
1161 newfile = True
1167 newfile = True
1162 context = False
1168 context = False
1163 afile = parsefilename(x)
1169 afile = parsefilename(x)
1164 bfile = parsefilename(l2)
1170 bfile = parsefilename(l2)
1165 elif x.startswith('***'):
1171 elif x.startswith('***'):
1166 # check for a context diff
1172 # check for a context diff
1167 l2 = lr.readline()
1173 l2 = lr.readline()
1168 if not l2.startswith('---'):
1174 if not l2.startswith('---'):
1169 lr.push(l2)
1175 lr.push(l2)
1170 continue
1176 continue
1171 l3 = lr.readline()
1177 l3 = lr.readline()
1172 lr.push(l3)
1178 lr.push(l3)
1173 if not l3.startswith("***************"):
1179 if not l3.startswith("***************"):
1174 lr.push(l2)
1180 lr.push(l2)
1175 continue
1181 continue
1176 newfile = True
1182 newfile = True
1177 context = True
1183 context = True
1178 afile = parsefilename(x)
1184 afile = parsefilename(x)
1179 bfile = parsefilename(l2)
1185 bfile = parsefilename(l2)
1180
1186
1181 if newfile:
1187 if newfile:
1182 newfile = False
1188 newfile = False
1183 emitfile = True
1189 emitfile = True
1184 state = BFILE
1190 state = BFILE
1185 hunknum = 0
1191 hunknum = 0
1186
1192
1187 while gitpatches:
1193 while gitpatches:
1188 gp = gitpatches.pop()[1]
1194 gp = gitpatches.pop()[1]
1189 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp)
1195 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp)
1190
1196
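# A minimal sketch of consuming the event stream for a plain unified diff,
# assuming this module is importable as mercurial.patch:
#
#   >>> import cStringIO
#   >>> d = ('--- a/foo\n'
#   ...      '+++ b/foo\n'
#   ...      '@@ -1,1 +1,1 @@\n'
#   ...      '-old\n'
#   ...      '+new\n')
#   >>> [event for event, data in iterhunks(cStringIO.StringIO(d))]
#   ['file', 'hunk']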
1191 def applydiff(ui, fp, changed, backend, store, strip=1, eolmode='strict'):
1197 def applydiff(ui, fp, changed, backend, store, strip=1, eolmode='strict'):
1192 """Reads a patch from fp and tries to apply it.
1198 """Reads a patch from fp and tries to apply it.
1193
1199
1194 The dict 'changed' is filled in with all of the filenames changed
1200 The dict 'changed' is filled in with all of the filenames changed
1195 by the patch. Returns 0 for a clean patch, -1 if any rejects were
1201 by the patch. Returns 0 for a clean patch, -1 if any rejects were
1196 found and 1 if there was any fuzz.
1202 found and 1 if there was any fuzz.
1197
1203
1198 If 'eolmode' is 'strict', the patch content and patched file are
1204 If 'eolmode' is 'strict', the patch content and patched file are
1199 read in binary mode. Otherwise, line endings are ignored when
1205 read in binary mode. Otherwise, line endings are ignored when
1200 patching then normalized according to 'eolmode'.
1206 patching then normalized according to 'eolmode'.
1201 """
1207 """
1202 return _applydiff(ui, fp, patchfile, backend, store, changed, strip=strip,
1208 return _applydiff(ui, fp, patchfile, backend, store, changed, strip=strip,
1203 eolmode=eolmode)
1209 eolmode=eolmode)
1204
1210
1205 def _applydiff(ui, fp, patcher, backend, store, changed, strip=1,
1211 def _applydiff(ui, fp, patcher, backend, store, changed, strip=1,
1206 eolmode='strict'):
1212 eolmode='strict'):
1207
1213
1208 def pstrip(p):
1214 def pstrip(p):
1209 return pathstrip(p, strip - 1)[1]
1215 return pathstrip(p, strip - 1)[1]
1210
1216
1211 rejects = 0
1217 rejects = 0
1212 err = 0
1218 err = 0
1213 current_file = None
1219 current_file = None
1214
1220
1215 for state, values in iterhunks(fp):
1221 for state, values in iterhunks(fp):
1216 if state == 'hunk':
1222 if state == 'hunk':
1217 if not current_file:
1223 if not current_file:
1218 continue
1224 continue
1219 ret = current_file.apply(values)
1225 ret = current_file.apply(values)
1220 if ret >= 0:
1226 if ret >= 0:
1221 changed.setdefault(current_file.fname, None)
1227 changed.setdefault(current_file.fname, None)
1222 if ret > 0:
1228 if ret > 0:
1223 err = 1
1229 err = 1
1224 elif state == 'file':
1230 elif state == 'file':
1225 if current_file:
1231 if current_file:
1226 rejects += current_file.close()
1232 rejects += current_file.close()
1227 current_file = None
1233 current_file = None
1228 afile, bfile, first_hunk, gp = values
1234 afile, bfile, first_hunk, gp = values
1229 copysource = None
1235 copysource = None
1230 if gp:
1236 if gp:
1231 path = pstrip(gp.path)
1237 path = pstrip(gp.path)
1232 if gp.oldpath:
1238 if gp.oldpath:
1233 copysource = pstrip(gp.oldpath)
1239 copysource = pstrip(gp.oldpath)
1234 changed[path] = gp
1240 changed[path] = gp
1235 if gp.op == 'DELETE':
1241 if gp.op == 'DELETE':
1236 backend.unlink(path)
1242 backend.unlink(path)
1237 continue
1243 continue
1238 if gp.op == 'RENAME':
1244 if gp.op == 'RENAME':
1239 backend.unlink(copysource)
1245 backend.unlink(copysource)
1240 if not first_hunk:
1246 if not first_hunk:
1241 data, mode = None, None
1247 data, mode = None, None
1242 if gp.op in ('RENAME', 'COPY'):
1248 if gp.op in ('RENAME', 'COPY'):
1243 data, mode = store.getfile(copysource)
1249 data, mode = store.getfile(copysource)
1244 if gp.mode:
1250 if gp.mode:
1245 mode = gp.mode
1251 mode = gp.mode
1246 if gp.op == 'ADD':
1252 if gp.op == 'ADD':
1247 # Added files without content have no hunk and
1253 # Added files without content have no hunk and
1248 # must be created
1254 # must be created
1249 data = ''
1255 data = ''
1250 if data or mode:
1256 if data or mode:
1251 if (gp.op in ('ADD', 'RENAME', 'COPY')
1257 if (gp.op in ('ADD', 'RENAME', 'COPY')
1252 and backend.exists(path)):
1258 and backend.exists(path)):
1253 raise PatchError(_("cannot create %s: destination "
1259 raise PatchError(_("cannot create %s: destination "
1254 "already exists") % path)
1260 "already exists") % path)
1255 backend.setfile(path, data, mode, copysource)
1261 backend.setfile(path, data, mode, copysource)
1256 if not first_hunk:
1262 if not first_hunk:
1257 continue
1263 continue
1258 try:
1264 try:
1259 mode = gp and gp.mode or None
1265 mode = gp and gp.mode or None
1260 current_file, create, remove = selectfile(
1266 current_file, create, remove = selectfile(
1261 backend, afile, bfile, first_hunk, strip, gp)
1267 backend, afile, bfile, first_hunk, strip, gp)
1262 current_file = patcher(ui, current_file, backend, store, mode,
1268 current_file = patcher(ui, current_file, backend, store, mode,
1263 create, remove, eolmode=eolmode,
1269 create, remove, eolmode=eolmode,
1264 copysource=copysource)
1270 copysource=copysource)
1265 except PatchError, inst:
1271 except PatchError, inst:
1266 ui.warn(str(inst) + '\n')
1272 ui.warn(str(inst) + '\n')
1267 current_file = None
1273 current_file = None
1268 rejects += 1
1274 rejects += 1
1269 continue
1275 continue
1270 elif state == 'git':
1276 elif state == 'git':
1271 for gp in values:
1277 for gp in values:
1272 path = pstrip(gp.oldpath)
1278 path = pstrip(gp.oldpath)
1273 data, mode = backend.getfile(path)
1279 data, mode = backend.getfile(path)
1274 store.setfile(path, data, mode)
1280 store.setfile(path, data, mode)
1275 else:
1281 else:
1276 raise util.Abort(_('unsupported parser state: %s') % state)
1282 raise util.Abort(_('unsupported parser state: %s') % state)
1277
1283
1278 if current_file:
1284 if current_file:
1279 rejects += current_file.close()
1285 rejects += current_file.close()
1280
1286
1281 if rejects:
1287 if rejects:
1282 return -1
1288 return -1
1283 return err
1289 return err
1284
1290
1285 def _externalpatch(ui, repo, patcher, patchname, strip, files,
1291 def _externalpatch(ui, repo, patcher, patchname, strip, files,
1286 similarity):
1292 similarity):
1287 """use <patcher> to apply <patchname> to the working directory.
1293 """use <patcher> to apply <patchname> to the working directory.
1288 returns whether patch was applied with fuzz factor."""
1294 returns whether patch was applied with fuzz factor."""
1289
1295
1290 fuzz = False
1296 fuzz = False
1291 args = []
1297 args = []
1292 cwd = repo.root
1298 cwd = repo.root
1293 if cwd:
1299 if cwd:
1294 args.append('-d %s' % util.shellquote(cwd))
1300 args.append('-d %s' % util.shellquote(cwd))
1295 fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
1301 fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
1296 util.shellquote(patchname)))
1302 util.shellquote(patchname)))
1297 try:
1303 try:
1298 for line in fp:
1304 for line in fp:
1299 line = line.rstrip()
1305 line = line.rstrip()
1300 ui.note(line + '\n')
1306 ui.note(line + '\n')
1301 if line.startswith('patching file '):
1307 if line.startswith('patching file '):
1302 pf = util.parsepatchoutput(line)
1308 pf = util.parsepatchoutput(line)
1303 printed_file = False
1309 printed_file = False
1304 files.setdefault(pf, None)
1310 files.setdefault(pf, None)
1305 elif line.find('with fuzz') >= 0:
1311 elif line.find('with fuzz') >= 0:
1306 fuzz = True
1312 fuzz = True
1307 if not printed_file:
1313 if not printed_file:
1308 ui.warn(pf + '\n')
1314 ui.warn(pf + '\n')
1309 printed_file = True
1315 printed_file = True
1310 ui.warn(line + '\n')
1316 ui.warn(line + '\n')
1311 elif line.find('saving rejects to file') >= 0:
1317 elif line.find('saving rejects to file') >= 0:
1312 ui.warn(line + '\n')
1318 ui.warn(line + '\n')
1313 elif line.find('FAILED') >= 0:
1319 elif line.find('FAILED') >= 0:
1314 if not printed_file:
1320 if not printed_file:
1315 ui.warn(pf + '\n')
1321 ui.warn(pf + '\n')
1316 printed_file = True
1322 printed_file = True
1317 ui.warn(line + '\n')
1323 ui.warn(line + '\n')
1318 finally:
1324 finally:
1319 if files:
1325 if files:
1320 cfiles = list(files)
1326 cfiles = list(files)
1321 cwd = repo.getcwd()
1327 cwd = repo.getcwd()
1322 if cwd:
1328 if cwd:
1323 cfiles = [util.pathto(repo.root, cwd, f)
1329 cfiles = [util.pathto(repo.root, cwd, f)
1324 for f in cfiles]
1330 for f in cfiles]
1325 scmutil.addremove(repo, cfiles, similarity=similarity)
1331 scmutil.addremove(repo, cfiles, similarity=similarity)
1326 code = fp.close()
1332 code = fp.close()
1327 if code:
1333 if code:
1328 raise PatchError(_("patch command failed: %s") %
1334 raise PatchError(_("patch command failed: %s") %
1329 util.explainexit(code)[0])
1335 util.explainexit(code)[0])
1330 return fuzz
1336 return fuzz
1331
1337
1332 def internalpatch(ui, repo, patchobj, strip, files=None, eolmode='strict',
1338 def internalpatch(ui, repo, patchobj, strip, files=None, eolmode='strict',
1333 similarity=0):
1339 similarity=0):
1334 """use builtin patch to apply <patchobj> to the working directory.
1340 """use builtin patch to apply <patchobj> to the working directory.
1335 returns whether patch was applied with fuzz factor."""
1341 returns whether patch was applied with fuzz factor."""
1336
1342
1337 if files is None:
1343 if files is None:
1338 files = {}
1344 files = {}
1339 if eolmode is None:
1345 if eolmode is None:
1340 eolmode = ui.config('patch', 'eol', 'strict')
1346 eolmode = ui.config('patch', 'eol', 'strict')
1341 if eolmode.lower() not in eolmodes:
1347 if eolmode.lower() not in eolmodes:
1342 raise util.Abort(_('unsupported line endings type: %s') % eolmode)
1348 raise util.Abort(_('unsupported line endings type: %s') % eolmode)
1343 eolmode = eolmode.lower()
1349 eolmode = eolmode.lower()
1344
1350
1345 store = filestore()
1351 store = filestore()
1346 backend = workingbackend(ui, repo, similarity)
1352 backend = workingbackend(ui, repo, similarity)
1347 try:
1353 try:
1348 fp = open(patchobj, 'rb')
1354 fp = open(patchobj, 'rb')
1349 except TypeError:
1355 except TypeError:
1350 fp = patchobj
1356 fp = patchobj
1351 try:
1357 try:
1352 ret = applydiff(ui, fp, files, backend, store, strip=strip,
1358 ret = applydiff(ui, fp, files, backend, store, strip=strip,
1353 eolmode=eolmode)
1359 eolmode=eolmode)
1354 finally:
1360 finally:
1355 if fp != patchobj:
1361 if fp != patchobj:
1356 fp.close()
1362 fp.close()
1357 files.update(dict.fromkeys(backend.close()))
1363 files.update(dict.fromkeys(backend.close()))
1358 store.close()
1364 store.close()
1359 if ret < 0:
1365 if ret < 0:
1360 raise PatchError(_('patch failed to apply'))
1366 raise PatchError(_('patch failed to apply'))
1361 return ret > 0
1367 return ret > 0
1362
1368
1363 def patch(ui, repo, patchname, strip=1, files=None, eolmode='strict',
1369 def patch(ui, repo, patchname, strip=1, files=None, eolmode='strict',
1364 similarity=0):
1370 similarity=0):
1365 """Apply <patchname> to the working directory.
1371 """Apply <patchname> to the working directory.
1366
1372
1367 'eolmode' specifies how end of lines should be handled. It can be:
1373 'eolmode' specifies how end of lines should be handled. It can be:
1368 - 'strict': inputs are read in binary mode, EOLs are preserved
1374 - 'strict': inputs are read in binary mode, EOLs are preserved
1369 - 'crlf': EOLs are ignored when patching and reset to CRLF
1375 - 'crlf': EOLs are ignored when patching and reset to CRLF
1370 - 'lf': EOLs are ignored when patching and reset to LF
1376 - 'lf': EOLs are ignored when patching and reset to LF
1371 - None: get it from user settings, default to 'strict'
1377 - None: get it from user settings, default to 'strict'
1372 'eolmode' is ignored when using an external patcher program.
1378 'eolmode' is ignored when using an external patcher program.
1373
1379
1374 Returns whether patch was applied with fuzz factor.
1380 Returns whether patch was applied with fuzz factor.
1375 """
1381 """
1376 patcher = ui.config('ui', 'patch')
1382 patcher = ui.config('ui', 'patch')
1377 if files is None:
1383 if files is None:
1378 files = {}
1384 files = {}
1379 try:
1385 try:
1380 if patcher:
1386 if patcher:
1381 return _externalpatch(ui, repo, patcher, patchname, strip,
1387 return _externalpatch(ui, repo, patcher, patchname, strip,
1382 files, similarity)
1388 files, similarity)
1383 return internalpatch(ui, repo, patchname, strip, files, eolmode,
1389 return internalpatch(ui, repo, patchname, strip, files, eolmode,
1384 similarity)
1390 similarity)
1385 except PatchError, err:
1391 except PatchError, err:
1386 raise util.Abort(str(err))
1392 raise util.Abort(str(err))
1387
1393
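# A minimal usage sketch, e.g. from an extension holding 'ui' and 'repo'
# objects ('fix.diff' is a hypothetical patch file):
#
#   >>> files = {}
#   >>> fuzz = patch(ui, repo, 'fix.diff', strip=1, files=files)
#
# Afterwards 'files' holds the touched filenames as keys, and 'fuzz' tells
# whether any hunk only applied with fuzz; failures surface as util.Abort.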
1388 def changedfiles(ui, repo, patchpath, strip=1):
1394 def changedfiles(ui, repo, patchpath, strip=1):
1389 backend = fsbackend(ui, repo.root)
1395 backend = fsbackend(ui, repo.root)
1390 fp = open(patchpath, 'rb')
1396 fp = open(patchpath, 'rb')
1391 try:
1397 try:
1392 changed = set()
1398 changed = set()
1393 for state, values in iterhunks(fp):
1399 for state, values in iterhunks(fp):
1394 if state == 'file':
1400 if state == 'file':
1395 afile, bfile, first_hunk, gp = values
1401 afile, bfile, first_hunk, gp = values
1396 if gp:
1402 if gp:
1397 changed.add(pathstrip(gp.path, strip - 1)[1])
1403 changed.add(pathstrip(gp.path, strip - 1)[1])
1398 if gp.op == 'RENAME':
1404 if gp.op == 'RENAME':
1399 changed.add(pathstrip(gp.oldpath, strip - 1)[1])
1405 changed.add(pathstrip(gp.oldpath, strip - 1)[1])
1400 if not first_hunk:
1406 if not first_hunk:
1401 continue
1407 continue
1402 current_file, create, remove = selectfile(
1408 current_file, create, remove = selectfile(
1403 backend, afile, bfile, first_hunk, strip, gp)
1409 backend, afile, bfile, first_hunk, strip, gp)
1404 changed.add(current_file)
1410 changed.add(current_file)
1405 elif state not in ('hunk', 'git'):
1411 elif state not in ('hunk', 'git'):
1406 raise util.Abort(_('unsupported parser state: %s') % state)
1412 raise util.Abort(_('unsupported parser state: %s') % state)
1407 return changed
1413 return changed
1408 finally:
1414 finally:
1409 fp.close()
1415 fp.close()
1410
1416
1411 def b85diff(to, tn):
1417 def b85diff(to, tn):
1412 '''print base85-encoded binary diff'''
1418 '''print base85-encoded binary diff'''
1413 def gitindex(text):
1419 def gitindex(text):
1414 if not text:
1420 if not text:
1415 return hex(nullid)
1421 return hex(nullid)
1416 l = len(text)
1422 l = len(text)
1417 s = util.sha1('blob %d\0' % l)
1423 s = util.sha1('blob %d\0' % l)
1418 s.update(text)
1424 s.update(text)
1419 return s.hexdigest()
1425 return s.hexdigest()
1420
1426
1421 def fmtline(line):
1427 def fmtline(line):
1422 l = len(line)
1428 l = len(line)
1423 if l <= 26:
1429 if l <= 26:
1424 l = chr(ord('A') + l - 1)
1430 l = chr(ord('A') + l - 1)
1425 else:
1431 else:
1426 l = chr(l - 26 + ord('a') - 1)
1432 l = chr(l - 26 + ord('a') - 1)
1427 return '%c%s\n' % (l, base85.b85encode(line, True))
1433 return '%c%s\n' % (l, base85.b85encode(line, True))
1428
1434
1429 def chunk(text, csize=52):
1435 def chunk(text, csize=52):
1430 l = len(text)
1436 l = len(text)
1431 i = 0
1437 i = 0
1432 while i < l:
1438 while i < l:
1433 yield text[i:i + csize]
1439 yield text[i:i + csize]
1434 i += csize
1440 i += csize
1435
1441
1436 tohash = gitindex(to)
1442 tohash = gitindex(to)
1437 tnhash = gitindex(tn)
1443 tnhash = gitindex(tn)
1438 if tohash == tnhash:
1444 if tohash == tnhash:
1439 return ""
1445 return ""
1440
1446
1441 # TODO: deltas
1447 # TODO: deltas
1442 ret = ['index %s..%s\nGIT binary patch\nliteral %s\n' %
1448 ret = ['index %s..%s\nGIT binary patch\nliteral %s\n' %
1443 (tohash, tnhash, len(tn))]
1449 (tohash, tnhash, len(tn))]
1444 for l in chunk(zlib.compress(tn)):
1450 for l in chunk(zlib.compress(tn)):
1445 ret.append(fmtline(l))
1451 ret.append(fmtline(l))
1446 ret.append('\n')
1452 ret.append('\n')
1447 return ''.join(ret)
1453 return ''.join(ret)
1448
1454
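# The leading character of each emitted base85 line encodes the decoded
# length of that line: 'A'-'Z' stand for 1-26 bytes and 'a'-'z' for 27-52,
# so a full 52-byte chunk is prefixed with 'z' and a trailing 10-byte chunk
# with 'J'. binhunk._read above applies the inverse mapping when reading a
# "GIT binary patch" hunk back in.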
1449 class GitDiffRequired(Exception):
1455 class GitDiffRequired(Exception):
1450 pass
1456 pass
1451
1457
1452 def diffopts(ui, opts=None, untrusted=False):
1458 def diffopts(ui, opts=None, untrusted=False):
1453 def get(key, name=None, getter=ui.configbool):
1459 def get(key, name=None, getter=ui.configbool):
1454 return ((opts and opts.get(key)) or
1460 return ((opts and opts.get(key)) or
1455 getter('diff', name or key, None, untrusted=untrusted))
1461 getter('diff', name or key, None, untrusted=untrusted))
1456 return mdiff.diffopts(
1462 return mdiff.diffopts(
1457 text=opts and opts.get('text'),
1463 text=opts and opts.get('text'),
1458 git=get('git'),
1464 git=get('git'),
1459 nodates=get('nodates'),
1465 nodates=get('nodates'),
1460 showfunc=get('show_function', 'showfunc'),
1466 showfunc=get('show_function', 'showfunc'),
1461 ignorews=get('ignore_all_space', 'ignorews'),
1467 ignorews=get('ignore_all_space', 'ignorews'),
1462 ignorewsamount=get('ignore_space_change', 'ignorewsamount'),
1468 ignorewsamount=get('ignore_space_change', 'ignorewsamount'),
1463 ignoreblanklines=get('ignore_blank_lines', 'ignoreblanklines'),
1469 ignoreblanklines=get('ignore_blank_lines', 'ignoreblanklines'),
1464 context=get('unified', getter=ui.config))
1470 context=get('unified', getter=ui.config))
1465
1471
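# These defaults normally come from hgrc, roughly:
#
#   [diff]
#   git = True
#   showfunc = True
#   ignorews = True
#   unified = 5
#
# with any truthy value in 'opts' (e.g. a --git command line flag) taking
# precedence over the configuration.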
1466 def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
1472 def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
1467 losedatafn=None, prefix=''):
1473 losedatafn=None, prefix=''):
1468 '''yields diff of changes to files between two nodes, or node and
1474 '''yields diff of changes to files between two nodes, or node and
1469 working directory.
1475 working directory.
1470
1476
1471 if node1 is None, use first dirstate parent instead.
1477 if node1 is None, use first dirstate parent instead.
1472 if node2 is None, compare node1 with working directory.
1478 if node2 is None, compare node1 with working directory.
1473
1479
1474 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
1480 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
1475 every time some change cannot be represented with the current
1481 every time some change cannot be represented with the current
1476 patch format. Return False to upgrade to git patch format, True to
1482 patch format. Return False to upgrade to git patch format, True to
1477 accept the loss or raise an exception to abort the diff. It is
1483 accept the loss or raise an exception to abort the diff. It is
1478 called with the name of current file being diffed as 'fn'. If set
1484 called with the name of current file being diffed as 'fn'. If set
1479 to None, patches will always be upgraded to git format when
1485 to None, patches will always be upgraded to git format when
1480 necessary.
1486 necessary.
1481
1487
1482 prefix is a filename prefix that is prepended to all filenames on
1488 prefix is a filename prefix that is prepended to all filenames on
1483 display (used for subrepos).
1489 display (used for subrepos).
1484 '''
1490 '''
1485
1491
1486 if opts is None:
1492 if opts is None:
1487 opts = mdiff.defaultopts
1493 opts = mdiff.defaultopts
1488
1494
1489 if not node1 and not node2:
1495 if not node1 and not node2:
1490 node1 = repo.dirstate.p1()
1496 node1 = repo.dirstate.p1()
1491
1497
1492 def lrugetfilectx():
1498 def lrugetfilectx():
1493 cache = {}
1499 cache = {}
1494 order = []
1500 order = []
1495 def getfilectx(f, ctx):
1501 def getfilectx(f, ctx):
1496 fctx = ctx.filectx(f, filelog=cache.get(f))
1502 fctx = ctx.filectx(f, filelog=cache.get(f))
1497 if f not in cache:
1503 if f not in cache:
1498 if len(cache) > 20:
1504 if len(cache) > 20:
1499 del cache[order.pop(0)]
1505 del cache[order.pop(0)]
1500 cache[f] = fctx.filelog()
1506 cache[f] = fctx.filelog()
1501 else:
1507 else:
1502 order.remove(f)
1508 order.remove(f)
1503 order.append(f)
1509 order.append(f)
1504 return fctx
1510 return fctx
1505 return getfilectx
1511 return getfilectx
1506 getfilectx = lrugetfilectx()
1512 getfilectx = lrugetfilectx()
1507
1513
1508 ctx1 = repo[node1]
1514 ctx1 = repo[node1]
1509 ctx2 = repo[node2]
1515 ctx2 = repo[node2]
1510
1516
1511 if not changes:
1517 if not changes:
1512 changes = repo.status(ctx1, ctx2, match=match)
1518 changes = repo.status(ctx1, ctx2, match=match)
1513 modified, added, removed = changes[:3]
1519 modified, added, removed = changes[:3]
1514
1520
1515 if not modified and not added and not removed:
1521 if not modified and not added and not removed:
1516 return []
1522 return []
1517
1523
1518 revs = None
1524 revs = None
1519 if not repo.ui.quiet:
1525 if not repo.ui.quiet:
1520 hexfunc = repo.ui.debugflag and hex or short
1526 hexfunc = repo.ui.debugflag and hex or short
1521 revs = [hexfunc(node) for node in [node1, node2] if node]
1527 revs = [hexfunc(node) for node in [node1, node2] if node]
1522
1528
1523 copy = {}
1529 copy = {}
1524 if opts.git or opts.upgrade:
1530 if opts.git or opts.upgrade:
1525 copy = copies.copies(repo, ctx1, ctx2, repo[nullid])[0]
1531 copy = copies.copies(repo, ctx1, ctx2, repo[nullid])[0]
1526
1532
1527 difffn = lambda opts, losedata: trydiff(repo, revs, ctx1, ctx2,
1533 difffn = lambda opts, losedata: trydiff(repo, revs, ctx1, ctx2,
1528 modified, added, removed, copy, getfilectx, opts, losedata, prefix)
1534 modified, added, removed, copy, getfilectx, opts, losedata, prefix)
1529 if opts.upgrade and not opts.git:
1535 if opts.upgrade and not opts.git:
1530 try:
1536 try:
1531 def losedata(fn):
1537 def losedata(fn):
1532 if not losedatafn or not losedatafn(fn=fn):
1538 if not losedatafn or not losedatafn(fn=fn):
1533 raise GitDiffRequired()
1539 raise GitDiffRequired()
1534 # Buffer the whole output until we are sure it can be generated
1540 # Buffer the whole output until we are sure it can be generated
1535 return list(difffn(opts.copy(git=False), losedata))
1541 return list(difffn(opts.copy(git=False), losedata))
1536 except GitDiffRequired:
1542 except GitDiffRequired:
1537 return difffn(opts.copy(git=True), None)
1543 return difffn(opts.copy(git=True), None)
1538 else:
1544 else:
1539 return difffn(opts, None)
1545 return difffn(opts, None)
1540
1546
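# A minimal sketch of diffing the working directory against its first
# parent in git format (assuming 'repo' is a localrepository):
#
#   >>> dopts = diffopts(repo.ui, {'git': True})
#   >>> text = ''.join(diff(repo, opts=dopts))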
1541 def difflabel(func, *args, **kw):
1547 def difflabel(func, *args, **kw):
1542 '''yields 2-tuples of (output, label) based on the output of func()'''
1548 '''yields 2-tuples of (output, label) based on the output of func()'''
1543 prefixes = [('diff', 'diff.diffline'),
1549 prefixes = [('diff', 'diff.diffline'),
1544 ('copy', 'diff.extended'),
1550 ('copy', 'diff.extended'),
1545 ('rename', 'diff.extended'),
1551 ('rename', 'diff.extended'),
1546 ('old', 'diff.extended'),
1552 ('old', 'diff.extended'),
1547 ('new', 'diff.extended'),
1553 ('new', 'diff.extended'),
1548 ('deleted', 'diff.extended'),
1554 ('deleted', 'diff.extended'),
1549 ('---', 'diff.file_a'),
1555 ('---', 'diff.file_a'),
1550 ('+++', 'diff.file_b'),
1556 ('+++', 'diff.file_b'),
1551 ('@@', 'diff.hunk'),
1557 ('@@', 'diff.hunk'),
1552 ('-', 'diff.deleted'),
1558 ('-', 'diff.deleted'),
1553 ('+', 'diff.inserted')]
1559 ('+', 'diff.inserted')]
1554
1560
1555 for chunk in func(*args, **kw):
1561 for chunk in func(*args, **kw):
1556 lines = chunk.split('\n')
1562 lines = chunk.split('\n')
1557 for i, line in enumerate(lines):
1563 for i, line in enumerate(lines):
1558 if i != 0:
1564 if i != 0:
1559 yield ('\n', '')
1565 yield ('\n', '')
1560 stripline = line
1566 stripline = line
1561 if line and line[0] in '+-':
1567 if line and line[0] in '+-':
1562 # highlight trailing whitespace, but only in changed lines
1568 # highlight trailing whitespace, but only in changed lines
1563 stripline = line.rstrip()
1569 stripline = line.rstrip()
1564 for prefix, label in prefixes:
1570 for prefix, label in prefixes:
1565 if stripline.startswith(prefix):
1571 if stripline.startswith(prefix):
1566 yield (stripline, label)
1572 yield (stripline, label)
1567 break
1573 break
1568 else:
1574 else:
1569 yield (line, '')
1575 yield (line, '')
1570 if line != stripline:
1576 if line != stripline:
1571 yield (line[len(stripline):], 'diff.trailingwhitespace')
1577 yield (line[len(stripline):], 'diff.trailingwhitespace')
1572
1578
1573 def diffui(*args, **kw):
1579 def diffui(*args, **kw):
1574 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
1580 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
1575 return difflabel(diff, *args, **kw)
1581 return difflabel(diff, *args, **kw)
1576
1582
1577
1583
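# A minimal sketch of feeding the labelled chunks to the ui, assuming the
# same 'repo' object as above:
#
#   >>> for chunk, label in diffui(repo, opts=diffopts(repo.ui, {'git': True})):
#   ...     repo.ui.write(chunk, label=label)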
1578 def _addmodehdr(header, omode, nmode):
1584 def _addmodehdr(header, omode, nmode):
1579 if omode != nmode:
1585 if omode != nmode:
1580 header.append('old mode %s\n' % omode)
1586 header.append('old mode %s\n' % omode)
1581 header.append('new mode %s\n' % nmode)
1587 header.append('new mode %s\n' % nmode)
1582
1588
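# For example, flipping the executable bit on an otherwise unchanged file
# produces the two extended header lines
#
#   old mode 100644
#   new mode 100755
#
# while identical modes contribute nothing.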
1583 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
1589 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
1584 copy, getfilectx, opts, losedatafn, prefix):
1590 copy, getfilectx, opts, losedatafn, prefix):
1585
1591
1586 def join(f):
1592 def join(f):
1587 return os.path.join(prefix, f)
1593 return os.path.join(prefix, f)
1588
1594
1589 date1 = util.datestr(ctx1.date())
1595 date1 = util.datestr(ctx1.date())
1590 man1 = ctx1.manifest()
1596 man1 = ctx1.manifest()
1591
1597
1592 gone = set()
1598 gone = set()
1593 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
1599 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
1594
1600
1595 copyto = dict([(v, k) for k, v in copy.items()])
1601 copyto = dict([(v, k) for k, v in copy.items()])
1596
1602
1597 if opts.git:
1603 if opts.git:
1598 revs = None
1604 revs = None
1599
1605
1600 for f in sorted(modified + added + removed):
1606 for f in sorted(modified + added + removed):
1601 to = None
1607 to = None
1602 tn = None
1608 tn = None
1603 dodiff = True
1609 dodiff = True
1604 header = []
1610 header = []
1605 if f in man1:
1611 if f in man1:
1606 to = getfilectx(f, ctx1).data()
1612 to = getfilectx(f, ctx1).data()
1607 if f not in removed:
1613 if f not in removed:
1608 tn = getfilectx(f, ctx2).data()
1614 tn = getfilectx(f, ctx2).data()
1609 a, b = f, f
1615 a, b = f, f
1610 if opts.git or losedatafn:
1616 if opts.git or losedatafn:
1611 if f in added:
1617 if f in added:
1612 mode = gitmode[ctx2.flags(f)]
1618 mode = gitmode[ctx2.flags(f)]
1613 if f in copy or f in copyto:
1619 if f in copy or f in copyto:
1614 if opts.git:
1620 if opts.git:
1615 if f in copy:
1621 if f in copy:
1616 a = copy[f]
1622 a = copy[f]
1617 else:
1623 else:
1618 a = copyto[f]
1624 a = copyto[f]
1619 omode = gitmode[man1.flags(a)]
1625 omode = gitmode[man1.flags(a)]
1620 _addmodehdr(header, omode, mode)
1626 _addmodehdr(header, omode, mode)
1621 if a in removed and a not in gone:
1627 if a in removed and a not in gone:
1622 op = 'rename'
1628 op = 'rename'
1623 gone.add(a)
1629 gone.add(a)
1624 else:
1630 else:
1625 op = 'copy'
1631 op = 'copy'
1626 header.append('%s from %s\n' % (op, join(a)))
1632 header.append('%s from %s\n' % (op, join(a)))
1627 header.append('%s to %s\n' % (op, join(f)))
1633 header.append('%s to %s\n' % (op, join(f)))
1628 to = getfilectx(a, ctx1).data()
1634 to = getfilectx(a, ctx1).data()
1629 else:
1635 else:
1630 losedatafn(f)
1636 losedatafn(f)
1631 else:
1637 else:
1632 if opts.git:
1638 if opts.git:
1633 header.append('new file mode %s\n' % mode)
1639 header.append('new file mode %s\n' % mode)
1634 elif ctx2.flags(f):
1640 elif ctx2.flags(f):
1635 losedatafn(f)
1641 losedatafn(f)
1636 # In theory, if tn was copied or renamed we should check
1642 # In theory, if tn was copied or renamed we should check
1637 # if the source is binary too but the copy record already
1643 # if the source is binary too but the copy record already
1638 # forces git mode.
1644 # forces git mode.
1639 if util.binary(tn):
1645 if util.binary(tn):
1640 if opts.git:
1646 if opts.git:
1641 dodiff = 'binary'
1647 dodiff = 'binary'
1642 else:
1648 else:
1643 losedatafn(f)
1649 losedatafn(f)
1644 if not opts.git and not tn:
1650 if not opts.git and not tn:
1645 # regular diffs cannot represent new empty file
1651 # regular diffs cannot represent new empty file
1646 losedatafn(f)
1652 losedatafn(f)
1647 elif f in removed:
1653 elif f in removed:
1648 if opts.git:
1654 if opts.git:
1649 # have we already reported a copy above?
1655 # have we already reported a copy above?
1650 if ((f in copy and copy[f] in added
1656 if ((f in copy and copy[f] in added
1651 and copyto[copy[f]] == f) or
1657 and copyto[copy[f]] == f) or
1652 (f in copyto and copyto[f] in added
1658 (f in copyto and copyto[f] in added
1653 and copy[copyto[f]] == f)):
1659 and copy[copyto[f]] == f)):
1654 dodiff = False
1660 dodiff = False
1655 else:
1661 else:
1656 header.append('deleted file mode %s\n' %
1662 header.append('deleted file mode %s\n' %
1657 gitmode[man1.flags(f)])
1663 gitmode[man1.flags(f)])
1658 elif not to or util.binary(to):
1664 elif not to or util.binary(to):
1659 # regular diffs cannot represent empty file deletion
1665 # regular diffs cannot represent empty file deletion
1660 losedatafn(f)
1666 losedatafn(f)
1661 else:
1667 else:
1662 oflag = man1.flags(f)
1668 oflag = man1.flags(f)
1663 nflag = ctx2.flags(f)
1669 nflag = ctx2.flags(f)
1664 binary = util.binary(to) or util.binary(tn)
1670 binary = util.binary(to) or util.binary(tn)
1665 if opts.git:
1671 if opts.git:
1666 _addmodehdr(header, gitmode[oflag], gitmode[nflag])
1672 _addmodehdr(header, gitmode[oflag], gitmode[nflag])
1667 if binary:
1673 if binary:
1668 dodiff = 'binary'
1674 dodiff = 'binary'
1669 elif binary or nflag != oflag:
1675 elif binary or nflag != oflag:
1670 losedatafn(f)
1676 losedatafn(f)
1671 if opts.git:
1677 if opts.git:
1672 header.insert(0, mdiff.diffline(revs, join(a), join(b), opts))
1678 header.insert(0, mdiff.diffline(revs, join(a), join(b), opts))
1673
1679
1674 if dodiff:
1680 if dodiff:
1675 if dodiff == 'binary':
1681 if dodiff == 'binary':
1676 text = b85diff(to, tn)
1682 text = b85diff(to, tn)
1677 else:
1683 else:
1678 text = mdiff.unidiff(to, date1,
1684 text = mdiff.unidiff(to, date1,
1679 # ctx2 date may be dynamic
1685 # ctx2 date may be dynamic
1680 tn, util.datestr(ctx2.date()),
1686 tn, util.datestr(ctx2.date()),
1681 join(a), join(b), revs, opts=opts)
1687 join(a), join(b), revs, opts=opts)
1682 if header and (text or len(header) > 1):
1688 if header and (text or len(header) > 1):
1683 yield ''.join(header)
1689 yield ''.join(header)
1684 if text:
1690 if text:
1685 yield text
1691 yield text
1686
1692
1687 def diffstatsum(stats):
1693 def diffstatsum(stats):
1688 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
1694 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
1689 for f, a, r, b in stats:
1695 for f, a, r, b in stats:
1690 maxfile = max(maxfile, encoding.colwidth(f))
1696 maxfile = max(maxfile, encoding.colwidth(f))
1691 maxtotal = max(maxtotal, a + r)
1697 maxtotal = max(maxtotal, a + r)
1692 addtotal += a
1698 addtotal += a
1693 removetotal += r
1699 removetotal += r
1694 binary = binary or b
1700 binary = binary or b
1695
1701
1696 return maxfile, maxtotal, addtotal, removetotal, binary
1702 return maxfile, maxtotal, addtotal, removetotal, binary
1697
1703
1698 def diffstatdata(lines):
1704 def diffstatdata(lines):
1699 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
1705 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
1700
1706
1701 results = []
1707 results = []
1702 filename, adds, removes = None, 0, 0
1708 filename, adds, removes = None, 0, 0
1703
1709
1704 def addresult():
1710 def addresult():
1705 if filename:
1711 if filename:
1706 isbinary = adds == 0 and removes == 0
1712 isbinary = adds == 0 and removes == 0
1707 results.append((filename, adds, removes, isbinary))
1713 results.append((filename, adds, removes, isbinary))
1708
1714
1709 for line in lines:
1715 for line in lines:
1710 if line.startswith('diff'):
1716 if line.startswith('diff'):
1711 addresult()
1717 addresult()
1712 # set numbers to 0 anyway when starting new file
1718 # set numbers to 0 anyway when starting new file
1713 adds, removes = 0, 0
1719 adds, removes = 0, 0
1714 if line.startswith('diff --git'):
1720 if line.startswith('diff --git'):
1715 filename = gitre.search(line).group(1)
1721 filename = gitre.search(line).group(1)
1716 elif line.startswith('diff -r'):
1722 elif line.startswith('diff -r'):
1717 # format: "diff -r ... -r ... filename"
1723 # format: "diff -r ... -r ... filename"
1718 filename = diffre.search(line).group(1)
1724 filename = diffre.search(line).group(1)
1719 elif line.startswith('+') and not line.startswith('+++'):
1725 elif line.startswith('+') and not line.startswith('+++'):
1720 adds += 1
1726 adds += 1
1721 elif line.startswith('-') and not line.startswith('---'):
1727 elif line.startswith('-') and not line.startswith('---'):
1722 removes += 1
1728 removes += 1
1723 addresult()
1729 addresult()
1724 return results
1730 return results
1725
1731
1726 def diffstat(lines, width=80, git=False):
1732 def diffstat(lines, width=80, git=False):
1727 output = []
1733 output = []
1728 stats = diffstatdata(lines)
1734 stats = diffstatdata(lines)
1729 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
1735 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
1730
1736
1731 countwidth = len(str(maxtotal))
1737 countwidth = len(str(maxtotal))
1732 if hasbinary and countwidth < 3:
1738 if hasbinary and countwidth < 3:
1733 countwidth = 3
1739 countwidth = 3
1734 graphwidth = width - countwidth - maxname - 6
1740 graphwidth = width - countwidth - maxname - 6
1735 if graphwidth < 10:
1741 if graphwidth < 10:
1736 graphwidth = 10
1742 graphwidth = 10
1737
1743
1738 def scale(i):
1744 def scale(i):
1739 if maxtotal <= graphwidth:
1745 if maxtotal <= graphwidth:
1740 return i
1746 return i
1741 # If diffstat runs out of room it doesn't print anything,
1747 # If diffstat runs out of room it doesn't print anything,
1742 # which isn't very useful, so always print at least one + or -
1748 # which isn't very useful, so always print at least one + or -
1743 # if there were at least some changes.
1749 # if there were at least some changes.
1744 return max(i * graphwidth // maxtotal, int(bool(i)))
1750 return max(i * graphwidth // maxtotal, int(bool(i)))
1745
1751
1746 for filename, adds, removes, isbinary in stats:
1752 for filename, adds, removes, isbinary in stats:
1747 if git and isbinary:
1753 if git and isbinary:
1748 count = 'Bin'
1754 count = 'Bin'
1749 else:
1755 else:
1750 count = adds + removes
1756 count = adds + removes
1751 pluses = '+' * scale(adds)
1757 pluses = '+' * scale(adds)
1752 minuses = '-' * scale(removes)
1758 minuses = '-' * scale(removes)
1753 output.append(' %s%s | %*s %s%s\n' %
1759 output.append(' %s%s | %*s %s%s\n' %
1754 (filename, ' ' * (maxname - encoding.colwidth(filename)),
1760 (filename, ' ' * (maxname - encoding.colwidth(filename)),
1755 countwidth, count, pluses, minuses))
1761 countwidth, count, pluses, minuses))
1756
1762
1757 if stats:
1763 if stats:
1758 output.append(_(' %d files changed, %d insertions(+), %d deletions(-)\n')
1764 output.append(_(' %d files changed, %d insertions(+), %d deletions(-)\n')
1759 % (len(stats), totaladds, totalremoves))
1765 % (len(stats), totaladds, totalremoves))
1760
1766
1761 return ''.join(output)
1767 return ''.join(output)
1762
1768
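# A minimal sketch, assuming 'text' holds a diff produced by diff() above
# or read from 'hg export':
#
#   >>> print diffstat(text.splitlines(True), git=True)
#
# which renders one " filename | count +++---" row per file, followed by
# the "files changed, insertions(+), deletions(-)" summary line.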
1763 def diffstatui(*args, **kw):
1769 def diffstatui(*args, **kw):
1764 '''like diffstat(), but yields 2-tuples of (output, label) for
1770 '''like diffstat(), but yields 2-tuples of (output, label) for
1765 ui.write()
1771 ui.write()
1766 '''
1772 '''
1767
1773
1768 for line in diffstat(*args, **kw).splitlines():
1774 for line in diffstat(*args, **kw).splitlines():
1769 if line and line[-1] in '+-':
1775 if line and line[-1] in '+-':
1770 name, graph = line.rsplit(' ', 1)
1776 name, graph = line.rsplit(' ', 1)
1771 yield (name + ' ', '')
1777 yield (name + ' ', '')
1772 m = re.search(r'\++', graph)
1778 m = re.search(r'\++', graph)
1773 if m:
1779 if m:
1774 yield (m.group(0), 'diffstat.inserted')
1780 yield (m.group(0), 'diffstat.inserted')
1775 m = re.search(r'-+', graph)
1781 m = re.search(r'-+', graph)
1776 if m:
1782 if m:
1777 yield (m.group(0), 'diffstat.deleted')
1783 yield (m.group(0), 'diffstat.deleted')
1778 else:
1784 else:
1779 yield (line, '')
1785 yield (line, '')
1780 yield ('\n', '')
1786 yield ('\n', '')
@@ -1,123 +1,108 b''
1 $ "$TESTDIR/hghave" symlink || exit 80
1 $ "$TESTDIR/hghave" symlink || exit 80
2
2
3 $ echo "[extensions]" >> $HGRCPATH
3 $ echo "[extensions]" >> $HGRCPATH
4 $ echo "mq=" >> $HGRCPATH
4 $ echo "mq=" >> $HGRCPATH
5
5
6 $ hg init
6 $ hg init
7 $ hg qinit
7 $ hg qinit
8 $ hg qnew base.patch
8 $ hg qnew base.patch
9 $ echo aaa > a
9 $ echo aaa > a
10 $ echo bbb > b
10 $ echo bbb > b
11 $ echo ccc > c
11 $ echo ccc > c
12 $ hg add a b c
12 $ hg add a b c
13 $ hg qrefresh
13 $ hg qrefresh
14 $ $TESTDIR/readlink.py a
14 $ $TESTDIR/readlink.py a
15 a -> a not a symlink
15 a -> a not a symlink
16
16
17
17
18 test replacing a file with a symlink
18 test replacing a file with a symlink
19
19
20 $ hg qnew symlink.patch
20 $ hg qnew symlink.patch
21 $ rm a
21 $ rm a
22 $ ln -s b a
22 $ ln -s b a
23 $ hg qrefresh --git
23 $ hg qrefresh --git
24 $ $TESTDIR/readlink.py a
24 $ $TESTDIR/readlink.py a
25 a -> b
25 a -> b
26
26
27 $ hg qpop
27 $ hg qpop
28 popping symlink.patch
28 popping symlink.patch
29 now at: base.patch
29 now at: base.patch
30 $ hg qpush
30 $ hg qpush
31 applying symlink.patch
31 applying symlink.patch
32 now at: symlink.patch
32 now at: symlink.patch
33 $ $TESTDIR/readlink.py a
33 $ $TESTDIR/readlink.py a
34 a -> b
34 a -> b
35
35
36
36
37 test updating a symlink
37 test updating a symlink
38
38
39 $ rm a
39 $ rm a
40 $ ln -s c a
40 $ ln -s c a
41 $ hg qnew --git -f updatelink
41 $ hg qnew --git -f updatelink
42 $ $TESTDIR/readlink.py a
42 $ $TESTDIR/readlink.py a
43 a -> c
43 a -> c
44 $ hg qpop
44 $ hg qpop
45 popping updatelink
45 popping updatelink
46 now at: symlink.patch
46 now at: symlink.patch
47 $ hg qpush --debug
47 $ hg qpush --debug
48 applying updatelink
48 applying updatelink
49 patching file a
49 patching file a
50 a
50 a
51 now at: updatelink
51 now at: updatelink
52 $ $TESTDIR/readlink.py a
52 $ $TESTDIR/readlink.py a
53 a -> c
53 a -> c
54 $ hg st
54 $ hg st
55
55
56
56
57 test replacing a symlink with a file
57 test replacing a symlink with a file
58
58
59 $ ln -s c s
59 $ ln -s c s
60 $ hg add s
60 $ hg add s
61 $ hg qnew --git -f addlink
61 $ hg qnew --git -f addlink
62 $ rm s
62 $ rm s
63 $ echo sss > s
63 $ echo sss > s
64 $ hg qnew --git -f replacelinkwithfile
64 $ hg qnew --git -f replacelinkwithfile
65 $ hg qpop
65 $ hg qpop
66 popping replacelinkwithfile
66 popping replacelinkwithfile
67 now at: addlink
67 now at: addlink
68 $ hg qpush
68 $ hg qpush
69 applying replacelinkwithfile
69 applying replacelinkwithfile
70 now at: replacelinkwithfile
70 now at: replacelinkwithfile
71 $ cat s
71 $ cat s
72 sss
72 sss
73 $ hg st
73 $ hg st
74
74
75
75
76 test symlink removal
76 test symlink removal
77
77
78 $ hg qnew removesl.patch
78 $ hg qnew removesl.patch
79 $ hg rm a
79 $ hg rm a
80 $ hg qrefresh --git
80 $ hg qrefresh --git
81 $ hg qpop
81 $ hg qpop
82 popping removesl.patch
82 popping removesl.patch
83 now at: replacelinkwithfile
83 now at: replacelinkwithfile
84 $ hg qpush
84 $ hg qpush
85 applying removesl.patch
85 applying removesl.patch
86 now at: removesl.patch
86 now at: removesl.patch
87 $ hg st -c
87 $ hg st -c
88 C b
88 C b
89 C c
89 C c
90 C s
90 C s
91
91
92 replace broken symlink with another broken symlink
92 replace broken symlink with another broken symlink
93
93
94 $ ln -s linka linka
94 $ ln -s linka linka
95 $ hg add linka
95 $ hg add linka
96 $ hg qnew link
96 $ hg qnew link
97 $ hg mv linka linkb
97 $ hg mv linka linkb
98 $ rm linkb
98 $ rm linkb
99 $ ln -s linkb linkb
99 $ ln -s linkb linkb
100 $ hg qnew movelink
100 $ hg qnew movelink
101 $ hg qpop
101 $ hg qpop
102 popping movelink
102 popping movelink
103 now at: link
103 now at: link
104 $ hg qpush
104 $ hg qpush
105 applying movelink
105 applying movelink
106 now at: movelink
106 now at: movelink
107 $ $TESTDIR/readlink.py linkb
107 $ $TESTDIR/readlink.py linkb
108 linkb -> linkb
108 linkb -> linkb
109
110 check patch does not overwrite untracked symlinks
111
112 $ hg qpop
113 popping movelink
114 now at: link
115 $ ln -s linkbb linkb
116 $ hg qpush
117 applying movelink
118 cannot create linkb: destination already exists
119 1 out of 1 hunks FAILED -- saving rejects to file linkb.rej
120 patch failed, unable to continue (try -v)
121 patch failed, rejects left in working dir
122 errors during apply, please fix and refresh movelink
123 [2]