configitems: register the 'patch.eol' config
Author: marmoute
Changeset: r33226:dd50a370 (branch: default)
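This changeset adds a registration for the 'patch.eol' option, with a default of 'strict', to the central configitems table shown below. The point of registering an item is that config lookups can rely on the declared default instead of repeating it at every call site; the existing self.ui.configint("patch", "fuzz") call in patch.py, which passes no inline fallback and leans on the ('patch', 'fuzz', default=2) entry in this file, illustrates the pattern. The following self-contained sketch mimics the registration mechanism from this file; the lookup() helper is hypothetical and only stands in for a ui.config()-style query that consults the registry.

import functools

class configitem(object):
    """Minimal stand-in for the configitem class declared below."""
    def __init__(self, section, name, default=None):
        self.section = section
        self.name = name
        self.default = default

coreitems = {}

def _register(configtable, *args, **kwargs):
    item = configitem(*args, **kwargs)
    configtable.setdefault(item.section, {})[item.name] = item

coreconfigitem = functools.partial(_register, coreitems)

# The registration this changeset introduces:
coreconfigitem('patch', 'eol', default='strict')

def lookup(userconfig, section, name):
    # Hypothetical ui.config()-style query: fall back to the registered
    # default when the user has not set the option.
    item = coreitems.get(section, {}).get(name)
    fallback = item.default if item is not None else None
    return userconfig.get((section, name), fallback)

print(lookup({}, 'patch', 'eol'))                          # -> 'strict'
print(lookup({('patch', 'eol'): 'auto'}, 'patch', 'eol'))  # -> 'auto'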
configitems.py
@@ -1,154 +1,157 @@
# configitems.py - centralized declaration of configuration option
#
# Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import functools

from . import (
    error,
)

def loadconfigtable(ui, extname, configtable):
    """update config item known to the ui with the extension ones"""
    for section, items in configtable.items():
        knownitems = ui._knownconfig.setdefault(section, {})
        knownkeys = set(knownitems)
        newkeys = set(items)
        for key in sorted(knownkeys & newkeys):
            msg = "extension '%s' overwrite config item '%s.%s'"
            msg %= (extname, section, key)
            ui.develwarn(msg, config='warn-config')

        knownitems.update(items)

class configitem(object):
    """represent a known config item

    :section: the official config section where to find this item,
    :name: the official name within the section,
    :default: default value for this item,
    """

    def __init__(self, section, name, default=None):
        self.section = section
        self.name = name
        self.default = default

coreitems = {}

def _register(configtable, *args, **kwargs):
    item = configitem(*args, **kwargs)
    section = configtable.setdefault(item.section, {})
    if item.name in section:
        msg = "duplicated config item registration for '%s.%s'"
        raise error.ProgrammingError(msg % (item.section, item.name))
    section[item.name] = item

# Registering actual config items

def getitemregister(configtable):
    return functools.partial(_register, configtable)

coreconfigitem = getitemregister(coreitems)

coreconfigitem('auth', 'cookiefile',
    default=None,
)
# bookmarks.pushing: internal hack for discovery
coreconfigitem('bookmarks', 'pushing',
    default=list,
)
# bundle.mainreporoot: internal hack for bundlerepo
coreconfigitem('bundle', 'mainreporoot',
    default='',
)
# bundle.reorder: experimental config
coreconfigitem('bundle', 'reorder',
    default='auto',
)
coreconfigitem('color', 'mode',
    default='auto',
)
coreconfigitem('devel', 'all-warnings',
    default=False,
)
coreconfigitem('devel', 'bundle2.debug',
    default=False,
)
coreconfigitem('devel', 'check-locks',
    default=False,
)
coreconfigitem('devel', 'check-relroot',
    default=False,
)
coreconfigitem('devel', 'disableloaddefaultcerts',
    default=False,
)
coreconfigitem('devel', 'legacy.exchange',
    default=list,
)
coreconfigitem('devel', 'servercafile',
    default='',
)
coreconfigitem('devel', 'serverexactprotocol',
    default='',
)
coreconfigitem('devel', 'serverrequirecert',
    default=False,
)
coreconfigitem('devel', 'strip-obsmarkers',
    default=True,
)
coreconfigitem('hostsecurity', 'ciphers',
    default=None,
)
coreconfigitem('hostsecurity', 'disabletls10warning',
    default=False,
)
coreconfigitem('patch', 'eol',
    default='strict',
)
coreconfigitem('patch', 'fuzz',
    default=2,
)
coreconfigitem('server', 'bundle1',
    default=True,
)
coreconfigitem('server', 'bundle1gd',
    default=None,
)
coreconfigitem('server', 'compressionengines',
    default=list,
)
coreconfigitem('server', 'concurrent-push-mode',
    default='strict',
)
coreconfigitem('server', 'disablefullbundle',
    default=False,
)
coreconfigitem('server', 'maxhttpheaderlen',
    default=1024,
)
coreconfigitem('server', 'preferuncompressed',
    default=False,
)
coreconfigitem('server', 'uncompressedallowsecret',
    default=False,
)
coreconfigitem('server', 'validate',
    default=False,
)
coreconfigitem('server', 'zliblevel',
    default=-1,
)
coreconfigitem('ui', 'clonebundleprefers',
    default=list,
)
coreconfigitem('ui', 'interactive',
    default=None,
)
coreconfigitem('ui', 'quiet',
    default=False,
)
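The second file in the changeset, patch.py, is the main consumer of the [patch] section. The portion reproduced below is unchanged context and breaks off inside the recordhunk class: it includes the eolmodes list and the patchfile class whose eolmode='strict' default mirrors the value registered above, plus the ui.configint("patch", "fuzz") lookup that already relies on a registered default. The hunk that actually touches patch.py falls outside this listing, so the runnable mini-demo below only mirrors the end-of-line normalization that patchfile.__init__ and patchfile.writelines perform; it is a sketch for orientation under that assumption, not code from the commit.

eolmodes = ['strict', 'crlf', 'lf', 'auto']

def normalize(lines, eolmode):
    # Mirror of patchfile.__init__: under any non-strict mode, file data is
    # normalized to '\n' before hunks are applied.
    if eolmode == 'strict':
        return lines
    return [l[:-2] + '\n' if l.endswith('\r\n') else l for l in lines]

def denormalize(lines, eolmode, fileeol='\r\n'):
    # Mirror of patchfile.writelines: re-apply the requested EOL on write-out.
    eol = {'auto': fileeol, 'crlf': '\r\n', 'lf': '\n'}.get(eolmode, '\n')
    if eolmode == 'strict' or eol == '\n':
        return lines
    return [l[:-1] + eol if l.endswith('\n') else l for l in lines]

sample = ['keep\r\n', 'these\r\n']
print(normalize(sample, 'auto'))                        # ['keep\n', 'these\n']
print(denormalize(normalize(sample, 'auto'), 'auto'))   # back to '\r\n' endings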
patch.py
@@ -1,2746 +1,2746 @@
# patch.py - patch file parsing routines
#
# Copyright 2006 Brendan Cully <brendan@kublai.com>
# Copyright 2007 Chris Mason <chris.mason@oracle.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import collections
import copy
import email
import errno
import hashlib
import os
import posixpath
import re
import shutil
import tempfile
import zlib

from .i18n import _
from .node import (
    hex,
    short,
)
from . import (
    copies,
    encoding,
    error,
    mail,
    mdiff,
    pathutil,
    policy,
    pycompat,
    scmutil,
    similar,
    util,
    vfs as vfsmod,
)

diffhelpers = policy.importmod(r'diffhelpers')
stringio = util.stringio

gitre = re.compile(br'diff --git a/(.*) b/(.*)')
tabsplitter = re.compile(br'(\t+|[^\t]+)')

class PatchError(Exception):
    pass


# public functions

def split(stream):
    '''return an iterator of individual patches from a stream'''
    def isheader(line, inheader):
        if inheader and line[0] in (' ', '\t'):
            # continuation
            return True
        if line[0] in (' ', '-', '+'):
            # diff line - don't check for header pattern in there
            return False
        l = line.split(': ', 1)
        return len(l) == 2 and ' ' not in l[0]

    def chunk(lines):
        return stringio(''.join(lines))

    def hgsplit(stream, cur):
        inheader = True

        for line in stream:
            if not line.strip():
                inheader = False
            if not inheader and line.startswith('# HG changeset patch'):
                yield chunk(cur)
                cur = []
                inheader = True

            cur.append(line)

        if cur:
            yield chunk(cur)

    def mboxsplit(stream, cur):
        for line in stream:
            if line.startswith('From '):
                for c in split(chunk(cur[1:])):
                    yield c
                cur = []

            cur.append(line)

        if cur:
            for c in split(chunk(cur[1:])):
                yield c

    def mimesplit(stream, cur):
        def msgfp(m):
            fp = stringio()
            g = email.Generator.Generator(fp, mangle_from_=False)
            g.flatten(m)
            fp.seek(0)
            return fp

        for line in stream:
            cur.append(line)
        c = chunk(cur)

        m = email.Parser.Parser().parse(c)
        if not m.is_multipart():
            yield msgfp(m)
        else:
            ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
            for part in m.walk():
                ct = part.get_content_type()
                if ct not in ok_types:
                    continue
                yield msgfp(part)

    def headersplit(stream, cur):
        inheader = False

        for line in stream:
            if not inheader and isheader(line, inheader):
                yield chunk(cur)
                cur = []
                inheader = True
            if inheader and not isheader(line, inheader):
                inheader = False

            cur.append(line)

        if cur:
            yield chunk(cur)

    def remainder(cur):
        yield chunk(cur)

    class fiter(object):
        def __init__(self, fp):
            self.fp = fp

        def __iter__(self):
            return self

        def next(self):
            l = self.fp.readline()
            if not l:
                raise StopIteration
            return l

    inheader = False
    cur = []

    mimeheaders = ['content-type']

    if not util.safehasattr(stream, 'next'):
        # http responses, for example, have readline but not next
        stream = fiter(stream)

    for line in stream:
        cur.append(line)
        if line.startswith('# HG changeset patch'):
            return hgsplit(stream, cur)
        elif line.startswith('From '):
            return mboxsplit(stream, cur)
        elif isheader(line, inheader):
            inheader = True
            if line.split(':', 1)[0].lower() in mimeheaders:
                # let email parser handle this
                return mimesplit(stream, cur)
        elif line.startswith('--- ') and inheader:
            # No evil headers seen by diff start, split by hand
            return headersplit(stream, cur)
        # Not enough info, keep reading

    # if we are here, we have a very plain patch
    return remainder(cur)

## Some facility for extensible patch parsing:
# list of pairs ("header to match", "data key")
patchheadermap = [('Date', 'date'),
                  ('Branch', 'branch'),
                  ('Node ID', 'nodeid'),
                  ]

def extract(ui, fileobj):
    '''extract patch from data read from fileobj.

    patch can be a normal patch or contained in an email message.

    return a dictionary. Standard keys are:
      - filename,
      - message,
      - user,
      - date,
      - branch,
      - node,
      - p1,
      - p2.
    Any item can be missing from the dictionary. If filename is missing,
    fileobj did not contain a patch. Caller must unlink filename when done.'''

    # attempt to detect the start of a patch
    # (this heuristic is borrowed from quilt)
    diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
                        r'retrieving revision [0-9]+(\.[0-9]+)*$|'
                        r'---[ \t].*?^\+\+\+[ \t]|'
                        r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)

    data = {}
    fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
    tmpfp = os.fdopen(fd, pycompat.sysstr('w'))
    try:
        msg = email.Parser.Parser().parse(fileobj)

        subject = msg['Subject'] and mail.headdecode(msg['Subject'])
        data['user'] = msg['From'] and mail.headdecode(msg['From'])
        if not subject and not data['user']:
            # Not an email, restore parsed headers if any
            subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'

        # should try to parse msg['Date']
        parents = []

        if subject:
            if subject.startswith('[PATCH'):
                pend = subject.find(']')
                if pend >= 0:
                    subject = subject[pend + 1:].lstrip()
            subject = re.sub(r'\n[ \t]+', ' ', subject)
            ui.debug('Subject: %s\n' % subject)
        if data['user']:
            ui.debug('From: %s\n' % data['user'])
        diffs_seen = 0
        ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
        message = ''
        for part in msg.walk():
            content_type = part.get_content_type()
            ui.debug('Content-Type: %s\n' % content_type)
            if content_type not in ok_types:
                continue
            payload = part.get_payload(decode=True)
            m = diffre.search(payload)
            if m:
                hgpatch = False
                hgpatchheader = False
                ignoretext = False

                ui.debug('found patch at byte %d\n' % m.start(0))
                diffs_seen += 1
                cfp = stringio()
                for line in payload[:m.start(0)].splitlines():
                    if line.startswith('# HG changeset patch') and not hgpatch:
                        ui.debug('patch generated by hg export\n')
                        hgpatch = True
                        hgpatchheader = True
                        # drop earlier commit message content
                        cfp.seek(0)
                        cfp.truncate()
                        subject = None
                    elif hgpatchheader:
                        if line.startswith('# User '):
                            data['user'] = line[7:]
                            ui.debug('From: %s\n' % data['user'])
                        elif line.startswith("# Parent "):
                            parents.append(line[9:].lstrip())
                        elif line.startswith("# "):
                            for header, key in patchheadermap:
                                prefix = '# %s ' % header
                                if line.startswith(prefix):
                                    data[key] = line[len(prefix):]
                        else:
                            hgpatchheader = False
                    elif line == '---':
                        ignoretext = True
                    if not hgpatchheader and not ignoretext:
                        cfp.write(line)
                        cfp.write('\n')
                message = cfp.getvalue()
                if tmpfp:
                    tmpfp.write(payload)
                    if not payload.endswith('\n'):
                        tmpfp.write('\n')
            elif not diffs_seen and message and content_type == 'text/plain':
                message += '\n' + payload
    except: # re-raises
        tmpfp.close()
        os.unlink(tmpname)
        raise

    if subject and not message.startswith(subject):
        message = '%s\n%s' % (subject, message)
    data['message'] = message
    tmpfp.close()
    if parents:
        data['p1'] = parents.pop(0)
        if parents:
            data['p2'] = parents.pop(0)

    if diffs_seen:
        data['filename'] = tmpname
    else:
        os.unlink(tmpname)
    return data

class patchmeta(object):
    """Patched file metadata

    'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
    or COPY. 'path' is patched file path. 'oldpath' is set to the
    origin file when 'op' is either COPY or RENAME, None otherwise. If
    file mode is changed, 'mode' is a tuple (islink, isexec) where
    'islink' is True if the file is a symlink and 'isexec' is True if
    the file is executable. Otherwise, 'mode' is None.
    """
    def __init__(self, path):
        self.path = path
        self.oldpath = None
        self.mode = None
        self.op = 'MODIFY'
        self.binary = False

    def setmode(self, mode):
        islink = mode & 0o20000
        isexec = mode & 0o100
        self.mode = (islink, isexec)

    def copy(self):
        other = patchmeta(self.path)
        other.oldpath = self.oldpath
        other.mode = self.mode
        other.op = self.op
        other.binary = self.binary
        return other

    def _ispatchinga(self, afile):
        if afile == '/dev/null':
            return self.op == 'ADD'
        return afile == 'a/' + (self.oldpath or self.path)

    def _ispatchingb(self, bfile):
        if bfile == '/dev/null':
            return self.op == 'DELETE'
        return bfile == 'b/' + self.path

    def ispatching(self, afile, bfile):
        return self._ispatchinga(afile) and self._ispatchingb(bfile)

    def __repr__(self):
        return "<patchmeta %s %r>" % (self.op, self.path)

def readgitpatch(lr):
    """extract git-style metadata about patches from <patchname>"""

    # Filter patch for git information
    gp = None
    gitpatches = []
    for line in lr:
        line = line.rstrip(' \r\n')
        if line.startswith('diff --git a/'):
            m = gitre.match(line)
            if m:
                if gp:
                    gitpatches.append(gp)
                dst = m.group(2)
                gp = patchmeta(dst)
        elif gp:
            if line.startswith('--- '):
                gitpatches.append(gp)
                gp = None
                continue
            if line.startswith('rename from '):
                gp.op = 'RENAME'
                gp.oldpath = line[12:]
            elif line.startswith('rename to '):
                gp.path = line[10:]
            elif line.startswith('copy from '):
                gp.op = 'COPY'
                gp.oldpath = line[10:]
            elif line.startswith('copy to '):
                gp.path = line[8:]
            elif line.startswith('deleted file'):
                gp.op = 'DELETE'
            elif line.startswith('new file mode '):
                gp.op = 'ADD'
                gp.setmode(int(line[-6:], 8))
            elif line.startswith('new mode '):
                gp.setmode(int(line[-6:], 8))
            elif line.startswith('GIT binary patch'):
                gp.binary = True
    if gp:
        gitpatches.append(gp)

    return gitpatches

class linereader(object):
    # simple class to allow pushing lines back into the input stream
    def __init__(self, fp):
        self.fp = fp
        self.buf = []

    def push(self, line):
        if line is not None:
            self.buf.append(line)

    def readline(self):
        if self.buf:
            l = self.buf[0]
            del self.buf[0]
            return l
        return self.fp.readline()

    def __iter__(self):
        return iter(self.readline, '')

class abstractbackend(object):
    def __init__(self, ui):
        self.ui = ui

    def getfile(self, fname):
        """Return target file data and flags as a (data, (islink,
        isexec)) tuple. Data is None if file is missing/deleted.
        """
        raise NotImplementedError

    def setfile(self, fname, data, mode, copysource):
        """Write data to target file fname and set its mode. mode is a
        (islink, isexec) tuple. If data is None, the file content should
        be left unchanged. If the file is modified after being copied,
        copysource is set to the original file name.
        """
        raise NotImplementedError

    def unlink(self, fname):
        """Unlink target file."""
        raise NotImplementedError

    def writerej(self, fname, failed, total, lines):
        """Write rejected lines for fname. total is the number of hunks
        which failed to apply and total the total number of hunks for this
        files.
        """
        pass

    def exists(self, fname):
        raise NotImplementedError

    def close(self):
        raise NotImplementedError

class fsbackend(abstractbackend):
    def __init__(self, ui, basedir):
        super(fsbackend, self).__init__(ui)
        self.opener = vfsmod.vfs(basedir)

    def getfile(self, fname):
        if self.opener.islink(fname):
            return (self.opener.readlink(fname), (True, False))

        isexec = False
        try:
            isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
        try:
            return (self.opener.read(fname), (False, isexec))
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise
            return None, None

    def setfile(self, fname, data, mode, copysource):
        islink, isexec = mode
        if data is None:
            self.opener.setflags(fname, islink, isexec)
            return
        if islink:
            self.opener.symlink(data, fname)
        else:
            self.opener.write(fname, data)
            if isexec:
                self.opener.setflags(fname, False, True)

    def unlink(self, fname):
        self.opener.unlinkpath(fname, ignoremissing=True)

    def writerej(self, fname, failed, total, lines):
        fname = fname + ".rej"
        self.ui.warn(
            _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
            (failed, total, fname))
        fp = self.opener(fname, 'w')
        fp.writelines(lines)
        fp.close()

    def exists(self, fname):
        return self.opener.lexists(fname)

class workingbackend(fsbackend):
    def __init__(self, ui, repo, similarity):
        super(workingbackend, self).__init__(ui, repo.root)
        self.repo = repo
        self.similarity = similarity
        self.removed = set()
        self.changed = set()
        self.copied = []

    def _checkknown(self, fname):
        if self.repo.dirstate[fname] == '?' and self.exists(fname):
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def setfile(self, fname, data, mode, copysource):
        self._checkknown(fname)
        super(workingbackend, self).setfile(fname, data, mode, copysource)
        if copysource is not None:
            self.copied.append((copysource, fname))
        self.changed.add(fname)

    def unlink(self, fname):
        self._checkknown(fname)
        super(workingbackend, self).unlink(fname)
        self.removed.add(fname)
        self.changed.add(fname)

    def close(self):
        wctx = self.repo[None]
        changed = set(self.changed)
        for src, dst in self.copied:
            scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
        if self.removed:
            wctx.forget(sorted(self.removed))
            for f in self.removed:
                if f not in self.repo.dirstate:
                    # File was deleted and no longer belongs to the
                    # dirstate, it was probably marked added then
                    # deleted, and should not be considered by
                    # marktouched().
                    changed.discard(f)
        if changed:
            scmutil.marktouched(self.repo, changed, self.similarity)
        return sorted(self.changed)

class filestore(object):
    def __init__(self, maxsize=None):
        self.opener = None
        self.files = {}
        self.created = 0
        self.maxsize = maxsize
        if self.maxsize is None:
            self.maxsize = 4*(2**20)
        self.size = 0
        self.data = {}

    def setfile(self, fname, data, mode, copied=None):
        if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
            self.data[fname] = (data, mode, copied)
            self.size += len(data)
        else:
            if self.opener is None:
                root = tempfile.mkdtemp(prefix='hg-patch-')
                self.opener = vfsmod.vfs(root)
            # Avoid filename issues with these simple names
            fn = str(self.created)
            self.opener.write(fn, data)
            self.created += 1
            self.files[fname] = (fn, mode, copied)

    def getfile(self, fname):
        if fname in self.data:
            return self.data[fname]
        if not self.opener or fname not in self.files:
            return None, None, None
        fn, mode, copied = self.files[fname]
        return self.opener.read(fn), mode, copied

    def close(self):
        if self.opener:
            shutil.rmtree(self.opener.base)

class repobackend(abstractbackend):
    def __init__(self, ui, repo, ctx, store):
        super(repobackend, self).__init__(ui)
        self.repo = repo
        self.ctx = ctx
        self.store = store
        self.changed = set()
        self.removed = set()
        self.copied = {}

    def _checkknown(self, fname):
        if fname not in self.ctx:
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def getfile(self, fname):
        try:
            fctx = self.ctx[fname]
        except error.LookupError:
            return None, None
        flags = fctx.flags()
        return fctx.data(), ('l' in flags, 'x' in flags)

    def setfile(self, fname, data, mode, copysource):
        if copysource:
            self._checkknown(copysource)
        if data is None:
            data = self.ctx[fname].data()
        self.store.setfile(fname, data, mode, copysource)
        self.changed.add(fname)
        if copysource:
            self.copied[fname] = copysource

    def unlink(self, fname):
        self._checkknown(fname)
        self.removed.add(fname)

    def exists(self, fname):
        return fname in self.ctx

    def close(self):
        return self.changed | self.removed

# @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
eolmodes = ['strict', 'crlf', 'lf', 'auto']

class patchfile(object):
    def __init__(self, ui, gp, backend, store, eolmode='strict'):
        self.fname = gp.path
        self.eolmode = eolmode
        self.eol = None
        self.backend = backend
        self.ui = ui
        self.lines = []
        self.exists = False
        self.missing = True
        self.mode = gp.mode
        self.copysource = gp.oldpath
        self.create = gp.op in ('ADD', 'COPY', 'RENAME')
        self.remove = gp.op == 'DELETE'
        if self.copysource is None:
            data, mode = backend.getfile(self.fname)
        else:
            data, mode = store.getfile(self.copysource)[:2]
        if data is not None:
            self.exists = self.copysource is None or backend.exists(self.fname)
            self.missing = False
            if data:
                self.lines = mdiff.splitnewlines(data)
            if self.mode is None:
                self.mode = mode
            if self.lines:
                # Normalize line endings
                if self.lines[0].endswith('\r\n'):
                    self.eol = '\r\n'
                elif self.lines[0].endswith('\n'):
                    self.eol = '\n'
                if eolmode != 'strict':
                    nlines = []
                    for l in self.lines:
                        if l.endswith('\r\n'):
                            l = l[:-2] + '\n'
                        nlines.append(l)
                    self.lines = nlines
        else:
            if self.create:
                self.missing = False
            if self.mode is None:
                self.mode = (False, False)
        if self.missing:
            self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
            self.ui.warn(_("(use '--prefix' to apply patch relative to the "
                           "current directory)\n"))

        self.hash = {}
        self.dirty = 0
        self.offset = 0
        self.skew = 0
        self.rej = []
        self.fileprinted = False
        self.printfile(False)
        self.hunks = 0

    def writelines(self, fname, lines, mode):
        if self.eolmode == 'auto':
            eol = self.eol
        elif self.eolmode == 'crlf':
            eol = '\r\n'
        else:
            eol = '\n'

        if self.eolmode != 'strict' and eol and eol != '\n':
            rawlines = []
            for l in lines:
                if l and l[-1] == '\n':
                    l = l[:-1] + eol
                rawlines.append(l)
            lines = rawlines

        self.backend.setfile(fname, ''.join(lines), mode, self.copysource)

    def printfile(self, warn):
        if self.fileprinted:
            return
        if warn or self.ui.verbose:
            self.fileprinted = True
        s = _("patching file %s\n") % self.fname
        if warn:
            self.ui.warn(s)
        else:
            self.ui.note(s)


    def findlines(self, l, linenum):
        # looks through the hash and finds candidate lines. The
        # result is a list of line numbers sorted based on distance
        # from linenum

        cand = self.hash.get(l, [])
        if len(cand) > 1:
            # resort our list of potentials forward then back.
            cand.sort(key=lambda x: abs(x - linenum))
        return cand

    def write_rej(self):
        # our rejects are a little different from patch(1). This always
        # creates rejects in the same form as the original patch. A file
        # header is inserted so that you can run the reject through patch again
        # without having to type the filename.
        if not self.rej:
            return
        base = os.path.basename(self.fname)
        lines = ["--- %s\n+++ %s\n" % (base, base)]
        for x in self.rej:
            for l in x.hunk:
                lines.append(l)
                if l[-1:] != '\n':
                    lines.append("\n\ No newline at end of file\n")
        self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)

    def apply(self, h):
        if not h.complete():
            raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
                             (h.number, h.desc, len(h.a), h.lena, len(h.b),
                              h.lenb))

        self.hunks += 1

        if self.missing:
            self.rej.append(h)
            return -1

        if self.exists and self.create:
            if self.copysource:
                self.ui.warn(_("cannot create %s: destination already "
                               "exists\n") % self.fname)
            else:
                self.ui.warn(_("file %s already exists\n") % self.fname)
            self.rej.append(h)
            return -1

        if isinstance(h, binhunk):
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                l = h.new(self.lines)
                self.lines[:] = l
                self.offset += len(l)
                self.dirty = True
            return 0

        horig = h
        if (self.eolmode in ('crlf', 'lf')
            or self.eolmode == 'auto' and self.eol):
            # If new eols are going to be normalized, then normalize
            # hunk data before patching. Otherwise, preserve input
            # line-endings.
            h = h.getnormalized()

        # fast case first, no offsets, no fuzz
        old, oldstart, new, newstart = h.fuzzit(0, False)
        oldstart += self.offset
        orig_start = oldstart
        # if there's skew we want to emit the "(offset %d lines)" even
        # when the hunk cleanly applies at start + skew, so skip the
        # fast case code
        if (self.skew == 0 and
            diffhelpers.testhunk(old, self.lines, oldstart) == 0):
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                self.lines[oldstart:oldstart + len(old)] = new
                self.offset += len(new) - len(old)
                self.dirty = True
            return 0

        # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
        self.hash = {}
        for x, s in enumerate(self.lines):
            self.hash.setdefault(s, []).append(x)

        for fuzzlen in xrange(self.ui.configint("patch", "fuzz") + 1):
            for toponly in [True, False]:
                old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
                oldstart = oldstart + self.offset + self.skew
                oldstart = min(oldstart, len(self.lines))
                if old:
                    cand = self.findlines(old[0][1:], oldstart)
                else:
                    # Only adding lines with no or fuzzed context, just
                    # take the skew in account
                    cand = [oldstart]

                for l in cand:
                    if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
                        self.lines[l : l + len(old)] = new
                        self.offset += len(new) - len(old)
                        self.skew = l - orig_start
                        self.dirty = True
                        offset = l - orig_start - fuzzlen
                        if fuzzlen:
                            msg = _("Hunk #%d succeeded at %d "
                                    "with fuzz %d "
                                    "(offset %d lines).\n")
                            self.printfile(True)
                            self.ui.warn(msg %
                                         (h.number, l + 1, fuzzlen, offset))
                        else:
                            msg = _("Hunk #%d succeeded at %d "
                                    "(offset %d lines).\n")
                            self.ui.note(msg % (h.number, l + 1, offset))
                        return fuzzlen
        self.printfile(True)
        self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
        self.rej.append(horig)
        return -1

    def close(self):
        if self.dirty:
            self.writelines(self.fname, self.lines, self.mode)
        self.write_rej()
        return len(self.rej)

848 class header(object):
848 class header(object):
849 """patch header
849 """patch header
850 """
850 """
851 diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
851 diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
852 diff_re = re.compile('diff -r .* (.*)$')
852 diff_re = re.compile('diff -r .* (.*)$')
853 allhunks_re = re.compile('(?:index|deleted file) ')
853 allhunks_re = re.compile('(?:index|deleted file) ')
854 pretty_re = re.compile('(?:new file|deleted file) ')
854 pretty_re = re.compile('(?:new file|deleted file) ')
855 special_re = re.compile('(?:index|deleted|copy|rename) ')
855 special_re = re.compile('(?:index|deleted|copy|rename) ')
856 newfile_re = re.compile('(?:new file)')
856 newfile_re = re.compile('(?:new file)')
857
857
858 def __init__(self, header):
858 def __init__(self, header):
859 self.header = header
859 self.header = header
860 self.hunks = []
860 self.hunks = []
861
861
862 def binary(self):
862 def binary(self):
863 return any(h.startswith('index ') for h in self.header)
863 return any(h.startswith('index ') for h in self.header)
864
864
865 def pretty(self, fp):
865 def pretty(self, fp):
866 for h in self.header:
866 for h in self.header:
867 if h.startswith('index '):
867 if h.startswith('index '):
868 fp.write(_('this modifies a binary file (all or nothing)\n'))
868 fp.write(_('this modifies a binary file (all or nothing)\n'))
869 break
869 break
870 if self.pretty_re.match(h):
870 if self.pretty_re.match(h):
871 fp.write(h)
871 fp.write(h)
872 if self.binary():
872 if self.binary():
873 fp.write(_('this is a binary file\n'))
873 fp.write(_('this is a binary file\n'))
874 break
874 break
875 if h.startswith('---'):
875 if h.startswith('---'):
876 fp.write(_('%d hunks, %d lines changed\n') %
876 fp.write(_('%d hunks, %d lines changed\n') %
877 (len(self.hunks),
877 (len(self.hunks),
878 sum([max(h.added, h.removed) for h in self.hunks])))
878 sum([max(h.added, h.removed) for h in self.hunks])))
879 break
879 break
880 fp.write(h)
880 fp.write(h)
881
881
882 def write(self, fp):
882 def write(self, fp):
883 fp.write(''.join(self.header))
883 fp.write(''.join(self.header))
884
884
885 def allhunks(self):
885 def allhunks(self):
886 return any(self.allhunks_re.match(h) for h in self.header)
886 return any(self.allhunks_re.match(h) for h in self.header)
887
887
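# For example, files() maps the first header line to the touched paths:
# "diff --git a/foo b/foo" yields ['foo'], while a rename header such as
# "diff --git a/old b/new" yields ['old', 'new'].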
888 def files(self):
888 def files(self):
889 match = self.diffgit_re.match(self.header[0])
889 match = self.diffgit_re.match(self.header[0])
890 if match:
890 if match:
891 fromfile, tofile = match.groups()
891 fromfile, tofile = match.groups()
892 if fromfile == tofile:
892 if fromfile == tofile:
893 return [fromfile]
893 return [fromfile]
894 return [fromfile, tofile]
894 return [fromfile, tofile]
895 else:
895 else:
896 return self.diff_re.match(self.header[0]).groups()
896 return self.diff_re.match(self.header[0]).groups()
897
897
898 def filename(self):
898 def filename(self):
899 return self.files()[-1]
899 return self.files()[-1]
900
900
901 def __repr__(self):
901 def __repr__(self):
902 return '<header %s>' % (' '.join(map(repr, self.files())))
902 return '<header %s>' % (' '.join(map(repr, self.files())))
903
903
904 def isnewfile(self):
904 def isnewfile(self):
905 return any(self.newfile_re.match(h) for h in self.header)
905 return any(self.newfile_re.match(h) for h in self.header)
906
906
907 def special(self):
907 def special(self):
908 # Special files are shown only at the header level, not at the hunk
909 # level; for example, a deleted file is a special file.
910 # The user cannot change the content of the operation: for a deleted
911 # file, the deletion must be taken or left as a whole, it cannot be
912 # taken partially.
913 # Newly added files are special only if they are empty; added files
914 # with content are not special, because we want that content to be editable.
915 nocontent = len(self.header) == 2
915 nocontent = len(self.header) == 2
916 emptynewfile = self.isnewfile() and nocontent
916 emptynewfile = self.isnewfile() and nocontent
917 return emptynewfile or \
917 return emptynewfile or \
918 any(self.special_re.match(h) for h in self.header)
918 any(self.special_re.match(h) for h in self.header)
919
919
920 class recordhunk(object):
920 class recordhunk(object):
921 """patch hunk
921 """patch hunk
922
922
923 XXX shouldn't we merge this with the other hunk class?
923 XXX shouldn't we merge this with the other hunk class?
924 """
924 """
925 maxcontext = 3
925 maxcontext = 3
926
926
927 def __init__(self, header, fromline, toline, proc, before, hunk, after):
927 def __init__(self, header, fromline, toline, proc, before, hunk, after):
928 def trimcontext(number, lines):
928 def trimcontext(number, lines):
929 delta = len(lines) - self.maxcontext
929 delta = len(lines) - self.maxcontext
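# Note: context trimming is currently disabled; the 'if False' guard
# below makes trimcontext a no-op, so the full context is kept.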
930 if False and delta > 0:
930 if False and delta > 0:
931 return number + delta, lines[:self.maxcontext]
931 return number + delta, lines[:self.maxcontext]
932 return number, lines
932 return number, lines
933
933
934 self.header = header
934 self.header = header
935 self.fromline, self.before = trimcontext(fromline, before)
935 self.fromline, self.before = trimcontext(fromline, before)
936 self.toline, self.after = trimcontext(toline, after)
936 self.toline, self.after = trimcontext(toline, after)
937 self.proc = proc
937 self.proc = proc
938 self.hunk = hunk
938 self.hunk = hunk
939 self.added, self.removed = self.countchanges(self.hunk)
939 self.added, self.removed = self.countchanges(self.hunk)
940
940
941 def __eq__(self, v):
941 def __eq__(self, v):
942 if not isinstance(v, recordhunk):
942 if not isinstance(v, recordhunk):
943 return False
943 return False
944
944
945 return ((v.hunk == self.hunk) and
945 return ((v.hunk == self.hunk) and
946 (v.proc == self.proc) and
946 (v.proc == self.proc) and
947 (self.fromline == v.fromline) and
947 (self.fromline == v.fromline) and
948 (self.header.files() == v.header.files()))
948 (self.header.files() == v.header.files()))
949
949
950 def __hash__(self):
950 def __hash__(self):
951 return hash((tuple(self.hunk),
951 return hash((tuple(self.hunk),
952 tuple(self.header.files()),
952 tuple(self.header.files()),
953 self.fromline,
953 self.fromline,
954 self.proc))
954 self.proc))
955
955
956 def countchanges(self, hunk):
956 def countchanges(self, hunk):
957 """hunk -> (n+,n-)"""
957 """hunk -> (n+,n-)"""
958 add = len([h for h in hunk if h[0] == '+'])
958 add = len([h for h in hunk if h[0] == '+'])
959 rem = len([h for h in hunk if h[0] == '-'])
959 rem = len([h for h in hunk if h[0] == '-'])
960 return add, rem
960 return add, rem
961
961
962 def reversehunk(self):
962 def reversehunk(self):
963 """return another recordhunk which is the reverse of the hunk
963 """return another recordhunk which is the reverse of the hunk
964
964
965 If this hunk is diff(A, B), the returned hunk is diff(B, A). To do
966 that, swap fromline/toline and the +/- signs while keeping everything
967 else unchanged.
968 """
968 """
969 m = {'+': '-', '-': '+'}
969 m = {'+': '-', '-': '+'}
970 hunk = ['%s%s' % (m[l[0]], l[1:]) for l in self.hunk]
970 hunk = ['%s%s' % (m[l[0]], l[1:]) for l in self.hunk]
971 return recordhunk(self.header, self.toline, self.fromline, self.proc,
971 return recordhunk(self.header, self.toline, self.fromline, self.proc,
972 self.before, hunk, self.after)
972 self.before, hunk, self.after)
973
973
974 def write(self, fp):
974 def write(self, fp):
975 delta = len(self.before) + len(self.after)
975 delta = len(self.before) + len(self.after)
976 if self.after and self.after[-1] == '\\ No newline at end of file\n':
976 if self.after and self.after[-1] == '\\ No newline at end of file\n':
977 delta -= 1
977 delta -= 1
978 fromlen = delta + self.removed
978 fromlen = delta + self.removed
979 tolen = delta + self.added
979 tolen = delta + self.added
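# fromlen/tolen are the old/new lengths written in the "@@ -start,len
# +start,len @@" header: shared context (minus a trailing no-newline
# marker, if any) plus the removed/added lines respectively.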
980 fp.write('@@ -%d,%d +%d,%d @@%s\n' %
980 fp.write('@@ -%d,%d +%d,%d @@%s\n' %
981 (self.fromline, fromlen, self.toline, tolen,
981 (self.fromline, fromlen, self.toline, tolen,
982 self.proc and (' ' + self.proc)))
982 self.proc and (' ' + self.proc)))
983 fp.write(''.join(self.before + self.hunk + self.after))
983 fp.write(''.join(self.before + self.hunk + self.after))
984
984
985 pretty = write
985 pretty = write
986
986
987 def filename(self):
987 def filename(self):
988 return self.header.filename()
988 return self.header.filename()
989
989
990 def __repr__(self):
990 def __repr__(self):
991 return '<hunk %r@%d>' % (self.filename(), self.fromline)
991 return '<hunk %r@%d>' % (self.filename(), self.fromline)
992
992
993 def filterpatch(ui, headers, operation=None):
993 def filterpatch(ui, headers, operation=None):
994 """Interactively filter patch chunks into applied-only chunks"""
994 """Interactively filter patch chunks into applied-only chunks"""
995 if operation is None:
995 if operation is None:
996 operation = 'record'
996 operation = 'record'
997 messages = {
997 messages = {
998 'multiple': {
998 'multiple': {
999 'discard': _("discard change %d/%d to '%s'?"),
999 'discard': _("discard change %d/%d to '%s'?"),
1000 'record': _("record change %d/%d to '%s'?"),
1000 'record': _("record change %d/%d to '%s'?"),
1001 'revert': _("revert change %d/%d to '%s'?"),
1001 'revert': _("revert change %d/%d to '%s'?"),
1002 }[operation],
1002 }[operation],
1003 'single': {
1003 'single': {
1004 'discard': _("discard this change to '%s'?"),
1004 'discard': _("discard this change to '%s'?"),
1005 'record': _("record this change to '%s'?"),
1005 'record': _("record this change to '%s'?"),
1006 'revert': _("revert this change to '%s'?"),
1006 'revert': _("revert this change to '%s'?"),
1007 }[operation],
1007 }[operation],
1008 'help': {
1008 'help': {
1009 'discard': _('[Ynesfdaq?]'
1009 'discard': _('[Ynesfdaq?]'
1010 '$$ &Yes, discard this change'
1010 '$$ &Yes, discard this change'
1011 '$$ &No, skip this change'
1011 '$$ &No, skip this change'
1012 '$$ &Edit this change manually'
1012 '$$ &Edit this change manually'
1013 '$$ &Skip remaining changes to this file'
1013 '$$ &Skip remaining changes to this file'
1014 '$$ Discard remaining changes to this &file'
1014 '$$ Discard remaining changes to this &file'
1015 '$$ &Done, skip remaining changes and files'
1015 '$$ &Done, skip remaining changes and files'
1016 '$$ Discard &all changes to all remaining files'
1016 '$$ Discard &all changes to all remaining files'
1017 '$$ &Quit, discarding no changes'
1017 '$$ &Quit, discarding no changes'
1018 '$$ &? (display help)'),
1018 '$$ &? (display help)'),
1019 'record': _('[Ynesfdaq?]'
1019 'record': _('[Ynesfdaq?]'
1020 '$$ &Yes, record this change'
1020 '$$ &Yes, record this change'
1021 '$$ &No, skip this change'
1021 '$$ &No, skip this change'
1022 '$$ &Edit this change manually'
1022 '$$ &Edit this change manually'
1023 '$$ &Skip remaining changes to this file'
1023 '$$ &Skip remaining changes to this file'
1024 '$$ Record remaining changes to this &file'
1024 '$$ Record remaining changes to this &file'
1025 '$$ &Done, skip remaining changes and files'
1025 '$$ &Done, skip remaining changes and files'
1026 '$$ Record &all changes to all remaining files'
1026 '$$ Record &all changes to all remaining files'
1027 '$$ &Quit, recording no changes'
1027 '$$ &Quit, recording no changes'
1028 '$$ &? (display help)'),
1028 '$$ &? (display help)'),
1029 'revert': _('[Ynesfdaq?]'
1029 'revert': _('[Ynesfdaq?]'
1030 '$$ &Yes, revert this change'
1030 '$$ &Yes, revert this change'
1031 '$$ &No, skip this change'
1031 '$$ &No, skip this change'
1032 '$$ &Edit this change manually'
1032 '$$ &Edit this change manually'
1033 '$$ &Skip remaining changes to this file'
1033 '$$ &Skip remaining changes to this file'
1034 '$$ Revert remaining changes to this &file'
1034 '$$ Revert remaining changes to this &file'
1035 '$$ &Done, skip remaining changes and files'
1035 '$$ &Done, skip remaining changes and files'
1036 '$$ Revert &all changes to all remaining files'
1036 '$$ Revert &all changes to all remaining files'
1037 '$$ &Quit, reverting no changes'
1037 '$$ &Quit, reverting no changes'
1038 '$$ &? (display help)')
1038 '$$ &? (display help)')
1039 }[operation]
1039 }[operation]
1040 }
1040 }
1041
1041
1042 def prompt(skipfile, skipall, query, chunk):
1042 def prompt(skipfile, skipall, query, chunk):
1043 """prompt query, and process base inputs
1043 """prompt query, and process base inputs
1044
1044
1045 - y/n for the rest of file
1045 - y/n for the rest of file
1046 - y/n for the rest
1046 - y/n for the rest
1047 - ? (help)
1047 - ? (help)
1048 - q (quit)
1048 - q (quit)
1049
1049
1050 Return (response, skipfile, skipall, newpatches); newpatches is only set when the chunk was edited.
1051 """
1051 """
1052 newpatches = None
1052 newpatches = None
1053 if skipall is not None:
1053 if skipall is not None:
1054 return skipall, skipfile, skipall, newpatches
1054 return skipall, skipfile, skipall, newpatches
1055 if skipfile is not None:
1055 if skipfile is not None:
1056 return skipfile, skipfile, skipall, newpatches
1056 return skipfile, skipfile, skipall, newpatches
1057 while True:
1057 while True:
1058 resps = messages['help']
1058 resps = messages['help']
1059 r = ui.promptchoice("%s %s" % (query, resps))
1059 r = ui.promptchoice("%s %s" % (query, resps))
1060 ui.write("\n")
1060 ui.write("\n")
1061 if r == 8: # ?
1061 if r == 8: # ?
1062 for c, t in ui.extractchoices(resps)[1]:
1062 for c, t in ui.extractchoices(resps)[1]:
1063 ui.write('%s - %s\n' % (c, encoding.lower(t)))
1063 ui.write('%s - %s\n' % (c, encoding.lower(t)))
1064 continue
1064 continue
1065 elif r == 0: # yes
1065 elif r == 0: # yes
1066 ret = True
1066 ret = True
1067 elif r == 1: # no
1067 elif r == 1: # no
1068 ret = False
1068 ret = False
1069 elif r == 2: # Edit patch
1069 elif r == 2: # Edit patch
1070 if chunk is None:
1070 if chunk is None:
1071 ui.write(_('cannot edit patch for whole file'))
1071 ui.write(_('cannot edit patch for whole file'))
1072 ui.write("\n")
1072 ui.write("\n")
1073 continue
1073 continue
1074 if chunk.header.binary():
1074 if chunk.header.binary():
1075 ui.write(_('cannot edit patch for binary file'))
1075 ui.write(_('cannot edit patch for binary file'))
1076 ui.write("\n")
1076 ui.write("\n")
1077 continue
1077 continue
1078 # Patch comment based on the Git one (based on comment at end of
1078 # Patch comment based on the Git one (based on comment at end of
1079 # https://mercurial-scm.org/wiki/RecordExtension)
1079 # https://mercurial-scm.org/wiki/RecordExtension)
1080 phelp = '---' + _("""
1080 phelp = '---' + _("""
1081 To remove '-' lines, make them ' ' lines (context).
1081 To remove '-' lines, make them ' ' lines (context).
1082 To remove '+' lines, delete them.
1082 To remove '+' lines, delete them.
1083 Lines starting with # will be removed from the patch.
1083 Lines starting with # will be removed from the patch.
1084
1084
1085 If the patch applies cleanly, the edited hunk will immediately be
1085 If the patch applies cleanly, the edited hunk will immediately be
1086 added to the record list. If it does not apply cleanly, a rejects
1086 added to the record list. If it does not apply cleanly, a rejects
1087 file will be generated: you can use that when you try again. If
1087 file will be generated: you can use that when you try again. If
1088 all lines of the hunk are removed, then the edit is aborted and
1088 all lines of the hunk are removed, then the edit is aborted and
1089 the hunk is left unchanged.
1089 the hunk is left unchanged.
1090 """)
1090 """)
1091 (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
1091 (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
1092 suffix=".diff", text=True)
1092 suffix=".diff", text=True)
1093 ncpatchfp = None
1093 ncpatchfp = None
1094 try:
1094 try:
1095 # Write the initial patch
1095 # Write the initial patch
1096 f = os.fdopen(patchfd, pycompat.sysstr("w"))
1096 f = os.fdopen(patchfd, pycompat.sysstr("w"))
1097 chunk.header.write(f)
1097 chunk.header.write(f)
1098 chunk.write(f)
1098 chunk.write(f)
1099 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
1099 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
1100 f.close()
1100 f.close()
1101 # Start the editor and wait for it to complete
1101 # Start the editor and wait for it to complete
1102 editor = ui.geteditor()
1102 editor = ui.geteditor()
1103 ret = ui.system("%s \"%s\"" % (editor, patchfn),
1103 ret = ui.system("%s \"%s\"" % (editor, patchfn),
1104 environ={'HGUSER': ui.username()},
1104 environ={'HGUSER': ui.username()},
1105 blockedtag='filterpatch')
1105 blockedtag='filterpatch')
1106 if ret != 0:
1106 if ret != 0:
1107 ui.warn(_("editor exited with exit code %d\n") % ret)
1107 ui.warn(_("editor exited with exit code %d\n") % ret)
1108 continue
1108 continue
1109 # Remove comment lines
1109 # Remove comment lines
1110 patchfp = open(patchfn)
1110 patchfp = open(patchfn)
1111 ncpatchfp = stringio()
1111 ncpatchfp = stringio()
1112 for line in util.iterfile(patchfp):
1112 for line in util.iterfile(patchfp):
1113 if not line.startswith('#'):
1113 if not line.startswith('#'):
1114 ncpatchfp.write(line)
1114 ncpatchfp.write(line)
1115 patchfp.close()
1115 patchfp.close()
1116 ncpatchfp.seek(0)
1116 ncpatchfp.seek(0)
1117 newpatches = parsepatch(ncpatchfp)
1117 newpatches = parsepatch(ncpatchfp)
1118 finally:
1118 finally:
1119 os.unlink(patchfn)
1119 os.unlink(patchfn)
1120 del ncpatchfp
1120 del ncpatchfp
1121 # Signal that the chunk shouldn't be applied as-is, but
1121 # Signal that the chunk shouldn't be applied as-is, but
1122 # provide the new patch to be used instead.
1122 # provide the new patch to be used instead.
1123 ret = False
1123 ret = False
1124 elif r == 3: # Skip
1124 elif r == 3: # Skip
1125 ret = skipfile = False
1125 ret = skipfile = False
1126 elif r == 4: # file (Record remaining)
1126 elif r == 4: # file (Record remaining)
1127 ret = skipfile = True
1127 ret = skipfile = True
1128 elif r == 5: # done, skip remaining
1128 elif r == 5: # done, skip remaining
1129 ret = skipall = False
1129 ret = skipall = False
1130 elif r == 6: # all
1130 elif r == 6: # all
1131 ret = skipall = True
1131 ret = skipall = True
1132 elif r == 7: # quit
1132 elif r == 7: # quit
1133 raise error.Abort(_('user quit'))
1133 raise error.Abort(_('user quit'))
1134 return ret, skipfile, skipall, newpatches
1134 return ret, skipfile, skipall, newpatches
1135
1135
1136 seen = set()
1136 seen = set()
1137 applied = {} # 'filename' -> [] of chunks
1137 applied = {} # 'filename' -> [] of chunks
1138 skipfile, skipall = None, None
1138 skipfile, skipall = None, None
1139 pos, total = 1, sum(len(h.hunks) for h in headers)
1139 pos, total = 1, sum(len(h.hunks) for h in headers)
1140 for h in headers:
1140 for h in headers:
1141 pos += len(h.hunks)
1141 pos += len(h.hunks)
1142 skipfile = None
1142 skipfile = None
1143 fixoffset = 0
1143 fixoffset = 0
1144 hdr = ''.join(h.header)
1144 hdr = ''.join(h.header)
1145 if hdr in seen:
1145 if hdr in seen:
1146 continue
1146 continue
1147 seen.add(hdr)
1147 seen.add(hdr)
1148 if skipall is None:
1148 if skipall is None:
1149 h.pretty(ui)
1149 h.pretty(ui)
1150 msg = (_('examine changes to %s?') %
1150 msg = (_('examine changes to %s?') %
1151 _(' and ').join("'%s'" % f for f in h.files()))
1151 _(' and ').join("'%s'" % f for f in h.files()))
1152 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
1152 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
1153 if not r:
1153 if not r:
1154 continue
1154 continue
1155 applied[h.filename()] = [h]
1155 applied[h.filename()] = [h]
1156 if h.allhunks():
1156 if h.allhunks():
1157 applied[h.filename()] += h.hunks
1157 applied[h.filename()] += h.hunks
1158 continue
1158 continue
1159 for i, chunk in enumerate(h.hunks):
1159 for i, chunk in enumerate(h.hunks):
1160 if skipfile is None and skipall is None:
1160 if skipfile is None and skipall is None:
1161 chunk.pretty(ui)
1161 chunk.pretty(ui)
1162 if total == 1:
1162 if total == 1:
1163 msg = messages['single'] % chunk.filename()
1163 msg = messages['single'] % chunk.filename()
1164 else:
1164 else:
1165 idx = pos - len(h.hunks) + i
1165 idx = pos - len(h.hunks) + i
1166 msg = messages['multiple'] % (idx, total, chunk.filename())
1166 msg = messages['multiple'] % (idx, total, chunk.filename())
1167 r, skipfile, skipall, newpatches = prompt(skipfile,
1167 r, skipfile, skipall, newpatches = prompt(skipfile,
1168 skipall, msg, chunk)
1168 skipall, msg, chunk)
1169 if r:
1169 if r:
1170 if fixoffset:
1170 if fixoffset:
1171 chunk = copy.copy(chunk)
1171 chunk = copy.copy(chunk)
1172 chunk.toline += fixoffset
1172 chunk.toline += fixoffset
1173 applied[chunk.filename()].append(chunk)
1173 applied[chunk.filename()].append(chunk)
1174 elif newpatches is not None:
1174 elif newpatches is not None:
1175 for newpatch in newpatches:
1175 for newpatch in newpatches:
1176 for newhunk in newpatch.hunks:
1176 for newhunk in newpatch.hunks:
1177 if fixoffset:
1177 if fixoffset:
1178 newhunk.toline += fixoffset
1178 newhunk.toline += fixoffset
1179 applied[newhunk.filename()].append(newhunk)
1179 applied[newhunk.filename()].append(newhunk)
1180 else:
1180 else:
1181 fixoffset += chunk.removed - chunk.added
1181 fixoffset += chunk.removed - chunk.added
1182 return (sum([h for h in applied.itervalues()
1182 return (sum([h for h in applied.itervalues()
1183 if h[0].special() or len(h) > 1], []), {})
1183 if h[0].special() or len(h) > 1], []), {})
1184 class hunk(object):
1184 class hunk(object):
1185 def __init__(self, desc, num, lr, context):
1185 def __init__(self, desc, num, lr, context):
1186 self.number = num
1186 self.number = num
1187 self.desc = desc
1187 self.desc = desc
1188 self.hunk = [desc]
1188 self.hunk = [desc]
1189 self.a = []
1189 self.a = []
1190 self.b = []
1190 self.b = []
1191 self.starta = self.lena = None
1191 self.starta = self.lena = None
1192 self.startb = self.lenb = None
1192 self.startb = self.lenb = None
1193 if lr is not None:
1193 if lr is not None:
1194 if context:
1194 if context:
1195 self.read_context_hunk(lr)
1195 self.read_context_hunk(lr)
1196 else:
1196 else:
1197 self.read_unified_hunk(lr)
1197 self.read_unified_hunk(lr)
1198
1198
1199 def getnormalized(self):
1199 def getnormalized(self):
1200 """Return a copy with line endings normalized to LF."""
1200 """Return a copy with line endings normalized to LF."""
1201
1201
1202 def normalize(lines):
1202 def normalize(lines):
1203 nlines = []
1203 nlines = []
1204 for line in lines:
1204 for line in lines:
1205 if line.endswith('\r\n'):
1205 if line.endswith('\r\n'):
1206 line = line[:-2] + '\n'
1206 line = line[:-2] + '\n'
1207 nlines.append(line)
1207 nlines.append(line)
1208 return nlines
1208 return nlines
1209
1209
1210 # Dummy object, it is rebuilt manually
1210 # Dummy object, it is rebuilt manually
1211 nh = hunk(self.desc, self.number, None, None)
1211 nh = hunk(self.desc, self.number, None, None)
1212 nh.number = self.number
1212 nh.number = self.number
1213 nh.desc = self.desc
1213 nh.desc = self.desc
1214 nh.hunk = self.hunk
1214 nh.hunk = self.hunk
1215 nh.a = normalize(self.a)
1215 nh.a = normalize(self.a)
1216 nh.b = normalize(self.b)
1216 nh.b = normalize(self.b)
1217 nh.starta = self.starta
1217 nh.starta = self.starta
1218 nh.startb = self.startb
1218 nh.startb = self.startb
1219 nh.lena = self.lena
1219 nh.lena = self.lena
1220 nh.lenb = self.lenb
1220 nh.lenb = self.lenb
1221 return nh
1221 return nh
1222
1222
1223 def read_unified_hunk(self, lr):
1223 def read_unified_hunk(self, lr):
1224 m = unidesc.match(self.desc)
1224 m = unidesc.match(self.desc)
1225 if not m:
1225 if not m:
1226 raise PatchError(_("bad hunk #%d") % self.number)
1226 raise PatchError(_("bad hunk #%d") % self.number)
1227 self.starta, self.lena, self.startb, self.lenb = m.groups()
1227 self.starta, self.lena, self.startb, self.lenb = m.groups()
1228 if self.lena is None:
1228 if self.lena is None:
1229 self.lena = 1
1229 self.lena = 1
1230 else:
1230 else:
1231 self.lena = int(self.lena)
1231 self.lena = int(self.lena)
1232 if self.lenb is None:
1232 if self.lenb is None:
1233 self.lenb = 1
1233 self.lenb = 1
1234 else:
1234 else:
1235 self.lenb = int(self.lenb)
1235 self.lenb = int(self.lenb)
1236 self.starta = int(self.starta)
1236 self.starta = int(self.starta)
1237 self.startb = int(self.startb)
1237 self.startb = int(self.startb)
1238 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
1238 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
1239 self.b)
1239 self.b)
1240 # if we hit EOF before finishing out the hunk, the last line will
1241 # be zero length. Let's try to fix it up.
1242 while len(self.hunk[-1]) == 0:
1242 while len(self.hunk[-1]) == 0:
1243 del self.hunk[-1]
1243 del self.hunk[-1]
1244 del self.a[-1]
1244 del self.a[-1]
1245 del self.b[-1]
1245 del self.b[-1]
1246 self.lena -= 1
1246 self.lena -= 1
1247 self.lenb -= 1
1247 self.lenb -= 1
1248 self._fixnewline(lr)
1248 self._fixnewline(lr)
1249
1249
1250 def read_context_hunk(self, lr):
1250 def read_context_hunk(self, lr):
1251 self.desc = lr.readline()
1251 self.desc = lr.readline()
1252 m = contextdesc.match(self.desc)
1252 m = contextdesc.match(self.desc)
1253 if not m:
1253 if not m:
1254 raise PatchError(_("bad hunk #%d") % self.number)
1254 raise PatchError(_("bad hunk #%d") % self.number)
1255 self.starta, aend = m.groups()
1255 self.starta, aend = m.groups()
1256 self.starta = int(self.starta)
1256 self.starta = int(self.starta)
1257 if aend is None:
1257 if aend is None:
1258 aend = self.starta
1258 aend = self.starta
1259 self.lena = int(aend) - self.starta
1259 self.lena = int(aend) - self.starta
1260 if self.starta:
1260 if self.starta:
1261 self.lena += 1
1261 self.lena += 1
1262 for x in xrange(self.lena):
1262 for x in xrange(self.lena):
1263 l = lr.readline()
1263 l = lr.readline()
1264 if l.startswith('---'):
1264 if l.startswith('---'):
1265 # lines addition, old block is empty
1265 # lines addition, old block is empty
1266 lr.push(l)
1266 lr.push(l)
1267 break
1267 break
1268 s = l[2:]
1268 s = l[2:]
1269 if l.startswith('- ') or l.startswith('! '):
1269 if l.startswith('- ') or l.startswith('! '):
1270 u = '-' + s
1270 u = '-' + s
1271 elif l.startswith(' '):
1271 elif l.startswith(' '):
1272 u = ' ' + s
1272 u = ' ' + s
1273 else:
1273 else:
1274 raise PatchError(_("bad hunk #%d old text line %d") %
1274 raise PatchError(_("bad hunk #%d old text line %d") %
1275 (self.number, x))
1275 (self.number, x))
1276 self.a.append(u)
1276 self.a.append(u)
1277 self.hunk.append(u)
1277 self.hunk.append(u)
1278
1278
1279 l = lr.readline()
1279 l = lr.readline()
1280 if l.startswith('\ '):
1280 if l.startswith('\ '):
1281 s = self.a[-1][:-1]
1281 s = self.a[-1][:-1]
1282 self.a[-1] = s
1282 self.a[-1] = s
1283 self.hunk[-1] = s
1283 self.hunk[-1] = s
1284 l = lr.readline()
1284 l = lr.readline()
1285 m = contextdesc.match(l)
1285 m = contextdesc.match(l)
1286 if not m:
1286 if not m:
1287 raise PatchError(_("bad hunk #%d") % self.number)
1287 raise PatchError(_("bad hunk #%d") % self.number)
1288 self.startb, bend = m.groups()
1288 self.startb, bend = m.groups()
1289 self.startb = int(self.startb)
1289 self.startb = int(self.startb)
1290 if bend is None:
1290 if bend is None:
1291 bend = self.startb
1291 bend = self.startb
1292 self.lenb = int(bend) - self.startb
1292 self.lenb = int(bend) - self.startb
1293 if self.startb:
1293 if self.startb:
1294 self.lenb += 1
1294 self.lenb += 1
1295 hunki = 1
1295 hunki = 1
1296 for x in xrange(self.lenb):
1296 for x in xrange(self.lenb):
1297 l = lr.readline()
1297 l = lr.readline()
1298 if l.startswith('\ '):
1298 if l.startswith('\ '):
1299 # XXX: the only way to hit this is with an invalid line range.
1300 # The no-eol marker is not counted in the line range, but I
1301 # guess there are diff(1) implementations out there which behave differently.
1302 s = self.b[-1][:-1]
1302 s = self.b[-1][:-1]
1303 self.b[-1] = s
1303 self.b[-1] = s
1304 self.hunk[hunki - 1] = s
1304 self.hunk[hunki - 1] = s
1305 continue
1305 continue
1306 if not l:
1306 if not l:
1307 # line deletions, new block is empty and we hit EOF
1307 # line deletions, new block is empty and we hit EOF
1308 lr.push(l)
1308 lr.push(l)
1309 break
1309 break
1310 s = l[2:]
1310 s = l[2:]
1311 if l.startswith('+ ') or l.startswith('! '):
1311 if l.startswith('+ ') or l.startswith('! '):
1312 u = '+' + s
1312 u = '+' + s
1313 elif l.startswith(' '):
1313 elif l.startswith(' '):
1314 u = ' ' + s
1314 u = ' ' + s
1315 elif len(self.b) == 0:
1315 elif len(self.b) == 0:
1316 # line deletions, new block is empty
1316 # line deletions, new block is empty
1317 lr.push(l)
1317 lr.push(l)
1318 break
1318 break
1319 else:
1319 else:
1320 raise PatchError(_("bad hunk #%d old text line %d") %
1320 raise PatchError(_("bad hunk #%d old text line %d") %
1321 (self.number, x))
1321 (self.number, x))
1322 self.b.append(s)
1322 self.b.append(s)
1323 while True:
1323 while True:
1324 if hunki >= len(self.hunk):
1324 if hunki >= len(self.hunk):
1325 h = ""
1325 h = ""
1326 else:
1326 else:
1327 h = self.hunk[hunki]
1327 h = self.hunk[hunki]
1328 hunki += 1
1328 hunki += 1
1329 if h == u:
1329 if h == u:
1330 break
1330 break
1331 elif h.startswith('-'):
1331 elif h.startswith('-'):
1332 continue
1332 continue
1333 else:
1333 else:
1334 self.hunk.insert(hunki - 1, u)
1334 self.hunk.insert(hunki - 1, u)
1335 break
1335 break
1336
1336
1337 if not self.a:
1337 if not self.a:
1338 # this happens when lines were only added to the hunk
1338 # this happens when lines were only added to the hunk
1339 for x in self.hunk:
1339 for x in self.hunk:
1340 if x.startswith('-') or x.startswith(' '):
1340 if x.startswith('-') or x.startswith(' '):
1341 self.a.append(x)
1341 self.a.append(x)
1342 if not self.b:
1342 if not self.b:
1343 # this happens when lines were only deleted from the hunk
1343 # this happens when lines were only deleted from the hunk
1344 for x in self.hunk:
1344 for x in self.hunk:
1345 if x.startswith('+') or x.startswith(' '):
1345 if x.startswith('+') or x.startswith(' '):
1346 self.b.append(x[1:])
1346 self.b.append(x[1:])
1347 # @@ -start,len +start,len @@
1347 # @@ -start,len +start,len @@
1348 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
1348 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
1349 self.startb, self.lenb)
1349 self.startb, self.lenb)
1350 self.hunk[0] = self.desc
1350 self.hunk[0] = self.desc
1351 self._fixnewline(lr)
1351 self._fixnewline(lr)
1352
1352
1353 def _fixnewline(self, lr):
1353 def _fixnewline(self, lr):
1354 l = lr.readline()
1354 l = lr.readline()
1355 if l.startswith('\ '):
1355 if l.startswith('\ '):
1356 diffhelpers.fix_newline(self.hunk, self.a, self.b)
1356 diffhelpers.fix_newline(self.hunk, self.a, self.b)
1357 else:
1357 else:
1358 lr.push(l)
1358 lr.push(l)
1359
1359
1360 def complete(self):
1360 def complete(self):
1361 return len(self.a) == self.lena and len(self.b) == self.lenb
1361 return len(self.a) == self.lena and len(self.b) == self.lenb
1362
1362
1363 def _fuzzit(self, old, new, fuzz, toponly):
1363 def _fuzzit(self, old, new, fuzz, toponly):
1364 # this removes up to 'fuzz' context lines from the top and bottom of the
1365 # old and new line lists, checking the hunk to ensure only context lines
1366 # are removed; it returns the shortened lists and the number trimmed from the top.
1367 fuzz = min(fuzz, len(old))
1367 fuzz = min(fuzz, len(old))
1368 if fuzz:
1368 if fuzz:
1369 top = 0
1369 top = 0
1370 bot = 0
1370 bot = 0
1371 hlen = len(self.hunk)
1371 hlen = len(self.hunk)
1372 for x in xrange(hlen - 1):
1372 for x in xrange(hlen - 1):
1373 # the hunk starts with the @@ line, so use x+1
1373 # the hunk starts with the @@ line, so use x+1
1374 if self.hunk[x + 1][0] == ' ':
1374 if self.hunk[x + 1][0] == ' ':
1375 top += 1
1375 top += 1
1376 else:
1376 else:
1377 break
1377 break
1378 if not toponly:
1378 if not toponly:
1379 for x in xrange(hlen - 1):
1379 for x in xrange(hlen - 1):
1380 if self.hunk[hlen - bot - 1][0] == ' ':
1380 if self.hunk[hlen - bot - 1][0] == ' ':
1381 bot += 1
1381 bot += 1
1382 else:
1382 else:
1383 break
1383 break
1384
1384
1385 bot = min(fuzz, bot)
1385 bot = min(fuzz, bot)
1386 top = min(fuzz, top)
1386 top = min(fuzz, top)
1387 return old[top:len(old) - bot], new[top:len(new) - bot], top
1387 return old[top:len(old) - bot], new[top:len(new) - bot], top
1388 return old, new, 0
1388 return old, new, 0
1389
1389
1390 def fuzzit(self, fuzz, toponly):
1390 def fuzzit(self, fuzz, toponly):
1391 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1391 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1392 oldstart = self.starta + top
1392 oldstart = self.starta + top
1393 newstart = self.startb + top
1393 newstart = self.startb + top
1394 # zero length hunk ranges already have their start decremented
1394 # zero length hunk ranges already have their start decremented
1395 if self.lena and oldstart > 0:
1395 if self.lena and oldstart > 0:
1396 oldstart -= 1
1396 oldstart -= 1
1397 if self.lenb and newstart > 0:
1397 if self.lenb and newstart > 0:
1398 newstart -= 1
1398 newstart -= 1
1399 return old, oldstart, new, newstart
1399 return old, oldstart, new, newstart
1400
1400
1401 class binhunk(object):
1401 class binhunk(object):
1402 'A binary patch file.'
1402 'A binary patch file.'
1403 def __init__(self, lr, fname):
1403 def __init__(self, lr, fname):
1404 self.text = None
1404 self.text = None
1405 self.delta = False
1405 self.delta = False
1406 self.hunk = ['GIT binary patch\n']
1406 self.hunk = ['GIT binary patch\n']
1407 self._fname = fname
1407 self._fname = fname
1408 self._read(lr)
1408 self._read(lr)
1409
1409
1410 def complete(self):
1410 def complete(self):
1411 return self.text is not None
1411 return self.text is not None
1412
1412
1413 def new(self, lines):
1413 def new(self, lines):
1414 if self.delta:
1414 if self.delta:
1415 return [applybindelta(self.text, ''.join(lines))]
1415 return [applybindelta(self.text, ''.join(lines))]
1416 return [self.text]
1416 return [self.text]
1417
1417
1418 def _read(self, lr):
1418 def _read(self, lr):
1419 def getline(lr, hunk):
1419 def getline(lr, hunk):
1420 l = lr.readline()
1420 l = lr.readline()
1421 hunk.append(l)
1421 hunk.append(l)
1422 return l.rstrip('\r\n')
1422 return l.rstrip('\r\n')
1423
1423
1424 size = 0
1424 size = 0
1425 while True:
1425 while True:
1426 line = getline(lr, self.hunk)
1426 line = getline(lr, self.hunk)
1427 if not line:
1427 if not line:
1428 raise PatchError(_('could not extract "%s" binary data')
1428 raise PatchError(_('could not extract "%s" binary data')
1429 % self._fname)
1429 % self._fname)
1430 if line.startswith('literal '):
1430 if line.startswith('literal '):
1431 size = int(line[8:].rstrip())
1431 size = int(line[8:].rstrip())
1432 break
1432 break
1433 if line.startswith('delta '):
1433 if line.startswith('delta '):
1434 size = int(line[6:].rstrip())
1434 size = int(line[6:].rstrip())
1435 self.delta = True
1435 self.delta = True
1436 break
1436 break
1437 dec = []
1437 dec = []
1438 line = getline(lr, self.hunk)
1438 line = getline(lr, self.hunk)
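# Each data line of a GIT binary patch starts with a length character:
# 'A'-'Z' encode 1-26 decoded bytes and 'a'-'z' encode 27-52; the rest
# of the line is base85-encoded data, decoded below.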
1439 while len(line) > 1:
1439 while len(line) > 1:
1440 l = line[0]
1440 l = line[0]
1441 if l <= 'Z' and l >= 'A':
1441 if l <= 'Z' and l >= 'A':
1442 l = ord(l) - ord('A') + 1
1442 l = ord(l) - ord('A') + 1
1443 else:
1443 else:
1444 l = ord(l) - ord('a') + 27
1444 l = ord(l) - ord('a') + 27
1445 try:
1445 try:
1446 dec.append(util.b85decode(line[1:])[:l])
1446 dec.append(util.b85decode(line[1:])[:l])
1447 except ValueError as e:
1447 except ValueError as e:
1448 raise PatchError(_('could not decode "%s" binary patch: %s')
1448 raise PatchError(_('could not decode "%s" binary patch: %s')
1449 % (self._fname, str(e)))
1449 % (self._fname, str(e)))
1450 line = getline(lr, self.hunk)
1450 line = getline(lr, self.hunk)
1451 text = zlib.decompress(''.join(dec))
1451 text = zlib.decompress(''.join(dec))
1452 if len(text) != size:
1452 if len(text) != size:
1453 raise PatchError(_('"%s" length is %d bytes, should be %d')
1453 raise PatchError(_('"%s" length is %d bytes, should be %d')
1454 % (self._fname, len(text), size))
1454 % (self._fname, len(text), size))
1455 self.text = text
1455 self.text = text
1456
1456
1457 def parsefilename(str):
1457 def parsefilename(str):
1458 # --- filename \t|space stuff
1458 # --- filename \t|space stuff
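# For example, parsefilename('--- a/foo\t2017-01-01 00:00:00') -> 'a/foo'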
1459 s = str[4:].rstrip('\r\n')
1459 s = str[4:].rstrip('\r\n')
1460 i = s.find('\t')
1460 i = s.find('\t')
1461 if i < 0:
1461 if i < 0:
1462 i = s.find(' ')
1462 i = s.find(' ')
1463 if i < 0:
1463 if i < 0:
1464 return s
1464 return s
1465 return s[:i]
1465 return s[:i]
1466
1466
1467 def reversehunks(hunks):
1467 def reversehunks(hunks):
1468 '''reverse the signs in the hunks given as argument
1468 '''reverse the signs in the hunks given as argument
1469
1469
1470 This function operates on hunks coming out of patch.filterpatch, that is
1470 This function operates on hunks coming out of patch.filterpatch, that is
1471 a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:
1471 a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:
1472
1472
1473 >>> rawpatch = """diff --git a/folder1/g b/folder1/g
1473 >>> rawpatch = """diff --git a/folder1/g b/folder1/g
1474 ... --- a/folder1/g
1474 ... --- a/folder1/g
1475 ... +++ b/folder1/g
1475 ... +++ b/folder1/g
1476 ... @@ -1,7 +1,7 @@
1476 ... @@ -1,7 +1,7 @@
1477 ... +firstline
1477 ... +firstline
1478 ... c
1478 ... c
1479 ... 1
1479 ... 1
1480 ... 2
1480 ... 2
1481 ... + 3
1481 ... + 3
1482 ... -4
1482 ... -4
1483 ... 5
1483 ... 5
1484 ... d
1484 ... d
1485 ... +lastline"""
1485 ... +lastline"""
1486 >>> hunks = parsepatch(rawpatch)
1486 >>> hunks = parsepatch(rawpatch)
1487 >>> hunkscomingfromfilterpatch = []
1487 >>> hunkscomingfromfilterpatch = []
1488 >>> for h in hunks:
1488 >>> for h in hunks:
1489 ... hunkscomingfromfilterpatch.append(h)
1489 ... hunkscomingfromfilterpatch.append(h)
1490 ... hunkscomingfromfilterpatch.extend(h.hunks)
1490 ... hunkscomingfromfilterpatch.extend(h.hunks)
1491
1491
1492 >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
1492 >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
1493 >>> from . import util
1493 >>> from . import util
1494 >>> fp = util.stringio()
1494 >>> fp = util.stringio()
1495 >>> for c in reversedhunks:
1495 >>> for c in reversedhunks:
1496 ... c.write(fp)
1496 ... c.write(fp)
1497 >>> fp.seek(0)
1497 >>> fp.seek(0)
1498 >>> reversedpatch = fp.read()
1498 >>> reversedpatch = fp.read()
1499 >>> print reversedpatch
1499 >>> print reversedpatch
1500 diff --git a/folder1/g b/folder1/g
1500 diff --git a/folder1/g b/folder1/g
1501 --- a/folder1/g
1501 --- a/folder1/g
1502 +++ b/folder1/g
1502 +++ b/folder1/g
1503 @@ -1,4 +1,3 @@
1503 @@ -1,4 +1,3 @@
1504 -firstline
1504 -firstline
1505 c
1505 c
1506 1
1506 1
1507 2
1507 2
1508 @@ -2,6 +1,6 @@
1508 @@ -2,6 +1,6 @@
1509 c
1509 c
1510 1
1510 1
1511 2
1511 2
1512 - 3
1512 - 3
1513 +4
1513 +4
1514 5
1514 5
1515 d
1515 d
1516 @@ -6,3 +5,2 @@
1516 @@ -6,3 +5,2 @@
1517 5
1517 5
1518 d
1518 d
1519 -lastline
1519 -lastline
1520
1520
1521 '''
1521 '''
1522
1522
1523 newhunks = []
1523 newhunks = []
1524 for c in hunks:
1524 for c in hunks:
1525 if util.safehasattr(c, 'reversehunk'):
1525 if util.safehasattr(c, 'reversehunk'):
1526 c = c.reversehunk()
1526 c = c.reversehunk()
1527 newhunks.append(c)
1527 newhunks.append(c)
1528 return newhunks
1528 return newhunks
1529
1529
1530 def parsepatch(originalchunks):
1530 def parsepatch(originalchunks):
1531 """patch -> [] of headers -> [] of hunks """
1531 """patch -> [] of headers -> [] of hunks """
1532 class parser(object):
1532 class parser(object):
1533 """patch parsing state machine"""
1533 """patch parsing state machine"""
1534 def __init__(self):
1534 def __init__(self):
1535 self.fromline = 0
1535 self.fromline = 0
1536 self.toline = 0
1536 self.toline = 0
1537 self.proc = ''
1537 self.proc = ''
1538 self.header = None
1538 self.header = None
1539 self.context = []
1539 self.context = []
1540 self.before = []
1540 self.before = []
1541 self.hunk = []
1541 self.hunk = []
1542 self.headers = []
1542 self.headers = []
1543
1543
1544 def addrange(self, limits):
1544 def addrange(self, limits):
1545 fromstart, fromend, tostart, toend, proc = limits
1545 fromstart, fromend, tostart, toend, proc = limits
1546 self.fromline = int(fromstart)
1546 self.fromline = int(fromstart)
1547 self.toline = int(tostart)
1547 self.toline = int(tostart)
1548 self.proc = proc
1548 self.proc = proc
1549
1549
1550 def addcontext(self, context):
1550 def addcontext(self, context):
1551 if self.hunk:
1551 if self.hunk:
1552 h = recordhunk(self.header, self.fromline, self.toline,
1552 h = recordhunk(self.header, self.fromline, self.toline,
1553 self.proc, self.before, self.hunk, context)
1553 self.proc, self.before, self.hunk, context)
1554 self.header.hunks.append(h)
1554 self.header.hunks.append(h)
1555 self.fromline += len(self.before) + h.removed
1555 self.fromline += len(self.before) + h.removed
1556 self.toline += len(self.before) + h.added
1556 self.toline += len(self.before) + h.added
1557 self.before = []
1557 self.before = []
1558 self.hunk = []
1558 self.hunk = []
1559 self.context = context
1559 self.context = context
1560
1560
1561 def addhunk(self, hunk):
1561 def addhunk(self, hunk):
1562 if self.context:
1562 if self.context:
1563 self.before = self.context
1563 self.before = self.context
1564 self.context = []
1564 self.context = []
1565 self.hunk = hunk
1565 self.hunk = hunk
1566
1566
1567 def newfile(self, hdr):
1567 def newfile(self, hdr):
1568 self.addcontext([])
1568 self.addcontext([])
1569 h = header(hdr)
1569 h = header(hdr)
1570 self.headers.append(h)
1570 self.headers.append(h)
1571 self.header = h
1571 self.header = h
1572
1572
1573 def addother(self, line):
1573 def addother(self, line):
1574 pass # 'other' lines are ignored
1574 pass # 'other' lines are ignored
1575
1575
1576 def finished(self):
1576 def finished(self):
1577 self.addcontext([])
1577 self.addcontext([])
1578 return self.headers
1578 return self.headers
1579
1579
1580 transitions = {
1580 transitions = {
1581 'file': {'context': addcontext,
1581 'file': {'context': addcontext,
1582 'file': newfile,
1582 'file': newfile,
1583 'hunk': addhunk,
1583 'hunk': addhunk,
1584 'range': addrange},
1584 'range': addrange},
1585 'context': {'file': newfile,
1585 'context': {'file': newfile,
1586 'hunk': addhunk,
1586 'hunk': addhunk,
1587 'range': addrange,
1587 'range': addrange,
1588 'other': addother},
1588 'other': addother},
1589 'hunk': {'context': addcontext,
1589 'hunk': {'context': addcontext,
1590 'file': newfile,
1590 'file': newfile,
1591 'range': addrange},
1591 'range': addrange},
1592 'range': {'context': addcontext,
1592 'range': {'context': addcontext,
1593 'hunk': addhunk},
1593 'hunk': addhunk},
1594 'other': {'other': addother},
1594 'other': {'other': addother},
1595 }
1595 }
1596
1596
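# For illustration, scanning a typical one-file unified diff walks the
# states: context -> file -> range -> hunk -> context -> ... and
# finished() flushes the final hunk into its header.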
1597 p = parser()
1597 p = parser()
1598 fp = stringio()
1598 fp = stringio()
1599 fp.write(''.join(originalchunks))
1599 fp.write(''.join(originalchunks))
1600 fp.seek(0)
1600 fp.seek(0)
1601
1601
1602 state = 'context'
1602 state = 'context'
1603 for newstate, data in scanpatch(fp):
1603 for newstate, data in scanpatch(fp):
1604 try:
1604 try:
1605 p.transitions[state][newstate](p, data)
1605 p.transitions[state][newstate](p, data)
1606 except KeyError:
1606 except KeyError:
1607 raise PatchError('unhandled transition: %s -> %s' %
1607 raise PatchError('unhandled transition: %s -> %s' %
1608 (state, newstate))
1608 (state, newstate))
1609 state = newstate
1609 state = newstate
1610 del fp
1610 del fp
1611 return p.finished()
1611 return p.finished()
1612
1612
1613 def pathtransform(path, strip, prefix):
1613 def pathtransform(path, strip, prefix):
1614 '''turn a path from a patch into a path suitable for the repository
1614 '''turn a path from a patch into a path suitable for the repository
1615
1615
1616 prefix, if not empty, is expected to be normalized with a / at the end.
1616 prefix, if not empty, is expected to be normalized with a / at the end.
1617
1617
1618 Returns (stripped components, path in repository).
1618 Returns (stripped components, path in repository).
1619
1619
1620 >>> pathtransform('a/b/c', 0, '')
1620 >>> pathtransform('a/b/c', 0, '')
1621 ('', 'a/b/c')
1621 ('', 'a/b/c')
1622 >>> pathtransform(' a/b/c ', 0, '')
1622 >>> pathtransform(' a/b/c ', 0, '')
1623 ('', ' a/b/c')
1623 ('', ' a/b/c')
1624 >>> pathtransform(' a/b/c ', 2, '')
1624 >>> pathtransform(' a/b/c ', 2, '')
1625 ('a/b/', 'c')
1625 ('a/b/', 'c')
1626 >>> pathtransform('a/b/c', 0, 'd/e/')
1626 >>> pathtransform('a/b/c', 0, 'd/e/')
1627 ('', 'd/e/a/b/c')
1627 ('', 'd/e/a/b/c')
1628 >>> pathtransform(' a//b/c ', 2, 'd/e/')
1628 >>> pathtransform(' a//b/c ', 2, 'd/e/')
1629 ('a//b/', 'd/e/c')
1629 ('a//b/', 'd/e/c')
1630 >>> pathtransform('a/b/c', 3, '')
1630 >>> pathtransform('a/b/c', 3, '')
1631 Traceback (most recent call last):
1631 Traceback (most recent call last):
1632 PatchError: unable to strip away 1 of 3 dirs from a/b/c
1632 PatchError: unable to strip away 1 of 3 dirs from a/b/c
1633 '''
1633 '''
1634 pathlen = len(path)
1634 pathlen = len(path)
1635 i = 0
1635 i = 0
1636 if strip == 0:
1636 if strip == 0:
1637 return '', prefix + path.rstrip()
1637 return '', prefix + path.rstrip()
1638 count = strip
1638 count = strip
1639 while count > 0:
1639 while count > 0:
1640 i = path.find('/', i)
1640 i = path.find('/', i)
1641 if i == -1:
1641 if i == -1:
1642 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1642 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1643 (count, strip, path))
1643 (count, strip, path))
1644 i += 1
1644 i += 1
1645 # consume '//' in the path
1645 # consume '//' in the path
1646 while i < pathlen - 1 and path[i] == '/':
1646 while i < pathlen - 1 and path[i] == '/':
1647 i += 1
1647 i += 1
1648 count -= 1
1648 count -= 1
1649 return path[:i].lstrip(), prefix + path[i:].rstrip()
1649 return path[:i].lstrip(), prefix + path[i:].rstrip()
1650
1650
1651 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
1651 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
1652 nulla = afile_orig == "/dev/null"
1652 nulla = afile_orig == "/dev/null"
1653 nullb = bfile_orig == "/dev/null"
1653 nullb = bfile_orig == "/dev/null"
1654 create = nulla and hunk.starta == 0 and hunk.lena == 0
1654 create = nulla and hunk.starta == 0 and hunk.lena == 0
1655 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1655 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
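# For example, a patch adding a new file has afile_orig == "/dev/null"
# and an "@@ -0,0 +1,N @@" hunk, so starta == lena == 0 and create is
# True (and symmetrically for remove).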
1656 abase, afile = pathtransform(afile_orig, strip, prefix)
1656 abase, afile = pathtransform(afile_orig, strip, prefix)
1657 gooda = not nulla and backend.exists(afile)
1657 gooda = not nulla and backend.exists(afile)
1658 bbase, bfile = pathtransform(bfile_orig, strip, prefix)
1658 bbase, bfile = pathtransform(bfile_orig, strip, prefix)
1659 if afile == bfile:
1659 if afile == bfile:
1660 goodb = gooda
1660 goodb = gooda
1661 else:
1661 else:
1662 goodb = not nullb and backend.exists(bfile)
1662 goodb = not nullb and backend.exists(bfile)
1663 missing = not goodb and not gooda and not create
1663 missing = not goodb and not gooda and not create
1664
1664
1665 # some diff programs apparently produce patches where the afile is
1665 # some diff programs apparently produce patches where the afile is
1666 # not /dev/null, but afile starts with bfile
1666 # not /dev/null, but afile starts with bfile
1667 abasedir = afile[:afile.rfind('/') + 1]
1667 abasedir = afile[:afile.rfind('/') + 1]
1668 bbasedir = bfile[:bfile.rfind('/') + 1]
1668 bbasedir = bfile[:bfile.rfind('/') + 1]
1669 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1669 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1670 and hunk.starta == 0 and hunk.lena == 0):
1670 and hunk.starta == 0 and hunk.lena == 0):
1671 create = True
1671 create = True
1672 missing = False
1672 missing = False
1673
1673
1674 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1674 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1675 # diff is between a file and its backup. In this case, the original
1675 # diff is between a file and its backup. In this case, the original
1676 # file should be patched (see original mpatch code).
1676 # file should be patched (see original mpatch code).
1677 isbackup = (abase == bbase and bfile.startswith(afile))
1677 isbackup = (abase == bbase and bfile.startswith(afile))
1678 fname = None
1678 fname = None
1679 if not missing:
1679 if not missing:
1680 if gooda and goodb:
1680 if gooda and goodb:
1681 if isbackup:
1681 if isbackup:
1682 fname = afile
1682 fname = afile
1683 else:
1683 else:
1684 fname = bfile
1684 fname = bfile
1685 elif gooda:
1685 elif gooda:
1686 fname = afile
1686 fname = afile
1687
1687
1688 if not fname:
1688 if not fname:
1689 if not nullb:
1689 if not nullb:
1690 if isbackup:
1690 if isbackup:
1691 fname = afile
1691 fname = afile
1692 else:
1692 else:
1693 fname = bfile
1693 fname = bfile
1694 elif not nulla:
1694 elif not nulla:
1695 fname = afile
1695 fname = afile
1696 else:
1696 else:
1697 raise PatchError(_("undefined source and destination files"))
1697 raise PatchError(_("undefined source and destination files"))
1698
1698
1699 gp = patchmeta(fname)
1699 gp = patchmeta(fname)
1700 if create:
1700 if create:
1701 gp.op = 'ADD'
1701 gp.op = 'ADD'
1702 elif remove:
1702 elif remove:
1703 gp.op = 'DELETE'
1703 gp.op = 'DELETE'
1704 return gp
1704 return gp
1705
1705
1706 def scanpatch(fp):
1706 def scanpatch(fp):
1707 """like patch.iterhunks, but yield different events
1707 """like patch.iterhunks, but yield different events
1708
1708
1709 - ('file', [header_lines + fromfile + tofile])
1709 - ('file', [header_lines + fromfile + tofile])
1710 - ('context', [context_lines])
1710 - ('context', [context_lines])
1711 - ('hunk', [hunk_lines])
1711 - ('hunk', [hunk_lines])
1712 - ('range', (-start,len, +start,len, proc))
1712 - ('range', (-start,len, +start,len, proc))
1713 """
1713 """
1714 lines_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
1714 lines_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
1715 lr = linereader(fp)
1715 lr = linereader(fp)
1716
1716
1717 def scanwhile(first, p):
1717 def scanwhile(first, p):
1718 """scan lr while predicate holds"""
1718 """scan lr while predicate holds"""
1719 lines = [first]
1719 lines = [first]
1720 for line in iter(lr.readline, ''):
1720 for line in iter(lr.readline, ''):
1721 if p(line):
1721 if p(line):
1722 lines.append(line)
1722 lines.append(line)
1723 else:
1723 else:
1724 lr.push(line)
1724 lr.push(line)
1725 break
1725 break
1726 return lines
1726 return lines
1727
1727
1728 for line in iter(lr.readline, ''):
1728 for line in iter(lr.readline, ''):
1729 if line.startswith('diff --git a/') or line.startswith('diff -r '):
1729 if line.startswith('diff --git a/') or line.startswith('diff -r '):
1730 def notheader(line):
1730 def notheader(line):
1731 s = line.split(None, 1)
1731 s = line.split(None, 1)
1732 return not s or s[0] not in ('---', 'diff')
1732 return not s or s[0] not in ('---', 'diff')
1733 header = scanwhile(line, notheader)
1733 header = scanwhile(line, notheader)
1734 fromfile = lr.readline()
1734 fromfile = lr.readline()
1735 if fromfile.startswith('---'):
1735 if fromfile.startswith('---'):
1736 tofile = lr.readline()
1736 tofile = lr.readline()
1737 header += [fromfile, tofile]
1737 header += [fromfile, tofile]
1738 else:
1738 else:
1739 lr.push(fromfile)
1739 lr.push(fromfile)
1740 yield 'file', header
1740 yield 'file', header
1741 elif line[0] == ' ':
1741 elif line[0] == ' ':
1742 yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
1742 yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
1743 elif line[0] in '-+':
1743 elif line[0] in '-+':
1744 yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
1744 yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
1745 else:
1745 else:
1746 m = lines_re.match(line)
1746 m = lines_re.match(line)
1747 if m:
1747 if m:
1748 yield 'range', m.groups()
1748 yield 'range', m.groups()
1749 else:
1749 else:
1750 yield 'other', line
1750 yield 'other', line
1751
1751
1752 def scangitpatch(lr, firstline):
1752 def scangitpatch(lr, firstline):
1753 """
1753 """
1754 Git patches can emit:
1754 Git patches can emit:
1755 - rename a to b
1755 - rename a to b
1756 - change b
1756 - change b
1757 - copy a to c
1757 - copy a to c
1758 - change c
1758 - change c
1759
1759
1760 We cannot apply this sequence as-is: the renamed 'a' could not be
1761 found, since it would have been renamed already. And we cannot copy
1762 from 'b' instead, because 'b' would already have been changed. So we
1763 scan the git patch for copy and rename commands, so that the copies
1764 can be performed ahead of time.
1765 """
1765 """
1766 pos = 0
1766 pos = 0
1767 try:
1767 try:
1768 pos = lr.fp.tell()
1768 pos = lr.fp.tell()
1769 fp = lr.fp
1769 fp = lr.fp
1770 except IOError:
1770 except IOError:
1771 fp = stringio(lr.fp.read())
1771 fp = stringio(lr.fp.read())
1772 gitlr = linereader(fp)
1772 gitlr = linereader(fp)
1773 gitlr.push(firstline)
1773 gitlr.push(firstline)
1774 gitpatches = readgitpatch(gitlr)
1774 gitpatches = readgitpatch(gitlr)
1775 fp.seek(pos)
1775 fp.seek(pos)
1776 return gitpatches
1776 return gitpatches
1777
1777
1778 def iterhunks(fp):
1778 def iterhunks(fp):
1779 """Read a patch and yield the following events:
1779 """Read a patch and yield the following events:
1780 - ("file", afile, bfile, firsthunk): select a new target file.
1780 - ("file", afile, bfile, firsthunk): select a new target file.
1781 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1781 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1782 "file" event.
1782 "file" event.
1783 - ("git", gitchanges): current diff is in git format, gitchanges
1783 - ("git", gitchanges): current diff is in git format, gitchanges
1784 maps filenames to gitpatch records. Unique event.
1784 maps filenames to gitpatch records. Unique event.
1785 """
1785 """
1786 afile = ""
1786 afile = ""
1787 bfile = ""
1787 bfile = ""
1788 state = None
1788 state = None
1789 hunknum = 0
1789 hunknum = 0
1790 emitfile = newfile = False
1790 emitfile = newfile = False
1791 gitpatches = None
1791 gitpatches = None
1792
1792
1793 # our states
1793 # our states
1794 BFILE = 1
1794 BFILE = 1
1795 context = None
1795 context = None
1796 lr = linereader(fp)
1796 lr = linereader(fp)
1797
1797
1798 for x in iter(lr.readline, ''):
1798 for x in iter(lr.readline, ''):
1799 if state == BFILE and (
1799 if state == BFILE and (
1800 (not context and x[0] == '@')
1800 (not context and x[0] == '@')
1801 or (context is not False and x.startswith('***************'))
1801 or (context is not False and x.startswith('***************'))
1802 or x.startswith('GIT binary patch')):
1802 or x.startswith('GIT binary patch')):
1803 gp = None
1803 gp = None
1804 if (gitpatches and
1804 if (gitpatches and
1805 gitpatches[-1].ispatching(afile, bfile)):
1805 gitpatches[-1].ispatching(afile, bfile)):
1806 gp = gitpatches.pop()
1806 gp = gitpatches.pop()
1807 if x.startswith('GIT binary patch'):
1807 if x.startswith('GIT binary patch'):
1808 h = binhunk(lr, gp.path)
1808 h = binhunk(lr, gp.path)
1809 else:
1809 else:
1810 if context is None and x.startswith('***************'):
1810 if context is None and x.startswith('***************'):
1811 context = True
1811 context = True
1812 h = hunk(x, hunknum + 1, lr, context)
1812 h = hunk(x, hunknum + 1, lr, context)
1813 hunknum += 1
1813 hunknum += 1
1814 if emitfile:
1814 if emitfile:
1815 emitfile = False
1815 emitfile = False
1816 yield 'file', (afile, bfile, h, gp and gp.copy() or None)
1816 yield 'file', (afile, bfile, h, gp and gp.copy() or None)
1817 yield 'hunk', h
1817 yield 'hunk', h
1818 elif x.startswith('diff --git a/'):
1818 elif x.startswith('diff --git a/'):
1819 m = gitre.match(x.rstrip(' \r\n'))
1819 m = gitre.match(x.rstrip(' \r\n'))
1820 if not m:
1820 if not m:
1821 continue
1821 continue
1822 if gitpatches is None:
1822 if gitpatches is None:
1823 # scan whole input for git metadata
1823 # scan whole input for git metadata
1824 gitpatches = scangitpatch(lr, x)
1824 gitpatches = scangitpatch(lr, x)
1825 yield 'git', [g.copy() for g in gitpatches
1825 yield 'git', [g.copy() for g in gitpatches
1826 if g.op in ('COPY', 'RENAME')]
1826 if g.op in ('COPY', 'RENAME')]
1827 gitpatches.reverse()
1827 gitpatches.reverse()
1828 afile = 'a/' + m.group(1)
1828 afile = 'a/' + m.group(1)
1829 bfile = 'b/' + m.group(2)
1829 bfile = 'b/' + m.group(2)
1830 while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
1830 while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
1831 gp = gitpatches.pop()
1831 gp = gitpatches.pop()
1832 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1832 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1833 if not gitpatches:
1833 if not gitpatches:
1834 raise PatchError(_('failed to synchronize metadata for "%s"')
1834 raise PatchError(_('failed to synchronize metadata for "%s"')
1835 % afile[2:])
1835 % afile[2:])
1836 gp = gitpatches[-1]
1836 gp = gitpatches[-1]
1837 newfile = True
1837 newfile = True
1838 elif x.startswith('---'):
1838 elif x.startswith('---'):
1839 # check for a unified diff
1839 # check for a unified diff
1840 l2 = lr.readline()
1840 l2 = lr.readline()
1841 if not l2.startswith('+++'):
1841 if not l2.startswith('+++'):
1842 lr.push(l2)
1842 lr.push(l2)
1843 continue
1843 continue
1844 newfile = True
1844 newfile = True
1845 context = False
1845 context = False
1846 afile = parsefilename(x)
1846 afile = parsefilename(x)
1847 bfile = parsefilename(l2)
1847 bfile = parsefilename(l2)
1848 elif x.startswith('***'):
1848 elif x.startswith('***'):
1849 # check for a context diff
1849 # check for a context diff
1850 l2 = lr.readline()
1850 l2 = lr.readline()
1851 if not l2.startswith('---'):
1851 if not l2.startswith('---'):
1852 lr.push(l2)
1852 lr.push(l2)
1853 continue
1853 continue
1854 l3 = lr.readline()
1854 l3 = lr.readline()
1855 lr.push(l3)
1855 lr.push(l3)
1856 if not l3.startswith("***************"):
1856 if not l3.startswith("***************"):
1857 lr.push(l2)
1857 lr.push(l2)
1858 continue
1858 continue
1859 newfile = True
1859 newfile = True
1860 context = True
1860 context = True
1861 afile = parsefilename(x)
1861 afile = parsefilename(x)
1862 bfile = parsefilename(l2)
1862 bfile = parsefilename(l2)
1863
1863
1864 if newfile:
1864 if newfile:
1865 newfile = False
1865 newfile = False
1866 emitfile = True
1866 emitfile = True
1867 state = BFILE
1867 state = BFILE
1868 hunknum = 0
1868 hunknum = 0
1869
1869
1870 while gitpatches:
1870 while gitpatches:
1871 gp = gitpatches.pop()
1871 gp = gitpatches.pop()
1872 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1872 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1873
1873
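A minimal sketch of consuming the event stream documented in the iterhunks() docstring above; fp is assumed to be any open file object containing patch text. The real consumer is _applydiff() further down, which reacts to the same three event types.

# Illustration only: count hunks per target file from the iterhunks()
# event stream.  The tuple shapes match the iterhunks() docstring.
def summarizepatch(fp):
    counts = {}
    current = None
    for state, values in iterhunks(fp):
        if state == 'file':
            afile, bfile, firsthunk, gp = values
            current = bfile or afile
            counts.setdefault(current, 0)
        elif state == 'hunk' and current is not None:
            counts[current] += 1
        elif state == 'git':
            # values is a list of gitpatch records for copies/renames
            pass
    return counts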
1874 def applybindelta(binchunk, data):
1874 def applybindelta(binchunk, data):
1875 """Apply a binary delta hunk
1875 """Apply a binary delta hunk
1876 The algorithm used is the algorithm from git's patch-delta.c
1876 The algorithm used is the algorithm from git's patch-delta.c
1877 """
1877 """
1878 def deltahead(binchunk):
1878 def deltahead(binchunk):
1879 i = 0
1879 i = 0
1880 for c in binchunk:
1880 for c in binchunk:
1881 i += 1
1881 i += 1
1882 if not (ord(c) & 0x80):
1882 if not (ord(c) & 0x80):
1883 return i
1883 return i
1884 return i
1884 return i
1885 out = ""
1885 out = ""
1886 s = deltahead(binchunk)
1886 s = deltahead(binchunk)
1887 binchunk = binchunk[s:]
1887 binchunk = binchunk[s:]
1888 s = deltahead(binchunk)
1888 s = deltahead(binchunk)
1889 binchunk = binchunk[s:]
1889 binchunk = binchunk[s:]
1890 i = 0
1890 i = 0
1891 while i < len(binchunk):
1891 while i < len(binchunk):
1892 cmd = ord(binchunk[i])
1892 cmd = ord(binchunk[i])
1893 i += 1
1893 i += 1
1894 if (cmd & 0x80):
1894 if (cmd & 0x80):
1895 offset = 0
1895 offset = 0
1896 size = 0
1896 size = 0
1897 if (cmd & 0x01):
1897 if (cmd & 0x01):
1898 offset = ord(binchunk[i])
1898 offset = ord(binchunk[i])
1899 i += 1
1899 i += 1
1900 if (cmd & 0x02):
1900 if (cmd & 0x02):
1901 offset |= ord(binchunk[i]) << 8
1901 offset |= ord(binchunk[i]) << 8
1902 i += 1
1902 i += 1
1903 if (cmd & 0x04):
1903 if (cmd & 0x04):
1904 offset |= ord(binchunk[i]) << 16
1904 offset |= ord(binchunk[i]) << 16
1905 i += 1
1905 i += 1
1906 if (cmd & 0x08):
1906 if (cmd & 0x08):
1907 offset |= ord(binchunk[i]) << 24
1907 offset |= ord(binchunk[i]) << 24
1908 i += 1
1908 i += 1
1909 if (cmd & 0x10):
1909 if (cmd & 0x10):
1910 size = ord(binchunk[i])
1910 size = ord(binchunk[i])
1911 i += 1
1911 i += 1
1912 if (cmd & 0x20):
1912 if (cmd & 0x20):
1913 size |= ord(binchunk[i]) << 8
1913 size |= ord(binchunk[i]) << 8
1914 i += 1
1914 i += 1
1915 if (cmd & 0x40):
1915 if (cmd & 0x40):
1916 size |= ord(binchunk[i]) << 16
1916 size |= ord(binchunk[i]) << 16
1917 i += 1
1917 i += 1
1918 if size == 0:
1918 if size == 0:
1919 size = 0x10000
1919 size = 0x10000
1920 offset_end = offset + size
1920 offset_end = offset + size
1921 out += data[offset:offset_end]
1921 out += data[offset:offset_end]
1922 elif cmd != 0:
1922 elif cmd != 0:
1923 offset_end = i + cmd
1923 offset_end = i + cmd
1924 out += binchunk[i:offset_end]
1924 out += binchunk[i:offset_end]
1925 i += cmd
1925 i += cmd
1926 else:
1926 else:
1927 raise PatchError(_('unexpected delta opcode 0'))
1927 raise PatchError(_('unexpected delta opcode 0'))
1928 return out
1928 return out
1929
1929
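A hand-built delta, to illustrate the encoding applybindelta() decodes: two length varints (parsed by deltahead() and discarded), then a copy opcode followed by a literal insert. The byte string below is fabricated for this sketch, not produced by Mercurial.

# Fabricated delta stream:
#   '\x04' '\x06'        source length 4, result length 6 (both discarded)
#   '\x91' '\x00' '\x04' cmd 0x91: copy; 0x01 bit -> offset byte 0x00,
#                        0x10 bit -> size byte 0x04
#   '\x02' 'hi'          cmd 2: insert the next 2 literal bytes
delta = '\x04\x06' + '\x91\x00\x04' + '\x02hi'
assert applybindelta(delta, 'abcd') == 'abcdhi'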
1930 def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
1930 def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
1931 """Reads a patch from fp and tries to apply it.
1931 """Reads a patch from fp and tries to apply it.
1932
1932
1933 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
1933 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
1934 there was any fuzz.
1934 there was any fuzz.
1935
1935
1936 If 'eolmode' is 'strict', the patch content and patched file are
1936 If 'eolmode' is 'strict', the patch content and patched file are
1937 read in binary mode. Otherwise, line endings are ignored when
1937 read in binary mode. Otherwise, line endings are ignored when
1938 patching then normalized according to 'eolmode'.
1938 patching then normalized according to 'eolmode'.
1939 """
1939 """
1940 return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
1940 return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
1941 prefix=prefix, eolmode=eolmode)
1941 prefix=prefix, eolmode=eolmode)
1942
1942
1943 def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
1943 def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
1944 eolmode='strict'):
1944 eolmode='strict'):
1945
1945
1946 if prefix:
1946 if prefix:
1947 prefix = pathutil.canonpath(backend.repo.root, backend.repo.getcwd(),
1947 prefix = pathutil.canonpath(backend.repo.root, backend.repo.getcwd(),
1948 prefix)
1948 prefix)
1949 if prefix != '':
1949 if prefix != '':
1950 prefix += '/'
1950 prefix += '/'
1951 def pstrip(p):
1951 def pstrip(p):
1952 return pathtransform(p, strip - 1, prefix)[1]
1952 return pathtransform(p, strip - 1, prefix)[1]
1953
1953
1954 rejects = 0
1954 rejects = 0
1955 err = 0
1955 err = 0
1956 current_file = None
1956 current_file = None
1957
1957
1958 for state, values in iterhunks(fp):
1958 for state, values in iterhunks(fp):
1959 if state == 'hunk':
1959 if state == 'hunk':
1960 if not current_file:
1960 if not current_file:
1961 continue
1961 continue
1962 ret = current_file.apply(values)
1962 ret = current_file.apply(values)
1963 if ret > 0:
1963 if ret > 0:
1964 err = 1
1964 err = 1
1965 elif state == 'file':
1965 elif state == 'file':
1966 if current_file:
1966 if current_file:
1967 rejects += current_file.close()
1967 rejects += current_file.close()
1968 current_file = None
1968 current_file = None
1969 afile, bfile, first_hunk, gp = values
1969 afile, bfile, first_hunk, gp = values
1970 if gp:
1970 if gp:
1971 gp.path = pstrip(gp.path)
1971 gp.path = pstrip(gp.path)
1972 if gp.oldpath:
1972 if gp.oldpath:
1973 gp.oldpath = pstrip(gp.oldpath)
1973 gp.oldpath = pstrip(gp.oldpath)
1974 else:
1974 else:
1975 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
1975 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
1976 prefix)
1976 prefix)
1977 if gp.op == 'RENAME':
1977 if gp.op == 'RENAME':
1978 backend.unlink(gp.oldpath)
1978 backend.unlink(gp.oldpath)
1979 if not first_hunk:
1979 if not first_hunk:
1980 if gp.op == 'DELETE':
1980 if gp.op == 'DELETE':
1981 backend.unlink(gp.path)
1981 backend.unlink(gp.path)
1982 continue
1982 continue
1983 data, mode = None, None
1983 data, mode = None, None
1984 if gp.op in ('RENAME', 'COPY'):
1984 if gp.op in ('RENAME', 'COPY'):
1985 data, mode = store.getfile(gp.oldpath)[:2]
1985 data, mode = store.getfile(gp.oldpath)[:2]
1986 if data is None:
1986 if data is None:
1987 # This means that the old path does not exist
1987 # This means that the old path does not exist
1988 raise PatchError(_("source file '%s' does not exist")
1988 raise PatchError(_("source file '%s' does not exist")
1989 % gp.oldpath)
1989 % gp.oldpath)
1990 if gp.mode:
1990 if gp.mode:
1991 mode = gp.mode
1991 mode = gp.mode
1992 if gp.op == 'ADD':
1992 if gp.op == 'ADD':
1993 # Added files without content have no hunk and
1993 # Added files without content have no hunk and
1994 # must be created
1994 # must be created
1995 data = ''
1995 data = ''
1996 if data or mode:
1996 if data or mode:
1997 if (gp.op in ('ADD', 'RENAME', 'COPY')
1997 if (gp.op in ('ADD', 'RENAME', 'COPY')
1998 and backend.exists(gp.path)):
1998 and backend.exists(gp.path)):
1999 raise PatchError(_("cannot create %s: destination "
1999 raise PatchError(_("cannot create %s: destination "
2000 "already exists") % gp.path)
2000 "already exists") % gp.path)
2001 backend.setfile(gp.path, data, mode, gp.oldpath)
2001 backend.setfile(gp.path, data, mode, gp.oldpath)
2002 continue
2002 continue
2003 try:
2003 try:
2004 current_file = patcher(ui, gp, backend, store,
2004 current_file = patcher(ui, gp, backend, store,
2005 eolmode=eolmode)
2005 eolmode=eolmode)
2006 except PatchError as inst:
2006 except PatchError as inst:
2007 ui.warn(str(inst) + '\n')
2007 ui.warn(str(inst) + '\n')
2008 current_file = None
2008 current_file = None
2009 rejects += 1
2009 rejects += 1
2010 continue
2010 continue
2011 elif state == 'git':
2011 elif state == 'git':
2012 for gp in values:
2012 for gp in values:
2013 path = pstrip(gp.oldpath)
2013 path = pstrip(gp.oldpath)
2014 data, mode = backend.getfile(path)
2014 data, mode = backend.getfile(path)
2015 if data is None:
2015 if data is None:
2016 # The error ignored here will trigger a getfile()
2016 # The error ignored here will trigger a getfile()
2017 # error in a place more appropriate for error
2017 # error in a place more appropriate for error
2018 # handling, and will not interrupt the patching
2018 # handling, and will not interrupt the patching
2019 # process.
2019 # process.
2020 pass
2020 pass
2021 else:
2021 else:
2022 store.setfile(path, data, mode)
2022 store.setfile(path, data, mode)
2023 else:
2023 else:
2024 raise error.Abort(_('unsupported parser state: %s') % state)
2024 raise error.Abort(_('unsupported parser state: %s') % state)
2025
2025
2026 if current_file:
2026 if current_file:
2027 rejects += current_file.close()
2027 rejects += current_file.close()
2028
2028
2029 if rejects:
2029 if rejects:
2030 return -1
2030 return -1
2031 return err
2031 return err
2032
2032
2033 def _externalpatch(ui, repo, patcher, patchname, strip, files,
2033 def _externalpatch(ui, repo, patcher, patchname, strip, files,
2034 similarity):
2034 similarity):
2035 """use <patcher> to apply <patchname> to the working directory.
2035 """use <patcher> to apply <patchname> to the working directory.
2036 returns whether patch was applied with fuzz factor."""
2036 returns whether patch was applied with fuzz factor."""
2037
2037
2038 fuzz = False
2038 fuzz = False
2039 args = []
2039 args = []
2040 cwd = repo.root
2040 cwd = repo.root
2041 if cwd:
2041 if cwd:
2042 args.append('-d %s' % util.shellquote(cwd))
2042 args.append('-d %s' % util.shellquote(cwd))
2043 fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
2043 fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
2044 util.shellquote(patchname)))
2044 util.shellquote(patchname)))
2045 try:
2045 try:
2046 for line in util.iterfile(fp):
2046 for line in util.iterfile(fp):
2047 line = line.rstrip()
2047 line = line.rstrip()
2048 ui.note(line + '\n')
2048 ui.note(line + '\n')
2049 if line.startswith('patching file '):
2049 if line.startswith('patching file '):
2050 pf = util.parsepatchoutput(line)
2050 pf = util.parsepatchoutput(line)
2051 printed_file = False
2051 printed_file = False
2052 files.add(pf)
2052 files.add(pf)
2053 elif line.find('with fuzz') >= 0:
2053 elif line.find('with fuzz') >= 0:
2054 fuzz = True
2054 fuzz = True
2055 if not printed_file:
2055 if not printed_file:
2056 ui.warn(pf + '\n')
2056 ui.warn(pf + '\n')
2057 printed_file = True
2057 printed_file = True
2058 ui.warn(line + '\n')
2058 ui.warn(line + '\n')
2059 elif line.find('saving rejects to file') >= 0:
2059 elif line.find('saving rejects to file') >= 0:
2060 ui.warn(line + '\n')
2060 ui.warn(line + '\n')
2061 elif line.find('FAILED') >= 0:
2061 elif line.find('FAILED') >= 0:
2062 if not printed_file:
2062 if not printed_file:
2063 ui.warn(pf + '\n')
2063 ui.warn(pf + '\n')
2064 printed_file = True
2064 printed_file = True
2065 ui.warn(line + '\n')
2065 ui.warn(line + '\n')
2066 finally:
2066 finally:
2067 if files:
2067 if files:
2068 scmutil.marktouched(repo, files, similarity)
2068 scmutil.marktouched(repo, files, similarity)
2069 code = fp.close()
2069 code = fp.close()
2070 if code:
2070 if code:
2071 raise PatchError(_("patch command failed: %s") %
2071 raise PatchError(_("patch command failed: %s") %
2072 util.explainexit(code)[0])
2072 util.explainexit(code)[0])
2073 return fuzz
2073 return fuzz
2074
2074
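A sketch of the command string the external code path assembles above; all values are illustrative and shellquote() output is shown for a POSIX shell.

# Reconstructing the pipeline opened by util.popen() above (sketch only):
patcher, strip, cwd, patchname = 'patch', 1, '/repo', 'fix.patch'
args = ['-d %s' % util.shellquote(cwd)]
cmd = '%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
                           util.shellquote(patchname))
# cmd is roughly: patch -d '/repo' -p1 < 'fix.patch'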
2075 def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
2075 def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
2076 eolmode='strict'):
2076 eolmode='strict'):
2077 if files is None:
2077 if files is None:
2078 files = set()
2078 files = set()
2079 if eolmode is None:
2079 if eolmode is None:
2080 eolmode = ui.config('patch', 'eol', 'strict')
2080 eolmode = ui.config('patch', 'eol')
2081 if eolmode.lower() not in eolmodes:
2081 if eolmode.lower() not in eolmodes:
2082 raise error.Abort(_('unsupported line endings type: %s') % eolmode)
2082 raise error.Abort(_('unsupported line endings type: %s') % eolmode)
2083 eolmode = eolmode.lower()
2083 eolmode = eolmode.lower()
2084
2084
2085 store = filestore()
2085 store = filestore()
2086 try:
2086 try:
2087 fp = open(patchobj, 'rb')
2087 fp = open(patchobj, 'rb')
2088 except TypeError:
2088 except TypeError:
2089 fp = patchobj
2089 fp = patchobj
2090 try:
2090 try:
2091 ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
2091 ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
2092 eolmode=eolmode)
2092 eolmode=eolmode)
2093 finally:
2093 finally:
2094 if fp != patchobj:
2094 if fp != patchobj:
2095 fp.close()
2095 fp.close()
2096 files.update(backend.close())
2096 files.update(backend.close())
2097 store.close()
2097 store.close()
2098 if ret < 0:
2098 if ret < 0:
2099 raise PatchError(_('patch failed to apply'))
2099 raise PatchError(_('patch failed to apply'))
2100 return ret > 0
2100 return ret > 0
2101
2101
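A sketch of what the eolmode=None fallback in patchbackend() resolves to now that 'patch.eol' is a registered config item (the point of this changeset); the repository path is an assumption.

# Illustration only: with nothing set in any hgrc, the registered default
# for 'patch.eol' is returned instead of None.
from mercurial import hg, ui as uimod

u = uimod.ui.load()
repo = hg.repository(u, '/path/to/repo')   # hypothetical repository
print(repo.ui.config('patch', 'eol'))      # -> 'strict' (registered default)
# A user setting such as "[patch] eol = auto" in hgrc, or
# --config patch.eol=auto, still overrides the registered default.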
2102 def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
2102 def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
2103 eolmode='strict', similarity=0):
2103 eolmode='strict', similarity=0):
2104 """use builtin patch to apply <patchobj> to the working directory.
2104 """use builtin patch to apply <patchobj> to the working directory.
2105 returns whether patch was applied with fuzz factor."""
2105 returns whether patch was applied with fuzz factor."""
2106 backend = workingbackend(ui, repo, similarity)
2106 backend = workingbackend(ui, repo, similarity)
2107 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2107 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2108
2108
2109 def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
2109 def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
2110 eolmode='strict'):
2110 eolmode='strict'):
2111 backend = repobackend(ui, repo, ctx, store)
2111 backend = repobackend(ui, repo, ctx, store)
2112 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2112 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2113
2113
2114 def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
2114 def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
2115 similarity=0):
2115 similarity=0):
2116 """Apply <patchname> to the working directory.
2116 """Apply <patchname> to the working directory.
2117
2117
2118 'eolmode' specifies how end of lines should be handled. It can be:
2118 'eolmode' specifies how end of lines should be handled. It can be:
2119 - 'strict': inputs are read in binary mode, EOLs are preserved
2119 - 'strict': inputs are read in binary mode, EOLs are preserved
2120 - 'crlf': EOLs are ignored when patching and reset to CRLF
2120 - 'crlf': EOLs are ignored when patching and reset to CRLF
2121 - 'lf': EOLs are ignored when patching and reset to LF
2121 - 'lf': EOLs are ignored when patching and reset to LF
2122 - None: get it from user settings, default to 'strict'
2122 - None: get it from user settings, default to 'strict'
2123 'eolmode' is ignored when using an external patcher program.
2123 'eolmode' is ignored when using an external patcher program.
2124
2124
2125 Returns whether patch was applied with fuzz factor.
2125 Returns whether patch was applied with fuzz factor.
2126 """
2126 """
2127 patcher = ui.config('ui', 'patch')
2127 patcher = ui.config('ui', 'patch')
2128 if files is None:
2128 if files is None:
2129 files = set()
2129 files = set()
2130 if patcher:
2130 if patcher:
2131 return _externalpatch(ui, repo, patcher, patchname, strip,
2131 return _externalpatch(ui, repo, patcher, patchname, strip,
2132 files, similarity)
2132 files, similarity)
2133 return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
2133 return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
2134 similarity)
2134 similarity)
2135
2135
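A minimal sketch of driving patch() from extension code; repo is assumed to be an already-open localrepository and 'fix.patch' an on-disk patch file.

# Apply a patch to the working directory; eolmode=None defers to the
# patch.eol config when the internal patcher is used (sketch only).
touched = set()
fuzz = patch(repo.ui, repo, 'fix.patch', strip=1, files=touched,
             eolmode=None)
if fuzz:
    repo.ui.warn('patch applied with fuzz\n')
repo.ui.write('touched: %s\n' % ', '.join(sorted(touched)))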
2136 def changedfiles(ui, repo, patchpath, strip=1):
2136 def changedfiles(ui, repo, patchpath, strip=1):
2137 backend = fsbackend(ui, repo.root)
2137 backend = fsbackend(ui, repo.root)
2138 with open(patchpath, 'rb') as fp:
2138 with open(patchpath, 'rb') as fp:
2139 changed = set()
2139 changed = set()
2140 for state, values in iterhunks(fp):
2140 for state, values in iterhunks(fp):
2141 if state == 'file':
2141 if state == 'file':
2142 afile, bfile, first_hunk, gp = values
2142 afile, bfile, first_hunk, gp = values
2143 if gp:
2143 if gp:
2144 gp.path = pathtransform(gp.path, strip - 1, '')[1]
2144 gp.path = pathtransform(gp.path, strip - 1, '')[1]
2145 if gp.oldpath:
2145 if gp.oldpath:
2146 gp.oldpath = pathtransform(gp.oldpath, strip - 1, '')[1]
2146 gp.oldpath = pathtransform(gp.oldpath, strip - 1, '')[1]
2147 else:
2147 else:
2148 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
2148 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
2149 '')
2149 '')
2150 changed.add(gp.path)
2150 changed.add(gp.path)
2151 if gp.op == 'RENAME':
2151 if gp.op == 'RENAME':
2152 changed.add(gp.oldpath)
2152 changed.add(gp.oldpath)
2153 elif state not in ('hunk', 'git'):
2153 elif state not in ('hunk', 'git'):
2154 raise error.Abort(_('unsupported parser state: %s') % state)
2154 raise error.Abort(_('unsupported parser state: %s') % state)
2155 return changed
2155 return changed
2156
2156
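changedfiles() only parses the patch and never touches the working directory. A sketch, with the repo object and patch file name assumed:

# List which paths a patch would touch, without applying it.
for path in sorted(changedfiles(repo.ui, repo, 'fix.patch', strip=1)):
    repo.ui.write('%s\n' % path)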
2157 class GitDiffRequired(Exception):
2157 class GitDiffRequired(Exception):
2158 pass
2158 pass
2159
2159
2160 def diffallopts(ui, opts=None, untrusted=False, section='diff'):
2160 def diffallopts(ui, opts=None, untrusted=False, section='diff'):
2161 '''return diffopts with all features supported and parsed'''
2161 '''return diffopts with all features supported and parsed'''
2162 return difffeatureopts(ui, opts=opts, untrusted=untrusted, section=section,
2162 return difffeatureopts(ui, opts=opts, untrusted=untrusted, section=section,
2163 git=True, whitespace=True, formatchanging=True)
2163 git=True, whitespace=True, formatchanging=True)
2164
2164
2165 diffopts = diffallopts
2165 diffopts = diffallopts
2166
2166
2167 def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
2167 def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
2168 whitespace=False, formatchanging=False):
2168 whitespace=False, formatchanging=False):
2169 '''return diffopts with only opted-in features parsed
2169 '''return diffopts with only opted-in features parsed
2170
2170
2171 Features:
2171 Features:
2172 - git: git-style diffs
2172 - git: git-style diffs
2173 - whitespace: whitespace options like ignoreblanklines and ignorews
2173 - whitespace: whitespace options like ignoreblanklines and ignorews
2174 - formatchanging: options that will likely break or cause correctness issues
2174 - formatchanging: options that will likely break or cause correctness issues
2175 with most diff parsers
2175 with most diff parsers
2176 '''
2176 '''
2177 def get(key, name=None, getter=ui.configbool, forceplain=None):
2177 def get(key, name=None, getter=ui.configbool, forceplain=None):
2178 if opts:
2178 if opts:
2179 v = opts.get(key)
2179 v = opts.get(key)
2180 # diffopts flags are either None-default (which is passed
2180 # diffopts flags are either None-default (which is passed
2181 # through unchanged, so we can identify unset values), or
2181 # through unchanged, so we can identify unset values), or
2182 # some other falsey default (eg --unified, which defaults
2182 # some other falsey default (eg --unified, which defaults
2183 # to an empty string). We only want to override the config
2183 # to an empty string). We only want to override the config
2184 # entries from hgrc with command line values if they
2184 # entries from hgrc with command line values if they
2185 # appear to have been set, which is any truthy value,
2185 # appear to have been set, which is any truthy value,
2186 # True, or False.
2186 # True, or False.
2187 if v or isinstance(v, bool):
2187 if v or isinstance(v, bool):
2188 return v
2188 return v
2189 if forceplain is not None and ui.plain():
2189 if forceplain is not None and ui.plain():
2190 return forceplain
2190 return forceplain
2191 return getter(section, name or key, None, untrusted=untrusted)
2191 return getter(section, name or key, None, untrusted=untrusted)
2192
2192
2193 # core options, expected to be understood by every diff parser
2193 # core options, expected to be understood by every diff parser
2194 buildopts = {
2194 buildopts = {
2195 'nodates': get('nodates'),
2195 'nodates': get('nodates'),
2196 'showfunc': get('show_function', 'showfunc'),
2196 'showfunc': get('show_function', 'showfunc'),
2197 'context': get('unified', getter=ui.config),
2197 'context': get('unified', getter=ui.config),
2198 }
2198 }
2199
2199
2200 if git:
2200 if git:
2201 buildopts['git'] = get('git')
2201 buildopts['git'] = get('git')
2202
2202
2203 # since this is in the experimental section, we need to call
2203 # since this is in the experimental section, we need to call
2204 # ui.configbool directly
2204 # ui.configbool directly
2205 buildopts['showsimilarity'] = ui.configbool('experimental',
2205 buildopts['showsimilarity'] = ui.configbool('experimental',
2206 'extendedheader.similarity')
2206 'extendedheader.similarity')
2207
2207
2208 # need to inspect the ui object instead of using get() since we want to
2208 # need to inspect the ui object instead of using get() since we want to
2209 # test for an int
2209 # test for an int
2210 hconf = ui.config('experimental', 'extendedheader.index')
2210 hconf = ui.config('experimental', 'extendedheader.index')
2211 if hconf is not None:
2211 if hconf is not None:
2212 hlen = None
2212 hlen = None
2213 try:
2213 try:
2214 # the hash config could be an integer (for length of hash) or a
2214 # the hash config could be an integer (for length of hash) or a
2215 # word (e.g. short, full, none)
2215 # word (e.g. short, full, none)
2216 hlen = int(hconf)
2216 hlen = int(hconf)
2217 if hlen < 0 or hlen > 40:
2217 if hlen < 0 or hlen > 40:
2218 msg = _("invalid length for extendedheader.index: '%d'\n")
2218 msg = _("invalid length for extendedheader.index: '%d'\n")
2219 ui.warn(msg % hlen)
2219 ui.warn(msg % hlen)
2220 except ValueError:
2220 except ValueError:
2221 # default value
2221 # default value
2222 if hconf == 'short' or hconf == '':
2222 if hconf == 'short' or hconf == '':
2223 hlen = 12
2223 hlen = 12
2224 elif hconf == 'full':
2224 elif hconf == 'full':
2225 hlen = 40
2225 hlen = 40
2226 elif hconf != 'none':
2226 elif hconf != 'none':
2227 msg = _("invalid value for extendedheader.index: '%s'\n")
2227 msg = _("invalid value for extendedheader.index: '%s'\n")
2228 ui.warn(msg % hconf)
2228 ui.warn(msg % hconf)
2229 finally:
2229 finally:
2230 buildopts['index'] = hlen
2230 buildopts['index'] = hlen
2231
2231
2232 if whitespace:
2232 if whitespace:
2233 buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
2233 buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
2234 buildopts['ignorewsamount'] = get('ignore_space_change',
2234 buildopts['ignorewsamount'] = get('ignore_space_change',
2235 'ignorewsamount')
2235 'ignorewsamount')
2236 buildopts['ignoreblanklines'] = get('ignore_blank_lines',
2236 buildopts['ignoreblanklines'] = get('ignore_blank_lines',
2237 'ignoreblanklines')
2237 'ignoreblanklines')
2238 if formatchanging:
2238 if formatchanging:
2239 buildopts['text'] = opts and opts.get('text')
2239 buildopts['text'] = opts and opts.get('text')
2240 binary = None if opts is None else opts.get('binary')
2240 binary = None if opts is None else opts.get('binary')
2241 buildopts['nobinary'] = (not binary if binary is not None
2241 buildopts['nobinary'] = (not binary if binary is not None
2242 else get('nobinary', forceplain=False))
2242 else get('nobinary', forceplain=False))
2243 buildopts['noprefix'] = get('noprefix', forceplain=False)
2243 buildopts['noprefix'] = get('noprefix', forceplain=False)
2244
2244
2245 return mdiff.diffopts(**pycompat.strkwargs(buildopts))
2245 return mdiff.diffopts(**pycompat.strkwargs(buildopts))
2246
2246
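A sketch of how a caller opts in to individual feature groups; only the requested groups of [diff] settings are consulted, everything else keeps its mdiff default. 'ui' is assumed to be an existing ui object.

# Honour git-style settings but ignore whitespace and format-changing
# options (illustrative call only).
dopts = difffeatureopts(ui, opts={'git': True}, git=True)
assert dopts.git
# diffallopts(ui) is the shorthand that enables every feature group.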
2247 def diff(repo, node1=None, node2=None, match=None, changes=None,
2247 def diff(repo, node1=None, node2=None, match=None, changes=None,
2248 opts=None, losedatafn=None, prefix='', relroot='', copy=None):
2248 opts=None, losedatafn=None, prefix='', relroot='', copy=None):
2249 '''yields diff of changes to files between two nodes, or node and
2249 '''yields diff of changes to files between two nodes, or node and
2250 working directory.
2250 working directory.
2251
2251
2252 if node1 is None, use first dirstate parent instead.
2252 if node1 is None, use first dirstate parent instead.
2253 if node2 is None, compare node1 with working directory.
2253 if node2 is None, compare node1 with working directory.
2254
2254
2255 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
2255 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
2256 every time some change cannot be represented with the current
2256 every time some change cannot be represented with the current
2257 patch format. Return False to upgrade to git patch format, True to
2257 patch format. Return False to upgrade to git patch format, True to
2258 accept the loss or raise an exception to abort the diff. It is
2258 accept the loss or raise an exception to abort the diff. It is
2259 called with the name of the current file being diffed as 'fn'. If set
2259 called with the name of the current file being diffed as 'fn'. If set
2260 to None, patches will always be upgraded to git format when
2260 to None, patches will always be upgraded to git format when
2261 necessary.
2261 necessary.
2262
2262
2263 prefix is a filename prefix that is prepended to all filenames on
2263 prefix is a filename prefix that is prepended to all filenames on
2264 display (used for subrepos).
2264 display (used for subrepos).
2265
2265
2266 relroot, if not empty, must be normalized with a trailing /. Any match
2266 relroot, if not empty, must be normalized with a trailing /. Any match
2267 patterns that fall outside it will be ignored.
2267 patterns that fall outside it will be ignored.
2268
2268
2269 copy, if not empty, should contain mappings {dst@y: src@x} of copy
2269 copy, if not empty, should contain mappings {dst@y: src@x} of copy
2270 information.'''
2270 information.'''
2271 for header, hunks in diffhunks(repo, node1=node1, node2=node2, match=match,
2271 for header, hunks in diffhunks(repo, node1=node1, node2=node2, match=match,
2272 changes=changes, opts=opts,
2272 changes=changes, opts=opts,
2273 losedatafn=losedatafn, prefix=prefix,
2273 losedatafn=losedatafn, prefix=prefix,
2274 relroot=relroot, copy=copy):
2274 relroot=relroot, copy=copy):
2275 text = ''.join(sum((list(hlines) for hrange, hlines in hunks), []))
2275 text = ''.join(sum((list(hlines) for hrange, hlines in hunks), []))
2276 if header and (text or len(header) > 1):
2276 if header and (text or len(header) > 1):
2277 yield '\n'.join(header) + '\n'
2277 yield '\n'.join(header) + '\n'
2278 if text:
2278 if text:
2279 yield text
2279 yield text
2280
2280
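A sketch of consuming diff(): each yielded chunk is already-formatted patch text, so writing the chunks in order reproduces the whole diff. repo is an assumption.

# Git-style diff of the working directory against its first parent.
dopts = diffallopts(repo.ui, opts={'git': True})
for chunk in diff(repo, node1=repo['.'].node(), node2=None, opts=dopts):
    repo.ui.write(chunk)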
2281 def diffhunks(repo, node1=None, node2=None, match=None, changes=None,
2281 def diffhunks(repo, node1=None, node2=None, match=None, changes=None,
2282 opts=None, losedatafn=None, prefix='', relroot='', copy=None):
2282 opts=None, losedatafn=None, prefix='', relroot='', copy=None):
2283 """Yield diff of changes to files in the form of (`header`, `hunks`) tuples
2283 """Yield diff of changes to files in the form of (`header`, `hunks`) tuples
2284 where `header` is a list of diff headers and `hunks` is an iterable of
2284 where `header` is a list of diff headers and `hunks` is an iterable of
2285 (`hunkrange`, `hunklines`) tuples.
2285 (`hunkrange`, `hunklines`) tuples.
2286
2286
2287 See diff() for the meaning of parameters.
2287 See diff() for the meaning of parameters.
2288 """
2288 """
2289
2289
2290 if opts is None:
2290 if opts is None:
2291 opts = mdiff.defaultopts
2291 opts = mdiff.defaultopts
2292
2292
2293 if not node1 and not node2:
2293 if not node1 and not node2:
2294 node1 = repo.dirstate.p1()
2294 node1 = repo.dirstate.p1()
2295
2295
2296 def lrugetfilectx():
2296 def lrugetfilectx():
2297 cache = {}
2297 cache = {}
2298 order = collections.deque()
2298 order = collections.deque()
2299 def getfilectx(f, ctx):
2299 def getfilectx(f, ctx):
2300 fctx = ctx.filectx(f, filelog=cache.get(f))
2300 fctx = ctx.filectx(f, filelog=cache.get(f))
2301 if f not in cache:
2301 if f not in cache:
2302 if len(cache) > 20:
2302 if len(cache) > 20:
2303 del cache[order.popleft()]
2303 del cache[order.popleft()]
2304 cache[f] = fctx.filelog()
2304 cache[f] = fctx.filelog()
2305 else:
2305 else:
2306 order.remove(f)
2306 order.remove(f)
2307 order.append(f)
2307 order.append(f)
2308 return fctx
2308 return fctx
2309 return getfilectx
2309 return getfilectx
2310 getfilectx = lrugetfilectx()
2310 getfilectx = lrugetfilectx()
2311
2311
2312 ctx1 = repo[node1]
2312 ctx1 = repo[node1]
2313 ctx2 = repo[node2]
2313 ctx2 = repo[node2]
2314
2314
2315 relfiltered = False
2315 relfiltered = False
2316 if relroot != '' and match.always():
2316 if relroot != '' and match.always():
2317 # as a special case, create a new matcher with just the relroot
2317 # as a special case, create a new matcher with just the relroot
2318 pats = [relroot]
2318 pats = [relroot]
2319 match = scmutil.match(ctx2, pats, default='path')
2319 match = scmutil.match(ctx2, pats, default='path')
2320 relfiltered = True
2320 relfiltered = True
2321
2321
2322 if not changes:
2322 if not changes:
2323 changes = repo.status(ctx1, ctx2, match=match)
2323 changes = repo.status(ctx1, ctx2, match=match)
2324 modified, added, removed = changes[:3]
2324 modified, added, removed = changes[:3]
2325
2325
2326 if not modified and not added and not removed:
2326 if not modified and not added and not removed:
2327 return []
2327 return []
2328
2328
2329 if repo.ui.debugflag:
2329 if repo.ui.debugflag:
2330 hexfunc = hex
2330 hexfunc = hex
2331 else:
2331 else:
2332 hexfunc = short
2332 hexfunc = short
2333 revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]
2333 revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]
2334
2334
2335 if copy is None:
2335 if copy is None:
2336 copy = {}
2336 copy = {}
2337 if opts.git or opts.upgrade:
2337 if opts.git or opts.upgrade:
2338 copy = copies.pathcopies(ctx1, ctx2, match=match)
2338 copy = copies.pathcopies(ctx1, ctx2, match=match)
2339
2339
2340 if relroot is not None:
2340 if relroot is not None:
2341 if not relfiltered:
2341 if not relfiltered:
2342 # XXX this would ideally be done in the matcher, but that is
2342 # XXX this would ideally be done in the matcher, but that is
2343 # generally meant to 'or' patterns, not 'and' them. In this case we
2343 # generally meant to 'or' patterns, not 'and' them. In this case we
2344 # need to 'and' all the patterns from the matcher with relroot.
2344 # need to 'and' all the patterns from the matcher with relroot.
2345 def filterrel(l):
2345 def filterrel(l):
2346 return [f for f in l if f.startswith(relroot)]
2346 return [f for f in l if f.startswith(relroot)]
2347 modified = filterrel(modified)
2347 modified = filterrel(modified)
2348 added = filterrel(added)
2348 added = filterrel(added)
2349 removed = filterrel(removed)
2349 removed = filterrel(removed)
2350 relfiltered = True
2350 relfiltered = True
2351 # filter out copies where either side isn't inside the relative root
2351 # filter out copies where either side isn't inside the relative root
2352 copy = dict(((dst, src) for (dst, src) in copy.iteritems()
2352 copy = dict(((dst, src) for (dst, src) in copy.iteritems()
2353 if dst.startswith(relroot)
2353 if dst.startswith(relroot)
2354 and src.startswith(relroot)))
2354 and src.startswith(relroot)))
2355
2355
2356 modifiedset = set(modified)
2356 modifiedset = set(modified)
2357 addedset = set(added)
2357 addedset = set(added)
2358 removedset = set(removed)
2358 removedset = set(removed)
2359 for f in modified:
2359 for f in modified:
2360 if f not in ctx1:
2360 if f not in ctx1:
2361 # Fix up added, since merged-in additions appear as
2361 # Fix up added, since merged-in additions appear as
2362 # modifications during merges
2362 # modifications during merges
2363 modifiedset.remove(f)
2363 modifiedset.remove(f)
2364 addedset.add(f)
2364 addedset.add(f)
2365 for f in removed:
2365 for f in removed:
2366 if f not in ctx1:
2366 if f not in ctx1:
2367 # Merged-in additions that are then removed are reported as removed.
2367 # Merged-in additions that are then removed are reported as removed.
2368 # They are not in ctx1, so we don't want to show them in the diff.
2368 # They are not in ctx1, so we don't want to show them in the diff.
2369 removedset.remove(f)
2369 removedset.remove(f)
2370 modified = sorted(modifiedset)
2370 modified = sorted(modifiedset)
2371 added = sorted(addedset)
2371 added = sorted(addedset)
2372 removed = sorted(removedset)
2372 removed = sorted(removedset)
2373 for dst, src in copy.items():
2373 for dst, src in copy.items():
2374 if src not in ctx1:
2374 if src not in ctx1:
2375 # Files merged in during a merge and then copied/renamed are
2375 # Files merged in during a merge and then copied/renamed are
2376 # reported as copies. We want to show them in the diff as additions.
2376 # reported as copies. We want to show them in the diff as additions.
2377 del copy[dst]
2377 del copy[dst]
2378
2378
2379 def difffn(opts, losedata):
2379 def difffn(opts, losedata):
2380 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2380 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2381 copy, getfilectx, opts, losedata, prefix, relroot)
2381 copy, getfilectx, opts, losedata, prefix, relroot)
2382 if opts.upgrade and not opts.git:
2382 if opts.upgrade and not opts.git:
2383 try:
2383 try:
2384 def losedata(fn):
2384 def losedata(fn):
2385 if not losedatafn or not losedatafn(fn=fn):
2385 if not losedatafn or not losedatafn(fn=fn):
2386 raise GitDiffRequired
2386 raise GitDiffRequired
2387 # Buffer the whole output until we are sure it can be generated
2387 # Buffer the whole output until we are sure it can be generated
2388 return list(difffn(opts.copy(git=False), losedata))
2388 return list(difffn(opts.copy(git=False), losedata))
2389 except GitDiffRequired:
2389 except GitDiffRequired:
2390 return difffn(opts.copy(git=True), None)
2390 return difffn(opts.copy(git=True), None)
2391 else:
2391 else:
2392 return difffn(opts, None)
2392 return difffn(opts, None)
2393
2393
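diff() above is essentially a flattening of this generator. A sketch of using the structured form directly, for example to count hunks per file header (repo assumed):

# Count hunks per file instead of rendering text (illustration only).
for header, hunks in diffhunks(repo, node1=None, node2=None,
                               opts=diffallopts(repo.ui)):
    nhunks = sum(1 for hunkrange, hunklines in hunks)
    name = header[0] if header else '(header suppressed)'
    repo.ui.write('%s: %d hunk(s)\n' % (name, nhunks))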
2394 def difflabel(func, *args, **kw):
2394 def difflabel(func, *args, **kw):
2395 '''yields 2-tuples of (output, label) based on the output of func()'''
2395 '''yields 2-tuples of (output, label) based on the output of func()'''
2396 headprefixes = [('diff', 'diff.diffline'),
2396 headprefixes = [('diff', 'diff.diffline'),
2397 ('copy', 'diff.extended'),
2397 ('copy', 'diff.extended'),
2398 ('rename', 'diff.extended'),
2398 ('rename', 'diff.extended'),
2399 ('old', 'diff.extended'),
2399 ('old', 'diff.extended'),
2400 ('new', 'diff.extended'),
2400 ('new', 'diff.extended'),
2401 ('deleted', 'diff.extended'),
2401 ('deleted', 'diff.extended'),
2402 ('index', 'diff.extended'),
2402 ('index', 'diff.extended'),
2403 ('similarity', 'diff.extended'),
2403 ('similarity', 'diff.extended'),
2404 ('---', 'diff.file_a'),
2404 ('---', 'diff.file_a'),
2405 ('+++', 'diff.file_b')]
2405 ('+++', 'diff.file_b')]
2406 textprefixes = [('@', 'diff.hunk'),
2406 textprefixes = [('@', 'diff.hunk'),
2407 ('-', 'diff.deleted'),
2407 ('-', 'diff.deleted'),
2408 ('+', 'diff.inserted')]
2408 ('+', 'diff.inserted')]
2409 head = False
2409 head = False
2410 for chunk in func(*args, **kw):
2410 for chunk in func(*args, **kw):
2411 lines = chunk.split('\n')
2411 lines = chunk.split('\n')
2412 for i, line in enumerate(lines):
2412 for i, line in enumerate(lines):
2413 if i != 0:
2413 if i != 0:
2414 yield ('\n', '')
2414 yield ('\n', '')
2415 if head:
2415 if head:
2416 if line.startswith('@'):
2416 if line.startswith('@'):
2417 head = False
2417 head = False
2418 else:
2418 else:
2419 if line and line[0] not in ' +-@\\':
2419 if line and line[0] not in ' +-@\\':
2420 head = True
2420 head = True
2421 stripline = line
2421 stripline = line
2422 diffline = False
2422 diffline = False
2423 if not head and line and line[0] in '+-':
2423 if not head and line and line[0] in '+-':
2424 # highlight tabs and trailing whitespace, but only in
2424 # highlight tabs and trailing whitespace, but only in
2425 # changed lines
2425 # changed lines
2426 stripline = line.rstrip()
2426 stripline = line.rstrip()
2427 diffline = True
2427 diffline = True
2428
2428
2429 prefixes = textprefixes
2429 prefixes = textprefixes
2430 if head:
2430 if head:
2431 prefixes = headprefixes
2431 prefixes = headprefixes
2432 for prefix, label in prefixes:
2432 for prefix, label in prefixes:
2433 if stripline.startswith(prefix):
2433 if stripline.startswith(prefix):
2434 if diffline:
2434 if diffline:
2435 for token in tabsplitter.findall(stripline):
2435 for token in tabsplitter.findall(stripline):
2436 if '\t' == token[0]:
2436 if '\t' == token[0]:
2437 yield (token, 'diff.tab')
2437 yield (token, 'diff.tab')
2438 else:
2438 else:
2439 yield (token, label)
2439 yield (token, label)
2440 else:
2440 else:
2441 yield (stripline, label)
2441 yield (stripline, label)
2442 break
2442 break
2443 else:
2443 else:
2444 yield (line, '')
2444 yield (line, '')
2445 if line != stripline:
2445 if line != stripline:
2446 yield (line[len(stripline):], 'diff.trailingwhitespace')
2446 yield (line[len(stripline):], 'diff.trailingwhitespace')
2447
2447
2448 def diffui(*args, **kw):
2448 def diffui(*args, **kw):
2449 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
2449 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
2450 return difflabel(diff, *args, **kw)
2450 return difflabel(diff, *args, **kw)
2451
2451
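The (output, label) pairs are meant to be passed straight to ui.write() so that colour effects attach to the right pieces; a sketch mirroring how diffui() is typically consumed (repo assumed):

# Colourised diff output (illustration only).
dopts = diffallopts(repo.ui, opts={'git': True})
for output, label in diffui(repo, node2=None, opts=dopts):
    repo.ui.write(output, label=label)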
2452 def _filepairs(modified, added, removed, copy, opts):
2452 def _filepairs(modified, added, removed, copy, opts):
2453 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2453 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2454 before and f2 is the name after. For added files, f1 will be None,
2454 before and f2 is the name after. For added files, f1 will be None,
2455 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2455 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2456 or 'rename' (the latter two only if opts.git is set).'''
2456 or 'rename' (the latter two only if opts.git is set).'''
2457 gone = set()
2457 gone = set()
2458
2458
2459 copyto = dict([(v, k) for k, v in copy.items()])
2459 copyto = dict([(v, k) for k, v in copy.items()])
2460
2460
2461 addedset, removedset = set(added), set(removed)
2461 addedset, removedset = set(added), set(removed)
2462
2462
2463 for f in sorted(modified + added + removed):
2463 for f in sorted(modified + added + removed):
2464 copyop = None
2464 copyop = None
2465 f1, f2 = f, f
2465 f1, f2 = f, f
2466 if f in addedset:
2466 if f in addedset:
2467 f1 = None
2467 f1 = None
2468 if f in copy:
2468 if f in copy:
2469 if opts.git:
2469 if opts.git:
2470 f1 = copy[f]
2470 f1 = copy[f]
2471 if f1 in removedset and f1 not in gone:
2471 if f1 in removedset and f1 not in gone:
2472 copyop = 'rename'
2472 copyop = 'rename'
2473 gone.add(f1)
2473 gone.add(f1)
2474 else:
2474 else:
2475 copyop = 'copy'
2475 copyop = 'copy'
2476 elif f in removedset:
2476 elif f in removedset:
2477 f2 = None
2477 f2 = None
2478 if opts.git:
2478 if opts.git:
2479 # have we already reported a copy above?
2479 # have we already reported a copy above?
2480 if (f in copyto and copyto[f] in addedset
2480 if (f in copyto and copyto[f] in addedset
2481 and copy[copyto[f]] == f):
2481 and copy[copyto[f]] == f):
2482 continue
2482 continue
2483 yield f1, f2, copyop
2483 yield f1, f2, copyop
2484
2484
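A small worked example of the pairing logic with fabricated inputs (no repository involved): with git diffs enabled, a removed file that is the source of a copy onto an added file collapses into a single rename pair.

# 'old.txt' was removed and 'new.txt' added with copy metadata pointing
# back at it, so the pair comes out as a rename (illustration only).
fakeopts = mdiff.diffopts(git=True)
pairs = list(_filepairs([], ['new.txt'], ['old.txt'],
                        {'new.txt': 'old.txt'}, fakeopts))
assert pairs == [('old.txt', 'new.txt', 'rename')]
# The removed side yields nothing of its own because it was already
# reported as the rename source.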
2485 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2485 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2486 copy, getfilectx, opts, losedatafn, prefix, relroot):
2486 copy, getfilectx, opts, losedatafn, prefix, relroot):
2487 '''given input data, generate a diff and yield it in blocks
2487 '''given input data, generate a diff and yield it in blocks
2488
2488
2489 If generating a diff would lose data like flags or binary data and
2489 If generating a diff would lose data like flags or binary data and
2490 losedatafn is not None, it will be called.
2490 losedatafn is not None, it will be called.
2491
2491
2492 relroot is removed and prefix is added to every path in the diff output.
2492 relroot is removed and prefix is added to every path in the diff output.
2493
2493
2494 If relroot is not empty, this function expects every path in modified,
2494 If relroot is not empty, this function expects every path in modified,
2495 added, removed and copy to start with it.'''
2495 added, removed and copy to start with it.'''
2496
2496
2497 def gitindex(text):
2497 def gitindex(text):
2498 if not text:
2498 if not text:
2499 text = ""
2499 text = ""
2500 l = len(text)
2500 l = len(text)
2501 s = hashlib.sha1('blob %d\0' % l)
2501 s = hashlib.sha1('blob %d\0' % l)
2502 s.update(text)
2502 s.update(text)
2503 return s.hexdigest()
2503 return s.hexdigest()
2504
2504
2505 if opts.noprefix:
2505 if opts.noprefix:
2506 aprefix = bprefix = ''
2506 aprefix = bprefix = ''
2507 else:
2507 else:
2508 aprefix = 'a/'
2508 aprefix = 'a/'
2509 bprefix = 'b/'
2509 bprefix = 'b/'
2510
2510
2511 def diffline(f, revs):
2511 def diffline(f, revs):
2512 revinfo = ' '.join(["-r %s" % rev for rev in revs])
2512 revinfo = ' '.join(["-r %s" % rev for rev in revs])
2513 return 'diff %s %s' % (revinfo, f)
2513 return 'diff %s %s' % (revinfo, f)
2514
2514
2515 def isempty(fctx):
2515 def isempty(fctx):
2516 return fctx is None or fctx.size() == 0
2516 return fctx is None or fctx.size() == 0
2517
2517
2518 date1 = util.datestr(ctx1.date())
2518 date1 = util.datestr(ctx1.date())
2519 date2 = util.datestr(ctx2.date())
2519 date2 = util.datestr(ctx2.date())
2520
2520
2521 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
2521 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
2522
2522
2523 if relroot != '' and (repo.ui.configbool('devel', 'all')
2523 if relroot != '' and (repo.ui.configbool('devel', 'all')
2524 or repo.ui.configbool('devel', 'check-relroot')):
2524 or repo.ui.configbool('devel', 'check-relroot')):
2525 for f in modified + added + removed + copy.keys() + copy.values():
2525 for f in modified + added + removed + copy.keys() + copy.values():
2526 if f is not None and not f.startswith(relroot):
2526 if f is not None and not f.startswith(relroot):
2527 raise AssertionError(
2527 raise AssertionError(
2528 "file %s doesn't start with relroot %s" % (f, relroot))
2528 "file %s doesn't start with relroot %s" % (f, relroot))
2529
2529
2530 for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
2530 for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
2531 content1 = None
2531 content1 = None
2532 content2 = None
2532 content2 = None
2533 fctx1 = None
2533 fctx1 = None
2534 fctx2 = None
2534 fctx2 = None
2535 flag1 = None
2535 flag1 = None
2536 flag2 = None
2536 flag2 = None
2537 if f1:
2537 if f1:
2538 fctx1 = getfilectx(f1, ctx1)
2538 fctx1 = getfilectx(f1, ctx1)
2539 if opts.git or losedatafn:
2539 if opts.git or losedatafn:
2540 flag1 = ctx1.flags(f1)
2540 flag1 = ctx1.flags(f1)
2541 if f2:
2541 if f2:
2542 fctx2 = getfilectx(f2, ctx2)
2542 fctx2 = getfilectx(f2, ctx2)
2543 if opts.git or losedatafn:
2543 if opts.git or losedatafn:
2544 flag2 = ctx2.flags(f2)
2544 flag2 = ctx2.flags(f2)
2545 # if binary is True, output "summary" or "base85", but not "text diff"
2545 # if binary is True, output "summary" or "base85", but not "text diff"
2546 binary = not opts.text and any(f.isbinary()
2546 binary = not opts.text and any(f.isbinary()
2547 for f in [fctx1, fctx2] if f is not None)
2547 for f in [fctx1, fctx2] if f is not None)
2548
2548
2549 if losedatafn and not opts.git:
2549 if losedatafn and not opts.git:
2550 if (binary or
2550 if (binary or
2551 # copy/rename
2551 # copy/rename
2552 f2 in copy or
2552 f2 in copy or
2553 # empty file creation
2553 # empty file creation
2554 (not f1 and isempty(fctx2)) or
2554 (not f1 and isempty(fctx2)) or
2555 # empty file deletion
2555 # empty file deletion
2556 (isempty(fctx1) and not f2) or
2556 (isempty(fctx1) and not f2) or
2557 # create with flags
2557 # create with flags
2558 (not f1 and flag2) or
2558 (not f1 and flag2) or
2559 # change flags
2559 # change flags
2560 (f1 and f2 and flag1 != flag2)):
2560 (f1 and f2 and flag1 != flag2)):
2561 losedatafn(f2 or f1)
2561 losedatafn(f2 or f1)
2562
2562
2563 path1 = f1 or f2
2563 path1 = f1 or f2
2564 path2 = f2 or f1
2564 path2 = f2 or f1
2565 path1 = posixpath.join(prefix, path1[len(relroot):])
2565 path1 = posixpath.join(prefix, path1[len(relroot):])
2566 path2 = posixpath.join(prefix, path2[len(relroot):])
2566 path2 = posixpath.join(prefix, path2[len(relroot):])
2567 header = []
2567 header = []
2568 if opts.git:
2568 if opts.git:
2569 header.append('diff --git %s%s %s%s' %
2569 header.append('diff --git %s%s %s%s' %
2570 (aprefix, path1, bprefix, path2))
2570 (aprefix, path1, bprefix, path2))
2571 if not f1: # added
2571 if not f1: # added
2572 header.append('new file mode %s' % gitmode[flag2])
2572 header.append('new file mode %s' % gitmode[flag2])
2573 elif not f2: # removed
2573 elif not f2: # removed
2574 header.append('deleted file mode %s' % gitmode[flag1])
2574 header.append('deleted file mode %s' % gitmode[flag1])
2575 else: # modified/copied/renamed
2575 else: # modified/copied/renamed
2576 mode1, mode2 = gitmode[flag1], gitmode[flag2]
2576 mode1, mode2 = gitmode[flag1], gitmode[flag2]
2577 if mode1 != mode2:
2577 if mode1 != mode2:
2578 header.append('old mode %s' % mode1)
2578 header.append('old mode %s' % mode1)
2579 header.append('new mode %s' % mode2)
2579 header.append('new mode %s' % mode2)
2580 if copyop is not None:
2580 if copyop is not None:
2581 if opts.showsimilarity:
2581 if opts.showsimilarity:
2582 sim = similar.score(ctx1[path1], ctx2[path2]) * 100
2582 sim = similar.score(ctx1[path1], ctx2[path2]) * 100
2583 header.append('similarity index %d%%' % sim)
2583 header.append('similarity index %d%%' % sim)
2584 header.append('%s from %s' % (copyop, path1))
2584 header.append('%s from %s' % (copyop, path1))
2585 header.append('%s to %s' % (copyop, path2))
2585 header.append('%s to %s' % (copyop, path2))
2586 elif revs and not repo.ui.quiet:
2586 elif revs and not repo.ui.quiet:
2587 header.append(diffline(path1, revs))
2587 header.append(diffline(path1, revs))
2588
2588
2589 # fctx.is | diffopts | what to | is fctx.data()
2589 # fctx.is | diffopts | what to | is fctx.data()
2590 # binary() | text nobinary git index | output? | outputted?
2590 # binary() | text nobinary git index | output? | outputted?
2591 # ------------------------------------|----------------------------
2591 # ------------------------------------|----------------------------
2592 # yes | no no no * | summary | no
2592 # yes | no no no * | summary | no
2593 # yes | no no yes * | base85 | yes
2593 # yes | no no yes * | base85 | yes
2594 # yes | no yes no * | summary | no
2594 # yes | no yes no * | summary | no
2595 # yes | no yes yes 0 | summary | no
2595 # yes | no yes yes 0 | summary | no
2596 # yes | no yes yes >0 | summary | semi [1]
2596 # yes | no yes yes >0 | summary | semi [1]
2597 # yes | yes * * * | text diff | yes
2597 # yes | yes * * * | text diff | yes
2598 # no | * * * * | text diff | yes
2598 # no | * * * * | text diff | yes
2599 # [1]: hash(fctx.data()) is outputted, so fctx.data() cannot be faked
2599 # [1]: hash(fctx.data()) is outputted, so fctx.data() cannot be faked
2600 if binary and (not opts.git or (opts.git and opts.nobinary and not
2600 if binary and (not opts.git or (opts.git and opts.nobinary and not
2601 opts.index)):
2601 opts.index)):
2602 # fast path: no binary content will be displayed, content1 and
2602 # fast path: no binary content will be displayed, content1 and
2603 # content2 are only used for an equivalence test. cmp() could have a
2603 # content2 are only used for an equivalence test. cmp() could have a
2604 # fast path.
2604 # fast path.
2605 if fctx1 is not None:
2605 if fctx1 is not None:
2606 content1 = b'\0'
2606 content1 = b'\0'
2607 if fctx2 is not None:
2607 if fctx2 is not None:
2608 if fctx1 is not None and not fctx1.cmp(fctx2):
2608 if fctx1 is not None and not fctx1.cmp(fctx2):
2609 content2 = b'\0' # not different
2609 content2 = b'\0' # not different
2610 else:
2610 else:
2611 content2 = b'\0\0'
2611 content2 = b'\0\0'
2612 else:
2612 else:
2613 # normal path: load contents
2613 # normal path: load contents
2614 if fctx1 is not None:
2614 if fctx1 is not None:
2615 content1 = fctx1.data()
2615 content1 = fctx1.data()
2616 if fctx2 is not None:
2616 if fctx2 is not None:
2617 content2 = fctx2.data()
2617 content2 = fctx2.data()
2618
2618
2619 if binary and opts.git and not opts.nobinary:
2619 if binary and opts.git and not opts.nobinary:
2620 text = mdiff.b85diff(content1, content2)
2620 text = mdiff.b85diff(content1, content2)
2621 if text:
2621 if text:
2622 header.append('index %s..%s' %
2622 header.append('index %s..%s' %
2623 (gitindex(content1), gitindex(content2)))
2623 (gitindex(content1), gitindex(content2)))
2624 hunks = (None, [text]),
2624 hunks = (None, [text]),
2625 else:
2625 else:
2626 if opts.git and opts.index > 0:
2626 if opts.git and opts.index > 0:
2627 flag = flag1
2627 flag = flag1
2628 if flag is None:
2628 if flag is None:
2629 flag = flag2
2629 flag = flag2
2630 header.append('index %s..%s %s' %
2630 header.append('index %s..%s %s' %
2631 (gitindex(content1)[0:opts.index],
2631 (gitindex(content1)[0:opts.index],
2632 gitindex(content2)[0:opts.index],
2632 gitindex(content2)[0:opts.index],
2633 gitmode[flag]))
2633 gitmode[flag]))
2634
2634
2635 uheaders, hunks = mdiff.unidiff(content1, date1,
2635 uheaders, hunks = mdiff.unidiff(content1, date1,
2636 content2, date2,
2636 content2, date2,
2637 path1, path2, opts=opts)
2637 path1, path2, opts=opts)
2638 header.extend(uheaders)
2638 header.extend(uheaders)
2639 yield header, hunks
2639 yield header, hunks
2640
2640
2641 def diffstatsum(stats):
2641 def diffstatsum(stats):
2642 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
2642 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
2643 for f, a, r, b in stats:
2643 for f, a, r, b in stats:
2644 maxfile = max(maxfile, encoding.colwidth(f))
2644 maxfile = max(maxfile, encoding.colwidth(f))
2645 maxtotal = max(maxtotal, a + r)
2645 maxtotal = max(maxtotal, a + r)
2646 addtotal += a
2646 addtotal += a
2647 removetotal += r
2647 removetotal += r
2648 binary = binary or b
2648 binary = binary or b
2649
2649
2650 return maxfile, maxtotal, addtotal, removetotal, binary
2650 return maxfile, maxtotal, addtotal, removetotal, binary
2651
2651
2652 def diffstatdata(lines):
2652 def diffstatdata(lines):
2653 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
2653 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
2654
2654
2655 results = []
2655 results = []
2656 filename, adds, removes, isbinary = None, 0, 0, False
2656 filename, adds, removes, isbinary = None, 0, 0, False
2657
2657
2658 def addresult():
2658 def addresult():
2659 if filename:
2659 if filename:
2660 results.append((filename, adds, removes, isbinary))
2660 results.append((filename, adds, removes, isbinary))
2661
2661
2662 # inheader is used to track if a line is in the
2662 # inheader is used to track if a line is in the
2663 # header portion of the diff. This helps properly account
2663 # header portion of the diff. This helps properly account
2664 # for lines that start with '--' or '++'
2664 # for lines that start with '--' or '++'
2665 inheader = False
2665 inheader = False
2666
2666
2667 for line in lines:
2667 for line in lines:
2668 if line.startswith('diff'):
2668 if line.startswith('diff'):
2669 addresult()
2669 addresult()
2670 # starting a new file diff
2670 # starting a new file diff
2671 # set numbers to 0 and reset inheader
2671 # set numbers to 0 and reset inheader
2672 inheader = True
2672 inheader = True
2673 adds, removes, isbinary = 0, 0, False
2673 adds, removes, isbinary = 0, 0, False
2674 if line.startswith('diff --git a/'):
2674 if line.startswith('diff --git a/'):
2675 filename = gitre.search(line).group(2)
2675 filename = gitre.search(line).group(2)
2676 elif line.startswith('diff -r'):
2676 elif line.startswith('diff -r'):
2677 # format: "diff -r ... -r ... filename"
2677 # format: "diff -r ... -r ... filename"
2678 filename = diffre.search(line).group(1)
2678 filename = diffre.search(line).group(1)
2679 elif line.startswith('@@'):
2679 elif line.startswith('@@'):
2680 inheader = False
2680 inheader = False
2681 elif line.startswith('+') and not inheader:
2681 elif line.startswith('+') and not inheader:
2682 adds += 1
2682 adds += 1
2683 elif line.startswith('-') and not inheader:
2683 elif line.startswith('-') and not inheader:
2684 removes += 1
2684 removes += 1
2685 elif (line.startswith('GIT binary patch') or
2685 elif (line.startswith('GIT binary patch') or
2686 line.startswith('Binary file')):
2686 line.startswith('Binary file')):
2687 isbinary = True
2687 isbinary = True
2688 addresult()
2688 addresult()
2689 return results
2689 return results
2690
2690
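A tiny worked example with a fabricated git-style diff; only the 'diff' header, the '@@' marker and the '+'/'-' lines matter to the parser.

# Fabricated input: one file with 2 added lines and 1 removed line.
sample = '''diff --git a/foo.py b/foo.py
--- a/foo.py
+++ b/foo.py
@@ -1,2 +1,3 @@
-old line
+new line
+another line
'''.splitlines(True)
assert diffstatdata(sample) == [('foo.py', 2, 1, False)]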
2691 def diffstat(lines, width=80):
2691 def diffstat(lines, width=80):
2692 output = []
2692 output = []
2693 stats = diffstatdata(lines)
2693 stats = diffstatdata(lines)
2694 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
2694 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
2695
2695
2696 countwidth = len(str(maxtotal))
2696 countwidth = len(str(maxtotal))
2697 if hasbinary and countwidth < 3:
2697 if hasbinary and countwidth < 3:
2698 countwidth = 3
2698 countwidth = 3
2699 graphwidth = width - countwidth - maxname - 6
2699 graphwidth = width - countwidth - maxname - 6
2700 if graphwidth < 10:
2700 if graphwidth < 10:
2701 graphwidth = 10
2701 graphwidth = 10
2702
2702
2703 def scale(i):
2703 def scale(i):
2704 if maxtotal <= graphwidth:
2704 if maxtotal <= graphwidth:
2705 return i
2705 return i
2706 # If diffstat runs out of room it doesn't print anything,
2706 # If diffstat runs out of room it doesn't print anything,
2707 # which isn't very useful, so always print at least one + or -
2707 # which isn't very useful, so always print at least one + or -
2708 # if there were at least some changes.
2708 # if there were at least some changes.
2709 return max(i * graphwidth // maxtotal, int(bool(i)))
2709 return max(i * graphwidth // maxtotal, int(bool(i)))
2710
2710
2711 for filename, adds, removes, isbinary in stats:
2711 for filename, adds, removes, isbinary in stats:
2712 if isbinary:
2712 if isbinary:
2713 count = 'Bin'
2713 count = 'Bin'
2714 else:
2714 else:
2715 count = '%d' % (adds + removes)
2715 count = '%d' % (adds + removes)
2716 pluses = '+' * scale(adds)
2716 pluses = '+' * scale(adds)
2717 minuses = '-' * scale(removes)
2717 minuses = '-' * scale(removes)
2718 output.append(' %s%s | %*s %s%s\n' %
2718 output.append(' %s%s | %*s %s%s\n' %
2719 (filename, ' ' * (maxname - encoding.colwidth(filename)),
2719 (filename, ' ' * (maxname - encoding.colwidth(filename)),
2720 countwidth, count, pluses, minuses))
2720 countwidth, count, pluses, minuses))
2721
2721
2722 if stats:
2722 if stats:
2723 output.append(_(' %d files changed, %d insertions(+), '
2723 output.append(_(' %d files changed, %d insertions(+), '
2724 '%d deletions(-)\n')
2724 '%d deletions(-)\n')
2725 % (len(stats), totaladds, totalremoves))
2725 % (len(stats), totaladds, totalremoves))
2726
2726
2727 return ''.join(output)
2727 return ''.join(output)
2728
2728
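Continuing the fabricated sample from the diffstatdata() sketch above, diffstat() renders those counts as the familiar histogram; exact spacing depends on the width argument.

# Prints something like:
#  foo.py | 3 ++-
#  1 files changed, 2 insertions(+), 1 deletions(-)
print(diffstat(sample, width=40))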
2729 def diffstatui(*args, **kw):
2729 def diffstatui(*args, **kw):
2730 '''like diffstat(), but yields 2-tuples of (output, label) for
2730 '''like diffstat(), but yields 2-tuples of (output, label) for
2731 ui.write()
2731 ui.write()
2732 '''
2732 '''
2733
2733
2734 for line in diffstat(*args, **kw).splitlines():
2734 for line in diffstat(*args, **kw).splitlines():
2735 if line and line[-1] in '+-':
2735 if line and line[-1] in '+-':
2736 name, graph = line.rsplit(' ', 1)
2736 name, graph = line.rsplit(' ', 1)
2737 yield (name + ' ', '')
2737 yield (name + ' ', '')
2738 m = re.search(br'\++', graph)
2738 m = re.search(br'\++', graph)
2739 if m:
2739 if m:
2740 yield (m.group(0), 'diffstat.inserted')
2740 yield (m.group(0), 'diffstat.inserted')
2741 m = re.search(br'-+', graph)
2741 m = re.search(br'-+', graph)
2742 if m:
2742 if m:
2743 yield (m.group(0), 'diffstat.deleted')
2743 yield (m.group(0), 'diffstat.deleted')
2744 else:
2744 else:
2745 yield (line, '')
2745 yield (line, '')
2746 yield ('\n', '')
2746 yield ('\n', '')