##// END OF EJS Templates
patch: refactor file creation/removal detection...
Patrick Mezard -
r14451:c78d41db default
parent child Browse files
Show More
@@ -1,691 +1,691 b''
1 # keyword.py - $Keyword$ expansion for Mercurial
1 # keyword.py - $Keyword$ expansion for Mercurial
2 #
2 #
3 # Copyright 2007-2010 Christian Ebert <blacktrash@gmx.net>
3 # Copyright 2007-2010 Christian Ebert <blacktrash@gmx.net>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 #
7 #
8 # $Id$
8 # $Id$
9 #
9 #
10 # Keyword expansion hack against the grain of a DSCM
10 # Keyword expansion hack against the grain of a DSCM
11 #
11 #
12 # There are many good reasons why this is not needed in a distributed
12 # There are many good reasons why this is not needed in a distributed
13 # SCM, still it may be useful in very small projects based on single
13 # SCM, still it may be useful in very small projects based on single
14 # files (like LaTeX packages), that are mostly addressed to an
14 # files (like LaTeX packages), that are mostly addressed to an
15 # audience not running a version control system.
15 # audience not running a version control system.
16 #
16 #
17 # For in-depth discussion refer to
17 # For in-depth discussion refer to
18 # <http://mercurial.selenic.com/wiki/KeywordPlan>.
18 # <http://mercurial.selenic.com/wiki/KeywordPlan>.
19 #
19 #
20 # Keyword expansion is based on Mercurial's changeset template mappings.
20 # Keyword expansion is based on Mercurial's changeset template mappings.
21 #
21 #
22 # Binary files are not touched.
22 # Binary files are not touched.
23 #
23 #
24 # Files to act upon/ignore are specified in the [keyword] section.
24 # Files to act upon/ignore are specified in the [keyword] section.
25 # Customized keyword template mappings in the [keywordmaps] section.
25 # Customized keyword template mappings in the [keywordmaps] section.
26 #
26 #
27 # Run "hg help keyword" and "hg kwdemo" to get info on configuration.
27 # Run "hg help keyword" and "hg kwdemo" to get info on configuration.
28
28
29 '''expand keywords in tracked files
29 '''expand keywords in tracked files
30
30
31 This extension expands RCS/CVS-like or self-customized $Keywords$ in
31 This extension expands RCS/CVS-like or self-customized $Keywords$ in
32 tracked text files selected by your configuration.
32 tracked text files selected by your configuration.
33
33
34 Keywords are only expanded in local repositories and not stored in the
34 Keywords are only expanded in local repositories and not stored in the
35 change history. The mechanism can be regarded as a convenience for the
35 change history. The mechanism can be regarded as a convenience for the
36 current user or for archive distribution.
36 current user or for archive distribution.
37
37
38 Keywords expand to the changeset data pertaining to the latest change
38 Keywords expand to the changeset data pertaining to the latest change
39 relative to the working directory parent of each file.
39 relative to the working directory parent of each file.
40
40
41 Configuration is done in the [keyword], [keywordset] and [keywordmaps]
41 Configuration is done in the [keyword], [keywordset] and [keywordmaps]
42 sections of hgrc files.
42 sections of hgrc files.
43
43
44 Example::
44 Example::
45
45
46 [keyword]
46 [keyword]
47 # expand keywords in every python file except those matching "x*"
47 # expand keywords in every python file except those matching "x*"
48 **.py =
48 **.py =
49 x* = ignore
49 x* = ignore
50
50
51 [keywordset]
51 [keywordset]
52 # prefer svn- over cvs-like default keywordmaps
52 # prefer svn- over cvs-like default keywordmaps
53 svn = True
53 svn = True
54
54
55 .. note::
55 .. note::
56 The more specific you are in your filename patterns the less you
56 The more specific you are in your filename patterns the less you
57 lose speed in huge repositories.
57 lose speed in huge repositories.
58
58
59 For [keywordmaps] template mapping and expansion demonstration and
59 For [keywordmaps] template mapping and expansion demonstration and
60 control run :hg:`kwdemo`. See :hg:`help templates` for a list of
60 control run :hg:`kwdemo`. See :hg:`help templates` for a list of
61 available templates and filters.
61 available templates and filters.
62
62
63 Three additional date template filters are provided:
63 Three additional date template filters are provided:
64
64
65 :``utcdate``: "2006/09/18 15:13:13"
65 :``utcdate``: "2006/09/18 15:13:13"
66 :``svnutcdate``: "2006-09-18 15:13:13Z"
66 :``svnutcdate``: "2006-09-18 15:13:13Z"
67 :``svnisodate``: "2006-09-18 08:13:13 -700 (Mon, 18 Sep 2006)"
67 :``svnisodate``: "2006-09-18 08:13:13 -700 (Mon, 18 Sep 2006)"
68
68
69 The default template mappings (view with :hg:`kwdemo -d`) can be
69 The default template mappings (view with :hg:`kwdemo -d`) can be
70 replaced with customized keywords and templates. Again, run
70 replaced with customized keywords and templates. Again, run
71 :hg:`kwdemo` to control the results of your configuration changes.
71 :hg:`kwdemo` to control the results of your configuration changes.
72
72
73 Before changing/disabling active keywords, you must run :hg:`kwshrink`
73 Before changing/disabling active keywords, you must run :hg:`kwshrink`
74 to avoid storing expanded keywords in the change history.
74 to avoid storing expanded keywords in the change history.
75
75
76 To force expansion after enabling it, or a configuration change, run
76 To force expansion after enabling it, or a configuration change, run
77 :hg:`kwexpand`.
77 :hg:`kwexpand`.
78
78
79 Expansions spanning more than one line and incremental expansions,
79 Expansions spanning more than one line and incremental expansions,
80 like CVS' $Log$, are not supported. A keyword template map "Log =
80 like CVS' $Log$, are not supported. A keyword template map "Log =
81 {desc}" expands to the first line of the changeset description.
81 {desc}" expands to the first line of the changeset description.
82 '''
82 '''
83
83
84 from mercurial import commands, context, cmdutil, dispatch, filelog, extensions
84 from mercurial import commands, context, cmdutil, dispatch, filelog, extensions
85 from mercurial import localrepo, match, patch, templatefilters, templater, util
85 from mercurial import localrepo, match, patch, templatefilters, templater, util
86 from mercurial import scmutil
86 from mercurial import scmutil
87 from mercurial.hgweb import webcommands
87 from mercurial.hgweb import webcommands
88 from mercurial.i18n import _
88 from mercurial.i18n import _
89 import os, re, shutil, tempfile
89 import os, re, shutil, tempfile
90
90
# listing kwdemo in optionalrepo lets it run without an existing repository
# (it creates its own temporary one below)
commands.optionalrepo += ' kwdemo'

# command table populated by the @command decorator used below
cmdtable = {}
command = cmdutil.command(cmdtable)

# hg commands that do not act on keywords
nokwcommands = ('add addremove annotate bundle export grep incoming init log'
                ' outgoing push tip verify convert email glog')

# hg commands that trigger expansion only when writing to working dir,
# not when reading filelog, and unexpand when reading from working dir
restricted = 'merge kwexpand kwshrink record qrecord resolve transplant'

# names of extensions using dorecord
recordextensions = 'record'

# color/effect labels used for "hg kwfiles" output by the color extension
colortable = {
    'kwfiles.enabled': 'green bold',
    'kwfiles.deleted': 'cyan bold underline',
    'kwfiles.enabledunknown': 'green',
    'kwfiles.ignored': 'bold',
    'kwfiles.ignoredunknown': 'none'
}
114
114
# date like in cvs' $Date
def utcdate(text):
    ''':utcdate: Date. Returns a UTC-date in this format: "2009/08/18 11:00:13".
    '''
    # zero the timezone offset so the timestamp is rendered as UTC
    dateinfo = (text[0], 0)
    return util.datestr(dateinfo, '%Y/%m/%d %H:%M:%S')
# date like in svn's $Date
def svnisodate(text):
    ''':svnisodate: Date. Returns a date in this format: "2009-08-18 13:00:13
    +0200 (Tue, 18 Aug 2009)".
    '''
    # unlike the UTC filters, the original timezone offset is preserved here
    fmt = '%Y-%m-%d %H:%M:%S %1%2 (%a, %d %b %Y)'
    return util.datestr(text, fmt)
# date like in svn's $Id
def svnutcdate(text):
    ''':svnutcdate: Date. Returns a UTC-date in this format: "2009-08-18
    11:00:13Z".
    '''
    # zero the timezone offset so the timestamp is rendered as UTC
    stamp = (text[0], 0)
    return util.datestr(stamp, '%Y-%m-%d %H:%M:%SZ')
132
132
# register the three date filters so templates can use e.g. {date|utcdate}
templatefilters.filters.update({'utcdate': utcdate,
                                'svnisodate': svnisodate,
                                'svnutcdate': svnutcdate})

# make keyword tools accessible
# 'templater' holds the active kwtemplater instance (set in reposetup,
# presumably — defined outside this view); 'hgcmd' the current command name
kwtools = {'templater': None, 'hgcmd': ''}
139
139
140 def _defaultkwmaps(ui):
140 def _defaultkwmaps(ui):
141 '''Returns default keywordmaps according to keywordset configuration.'''
141 '''Returns default keywordmaps according to keywordset configuration.'''
142 templates = {
142 templates = {
143 'Revision': '{node|short}',
143 'Revision': '{node|short}',
144 'Author': '{author|user}',
144 'Author': '{author|user}',
145 }
145 }
146 kwsets = ({
146 kwsets = ({
147 'Date': '{date|utcdate}',
147 'Date': '{date|utcdate}',
148 'RCSfile': '{file|basename},v',
148 'RCSfile': '{file|basename},v',
149 'RCSFile': '{file|basename},v', # kept for backwards compatibility
149 'RCSFile': '{file|basename},v', # kept for backwards compatibility
150 # with hg-keyword
150 # with hg-keyword
151 'Source': '{root}/{file},v',
151 'Source': '{root}/{file},v',
152 'Id': '{file|basename},v {node|short} {date|utcdate} {author|user}',
152 'Id': '{file|basename},v {node|short} {date|utcdate} {author|user}',
153 'Header': '{root}/{file},v {node|short} {date|utcdate} {author|user}',
153 'Header': '{root}/{file},v {node|short} {date|utcdate} {author|user}',
154 }, {
154 }, {
155 'Date': '{date|svnisodate}',
155 'Date': '{date|svnisodate}',
156 'Id': '{file|basename},v {node|short} {date|svnutcdate} {author|user}',
156 'Id': '{file|basename},v {node|short} {date|svnutcdate} {author|user}',
157 'LastChangedRevision': '{node|short}',
157 'LastChangedRevision': '{node|short}',
158 'LastChangedBy': '{author|user}',
158 'LastChangedBy': '{author|user}',
159 'LastChangedDate': '{date|svnisodate}',
159 'LastChangedDate': '{date|svnisodate}',
160 })
160 })
161 templates.update(kwsets[ui.configbool('keywordset', 'svn')])
161 templates.update(kwsets[ui.configbool('keywordset', 'svn')])
162 return templates
162 return templates
163
163
164 def _shrinktext(text, subfunc):
164 def _shrinktext(text, subfunc):
165 '''Helper for keyword expansion removal in text.
165 '''Helper for keyword expansion removal in text.
166 Depending on subfunc also returns number of substitutions.'''
166 Depending on subfunc also returns number of substitutions.'''
167 return subfunc(r'$\1$', text)
167 return subfunc(r'$\1$', text)
168
168
169 def _preselect(wstatus, changed):
169 def _preselect(wstatus, changed):
170 '''Retrieves modfied and added files from a working directory state
170 '''Retrieves modfied and added files from a working directory state
171 and returns the subset of each contained in given changed files
171 and returns the subset of each contained in given changed files
172 retrieved from a change context.'''
172 retrieved from a change context.'''
173 modified, added = wstatus[:2]
173 modified, added = wstatus[:2]
174 modified = [f for f in modified if f in changed]
174 modified = [f for f in modified if f in changed]
175 added = [f for f in added if f in changed]
175 added = [f for f in added if f in changed]
176 return modified, added
176 return modified, added
177
177
178
178
class kwtemplater(object):
    '''
    Sets up keyword templates, corresponding keyword regex, and
    provides keyword substitution functions.
    '''

    def __init__(self, ui, repo, inc, exc):
        self.ui = ui
        self.repo = repo
        # matcher built from the [keyword] include/exclude patterns
        self.match = match.match(repo.root, '', [], inc, exc)
        # restricted commands unexpand on wdir read and only expand on write
        self.restrict = kwtools['hgcmd'] in restricted.split()
        self.record = False

        kwmaps = self.ui.configitems('keywordmaps')
        if kwmaps: # override default templates
            self.templates = dict((k, templater.parsestring(v, False))
                                  for k, v in kwmaps)
        else:
            self.templates = _defaultkwmaps(self.ui)

    @util.propertycache
    def escape(self):
        '''Returns bar-separated and escaped keywords.'''
        return '|'.join(map(re.escape, self.templates.keys()))

    @util.propertycache
    def rekw(self):
        '''Returns regex for unexpanded keywords.'''
        # matches e.g. "$Id$"
        return re.compile(r'\$(%s)\$' % self.escape)

    @util.propertycache
    def rekwexp(self):
        '''Returns regex for expanded keywords.'''
        # matches e.g. "$Id: value $"; the non-greedy body stops at '$' or EOL
        return re.compile(r'\$(%s): [^$\n\r]*? \$' % self.escape)

    def substitute(self, data, path, ctx, subfunc):
        '''Replaces keywords in data with expanded template.
        subfunc is sub or subn of rekw/rekwexp, so the result is either the
        new text or a (text, count) pair.'''
        def kwsub(mobj):
            kw = mobj.group(1)
            ct = cmdutil.changeset_templater(self.ui, self.repo,
                                             False, None, '', False)
            ct.use_template(self.templates[kw])
            # render the template into a buffer instead of the real output
            self.ui.pushbuffer()
            ct.show(ctx, root=self.repo.root, file=path)
            # keyword expansions must stay on one line
            ekw = templatefilters.firstline(self.ui.popbuffer())
            return '$%s: %s $' % (kw, ekw)
        return subfunc(kwsub, data)

    def linkctx(self, path, fileid):
        '''Similar to filelog.linkrev, but returns a changectx.'''
        return self.repo.filectx(path, fileid=fileid).changectx()

    def expand(self, path, node, data):
        '''Returns data with keywords expanded.'''
        # never expand for restricted commands, unmatched files or binaries
        if not self.restrict and self.match(path) and not util.binary(data):
            ctx = self.linkctx(path, node)
            return self.substitute(data, path, ctx, self.rekw.sub)
        return data

    def iskwfile(self, cand, ctx):
        '''Returns subset of candidates which are configured for keyword
        expansion and are not symbolic links.'''
        return [f for f in cand if self.match(f) and not 'l' in ctx.flags(f)]

    def overwrite(self, ctx, candidates, lookup, expand, rekw=False):
        '''Overwrites selected files expanding/shrinking keywords.'''
        if self.restrict or lookup or self.record: # exclude kw_copy
            candidates = self.iskwfile(candidates, ctx)
        if not candidates:
            return
        kwcmd = self.restrict and lookup # kwexpand/kwshrink
        # the manifest is only needed when reading from the filelog or when
        # each file's linked changectx must be looked up below
        if self.restrict or expand and lookup:
            mf = ctx.manifest()
        lctx = ctx
        re_kw = (self.restrict or rekw) and self.rekw or self.rekwexp
        msg = (expand and _('overwriting %s expanding keywords\n')
               or _('overwriting %s shrinking keywords\n'))
        for f in candidates:
            if self.restrict:
                data = self.repo.file(f).read(mf[f])
            else:
                data = self.repo.wread(f)
            if util.binary(data):
                continue
            if expand:
                if lookup:
                    lctx = self.linkctx(f, mf[f])
                data, found = self.substitute(data, f, lctx, re_kw.subn)
            elif self.restrict:
                # shrinking with restricted command: wdir data is already
                # shrunk by wread, just detect whether keywords are present
                found = re_kw.search(data)
            else:
                data, found = _shrinktext(data, re_kw.subn)
            if found:
                self.ui.note(msg % f)
                self.repo.wwrite(f, data, ctx.flags(f))
                if kwcmd:
                    self.repo.dirstate.normal(f)
                elif self.record:
                    self.repo.dirstate.normallookup(f)

    def shrink(self, fname, text):
        '''Returns text with all keyword substitutions removed.'''
        if self.match(fname) and not util.binary(text):
            return _shrinktext(text, self.rekwexp.sub)
        return text

    def shrinklines(self, fname, lines):
        '''Returns lines with keyword substitutions removed.'''
        if self.match(fname):
            text = ''.join(lines)
            if not util.binary(text):
                return _shrinktext(text, self.rekwexp.sub).splitlines(True)
        return lines

    def wread(self, fname, data):
        '''If in restricted mode returns data read from wdir with
        keyword substitutions removed.'''
        return self.restrict and self.shrink(fname, data) or data
297
297
class kwfilelog(filelog.filelog):
    '''
    Subclass of filelog to hook into its read, add, cmp methods.
    Keywords are "stored" unexpanded, and processed on reading.
    '''
    def __init__(self, opener, kwt, path):
        super(kwfilelog, self).__init__(opener, path)
        self.kwt = kwt
        self.path = path

    def read(self, node):
        '''Expands keywords when reading filelog.'''
        data = super(kwfilelog, self).read(node)
        # rename records carry copy metadata; leave those untouched
        if not self.renamed(node):
            data = self.kwt.expand(self.path, node, data)
        return data

    def add(self, text, meta, tr, link, p1=None, p2=None):
        '''Removes keyword substitutions when adding to filelog.'''
        shrunk = self.kwt.shrink(self.path, text)
        return super(kwfilelog, self).add(shrunk, meta, tr, link, p1, p2)

    def cmp(self, node, text):
        '''Removes keyword substitutions for comparison.'''
        shrunk = self.kwt.shrink(self.path, text)
        return super(kwfilelog, self).cmp(node, shrunk)
324
324
def _status(ui, repo, kwt, *pats, **opts):
    '''Bails out if [keyword] configuration is not active.
    Returns status of working directory.'''
    if not kwt:
        # no active templater: explain why keyword handling is off
        if ui.configitems('keyword'):
            raise util.Abort(_('[keyword] patterns cannot match'))
        raise util.Abort(_('no [keyword] patterns configured'))
    showunknown = opts.get('unknown') or opts.get('all')
    return repo.status(match=scmutil.match(repo, pats, opts), clean=True,
                       unknown=showunknown)
334
334
def _kwfwrite(ui, repo, expand, *pats, **opts):
    '''Selects files and passes them to kwtemplater.overwrite.'''
    wctx = repo[None]
    if len(wctx.parents()) > 1:
        raise util.Abort(_('outstanding uncommitted merge'))
    kwt = kwtools['templater']
    wlock = repo.wlock()
    try:
        st = _status(ui, repo, kwt, *pats, **opts)
        # refuse to touch anything while modified/added/removed/deleted
        # files are present; only clean files (st[6]) are rewritten
        if any(st[:4]):
            raise util.Abort(_('outstanding uncommitted changes'))
        kwt.overwrite(wctx, st[6], True, expand)
    finally:
        wlock.release()
350
350
@command('kwdemo',
         [('d', 'default', None, _('show default keyword template maps')),
          ('f', 'rcfile', '',
           _('read maps from rcfile'), _('FILE'))],
         _('hg kwdemo [-d] [-f RCFILE] [TEMPLATEMAP]...'))
def demo(ui, repo, *args, **opts):
    '''print [keywordmaps] configuration and an expansion example

    Show current, custom, or default keyword template maps and their
    expansions.

    Extend the current configuration by specifying maps as arguments
    and using -f/--rcfile to source an external hgrc file.

    Use -d/--default to disable current configuration.

    See :hg:`help templates` for information on templates and filters.
    '''
    def demoitems(section, items):
        # print a config section in hgrc syntax, sorted by key
        ui.write('[%s]\n' % section)
        for k, v in sorted(items):
            ui.write('%s = %s\n' % (k, v))

    fn = 'demo.txt'
    # work inside a throwaway repository; removed again at the end
    tmpdir = tempfile.mkdtemp('', 'kwdemo.')
    ui.note(_('creating temporary repository at %s\n') % tmpdir)
    repo = localrepo.localrepository(ui, tmpdir, True)
    # make the demo file match for keyword expansion
    ui.setconfig('keyword', fn, '')
    svn = ui.configbool('keywordset', 'svn')
    # explicitly set keywordset for demo output
    ui.setconfig('keywordset', 'svn', svn)

    uikwmaps = ui.configitems('keywordmaps')
    if args or opts.get('rcfile'):
        ui.status(_('\n\tconfiguration using custom keyword template maps\n'))
        if uikwmaps:
            ui.status(_('\textending current template maps\n'))
        if opts.get('default') or not uikwmaps:
            if svn:
                ui.status(_('\toverriding default svn keywordset\n'))
            else:
                ui.status(_('\toverriding default cvs keywordset\n'))
        if opts.get('rcfile'):
            ui.readconfig(opts.get('rcfile'))
        if args:
            # simulate hgrc parsing
            rcmaps = ['[keywordmaps]\n'] + [a + '\n' for a in args]
            fp = repo.opener('hgrc', 'w')
            fp.writelines(rcmaps)
            fp.close()
            ui.readconfig(repo.join('hgrc'))
        kwmaps = dict(ui.configitems('keywordmaps'))
    elif opts.get('default'):
        if svn:
            ui.status(_('\n\tconfiguration using default svn keywordset\n'))
        else:
            ui.status(_('\n\tconfiguration using default cvs keywordset\n'))
        kwmaps = _defaultkwmaps(ui)
        if uikwmaps:
            ui.status(_('\tdisabling current template maps\n'))
            # push the defaults into ui so they shadow the user's maps
            for k, v in kwmaps.iteritems():
                ui.setconfig('keywordmaps', k, v)
    else:
        ui.status(_('\n\tconfiguration using current keyword template maps\n'))
        kwmaps = dict(uikwmaps) or _defaultkwmaps(ui)

    # (re)initialize the extension against the temporary repository
    uisetup(ui)
    reposetup(ui, repo)
    ui.write('[extensions]\nkeyword =\n')
    demoitems('keyword', ui.configitems('keyword'))
    demoitems('keywordset', ui.configitems('keywordset'))
    demoitems('keywordmaps', kwmaps.iteritems())
    # one unexpanded "$Keyword$" per line, to be expanded by the commit
    keywords = '$' + '$\n$'.join(sorted(kwmaps.keys())) + '$\n'
    repo.wopener.write(fn, keywords)
    repo[None].add([fn])
    ui.note(_('\nkeywords written to %s:\n') % fn)
    ui.note(keywords)
    repo.dirstate.setbranch('demobranch')
    # disable commit hooks so the demo commit runs unhindered
    for name, cmd in ui.configitems('hooks'):
        if name.split('.', 1)[0].find('commit') > -1:
            repo.ui.setconfig('hooks', name, '')
    msg = _('hg keyword configuration and expansion example')
    ui.note("hg ci -m '%s'\n" % msg)
    repo.commit(text=msg)
    ui.status(_('\n\tkeywords expanded\n'))
    ui.write(repo.wread(fn))
    shutil.rmtree(tmpdir, ignore_errors=True)
438
438
@command('kwexpand', commands.walkopts, _('hg kwexpand [OPTION]... [FILE]...'))
def expand(ui, repo, *pats, **opts):
    '''expand keywords in the working directory

    Run after (re)enabling keyword expansion.

    kwexpand refuses to run if given files contain local changes.
    '''
    # 3rd argument sets expansion to True
    _kwfwrite(ui, repo, True, *pats, **opts)
449
449
450 @command('kwfiles',
450 @command('kwfiles',
451 [('A', 'all', None, _('show keyword status flags of all files')),
451 [('A', 'all', None, _('show keyword status flags of all files')),
452 ('i', 'ignore', None, _('show files excluded from expansion')),
452 ('i', 'ignore', None, _('show files excluded from expansion')),
453 ('u', 'unknown', None, _('only show unknown (not tracked) files')),
453 ('u', 'unknown', None, _('only show unknown (not tracked) files')),
454 ] + commands.walkopts,
454 ] + commands.walkopts,
455 _('hg kwfiles [OPTION]... [FILE]...'))
455 _('hg kwfiles [OPTION]... [FILE]...'))
456 def files(ui, repo, *pats, **opts):
456 def files(ui, repo, *pats, **opts):
457 '''show files configured for keyword expansion
457 '''show files configured for keyword expansion
458
458
459 List which files in the working directory are matched by the
459 List which files in the working directory are matched by the
460 [keyword] configuration patterns.
460 [keyword] configuration patterns.
461
461
462 Useful to prevent inadvertent keyword expansion and to speed up
462 Useful to prevent inadvertent keyword expansion and to speed up
463 execution by including only files that are actual candidates for
463 execution by including only files that are actual candidates for
464 expansion.
464 expansion.
465
465
466 See :hg:`help keyword` on how to construct patterns both for
466 See :hg:`help keyword` on how to construct patterns both for
467 inclusion and exclusion of files.
467 inclusion and exclusion of files.
468
468
469 With -A/--all and -v/--verbose the codes used to show the status
469 With -A/--all and -v/--verbose the codes used to show the status
470 of files are::
470 of files are::
471
471
472 K = keyword expansion candidate
472 K = keyword expansion candidate
473 k = keyword expansion candidate (not tracked)
473 k = keyword expansion candidate (not tracked)
474 I = ignored
474 I = ignored
475 i = ignored (not tracked)
475 i = ignored (not tracked)
476 '''
476 '''
477 kwt = kwtools['templater']
477 kwt = kwtools['templater']
478 status = _status(ui, repo, kwt, *pats, **opts)
478 status = _status(ui, repo, kwt, *pats, **opts)
479 cwd = pats and repo.getcwd() or ''
479 cwd = pats and repo.getcwd() or ''
480 modified, added, removed, deleted, unknown, ignored, clean = status
480 modified, added, removed, deleted, unknown, ignored, clean = status
481 files = []
481 files = []
482 if not opts.get('unknown') or opts.get('all'):
482 if not opts.get('unknown') or opts.get('all'):
483 files = sorted(modified + added + clean)
483 files = sorted(modified + added + clean)
484 wctx = repo[None]
484 wctx = repo[None]
485 kwfiles = kwt.iskwfile(files, wctx)
485 kwfiles = kwt.iskwfile(files, wctx)
486 kwdeleted = kwt.iskwfile(deleted, wctx)
486 kwdeleted = kwt.iskwfile(deleted, wctx)
487 kwunknown = kwt.iskwfile(unknown, wctx)
487 kwunknown = kwt.iskwfile(unknown, wctx)
488 if not opts.get('ignore') or opts.get('all'):
488 if not opts.get('ignore') or opts.get('all'):
489 showfiles = kwfiles, kwdeleted, kwunknown
489 showfiles = kwfiles, kwdeleted, kwunknown
490 else:
490 else:
491 showfiles = [], [], []
491 showfiles = [], [], []
492 if opts.get('all') or opts.get('ignore'):
492 if opts.get('all') or opts.get('ignore'):
493 showfiles += ([f for f in files if f not in kwfiles],
493 showfiles += ([f for f in files if f not in kwfiles],
494 [f for f in unknown if f not in kwunknown])
494 [f for f in unknown if f not in kwunknown])
495 kwlabels = 'enabled deleted enabledunknown ignored ignoredunknown'.split()
495 kwlabels = 'enabled deleted enabledunknown ignored ignoredunknown'.split()
496 kwstates = zip('K!kIi', showfiles, kwlabels)
496 kwstates = zip('K!kIi', showfiles, kwlabels)
497 for char, filenames, kwstate in kwstates:
497 for char, filenames, kwstate in kwstates:
498 fmt = (opts.get('all') or ui.verbose) and '%s %%s\n' % char or '%s\n'
498 fmt = (opts.get('all') or ui.verbose) and '%s %%s\n' % char or '%s\n'
499 for f in filenames:
499 for f in filenames:
500 ui.write(fmt % repo.pathto(f, cwd), label='kwfiles.' + kwstate)
500 ui.write(fmt % repo.pathto(f, cwd), label='kwfiles.' + kwstate)
501
501
502 @command('kwshrink', commands.walkopts, _('hg kwshrink [OPTION]... [FILE]...'))
502 @command('kwshrink', commands.walkopts, _('hg kwshrink [OPTION]... [FILE]...'))
503 def shrink(ui, repo, *pats, **opts):
503 def shrink(ui, repo, *pats, **opts):
504 '''revert expanded keywords in the working directory
504 '''revert expanded keywords in the working directory
505
505
506 Must be run before changing/disabling active keywords.
506 Must be run before changing/disabling active keywords.
507
507
508 kwshrink refuses to run if given files contain local changes.
508 kwshrink refuses to run if given files contain local changes.
509 '''
509 '''
510 # 3rd argument sets expansion to False
510 # 3rd argument sets expansion to False
511 _kwfwrite(ui, repo, False, *pats, **opts)
511 _kwfwrite(ui, repo, False, *pats, **opts)
512
512
513
513
514 def uisetup(ui):
514 def uisetup(ui):
515 ''' Monkeypatches dispatch._parse to retrieve user command.'''
515 ''' Monkeypatches dispatch._parse to retrieve user command.'''
516
516
517 def kwdispatch_parse(orig, ui, args):
517 def kwdispatch_parse(orig, ui, args):
518 '''Monkeypatch dispatch._parse to obtain running hg command.'''
518 '''Monkeypatch dispatch._parse to obtain running hg command.'''
519 cmd, func, args, options, cmdoptions = orig(ui, args)
519 cmd, func, args, options, cmdoptions = orig(ui, args)
520 kwtools['hgcmd'] = cmd
520 kwtools['hgcmd'] = cmd
521 return cmd, func, args, options, cmdoptions
521 return cmd, func, args, options, cmdoptions
522
522
523 extensions.wrapfunction(dispatch, '_parse', kwdispatch_parse)
523 extensions.wrapfunction(dispatch, '_parse', kwdispatch_parse)
524
524
525 def reposetup(ui, repo):
525 def reposetup(ui, repo):
526 '''Sets up repo as kwrepo for keyword substitution.
526 '''Sets up repo as kwrepo for keyword substitution.
527 Overrides file method to return kwfilelog instead of filelog
527 Overrides file method to return kwfilelog instead of filelog
528 if file matches user configuration.
528 if file matches user configuration.
529 Wraps commit to overwrite configured files with updated
529 Wraps commit to overwrite configured files with updated
530 keyword substitutions.
530 keyword substitutions.
531 Monkeypatches patch and webcommands.'''
531 Monkeypatches patch and webcommands.'''
532
532
533 try:
533 try:
534 if (not repo.local() or kwtools['hgcmd'] in nokwcommands.split()
534 if (not repo.local() or kwtools['hgcmd'] in nokwcommands.split()
535 or '.hg' in util.splitpath(repo.root)
535 or '.hg' in util.splitpath(repo.root)
536 or repo._url.startswith('bundle:')):
536 or repo._url.startswith('bundle:')):
537 return
537 return
538 except AttributeError:
538 except AttributeError:
539 pass
539 pass
540
540
541 inc, exc = [], ['.hg*']
541 inc, exc = [], ['.hg*']
542 for pat, opt in ui.configitems('keyword'):
542 for pat, opt in ui.configitems('keyword'):
543 if opt != 'ignore':
543 if opt != 'ignore':
544 inc.append(pat)
544 inc.append(pat)
545 else:
545 else:
546 exc.append(pat)
546 exc.append(pat)
547 if not inc:
547 if not inc:
548 return
548 return
549
549
550 kwtools['templater'] = kwt = kwtemplater(ui, repo, inc, exc)
550 kwtools['templater'] = kwt = kwtemplater(ui, repo, inc, exc)
551
551
552 class kwrepo(repo.__class__):
552 class kwrepo(repo.__class__):
553 def file(self, f):
553 def file(self, f):
554 if f[0] == '/':
554 if f[0] == '/':
555 f = f[1:]
555 f = f[1:]
556 return kwfilelog(self.sopener, kwt, f)
556 return kwfilelog(self.sopener, kwt, f)
557
557
558 def wread(self, filename):
558 def wread(self, filename):
559 data = super(kwrepo, self).wread(filename)
559 data = super(kwrepo, self).wread(filename)
560 return kwt.wread(filename, data)
560 return kwt.wread(filename, data)
561
561
562 def commit(self, *args, **opts):
562 def commit(self, *args, **opts):
563 # use custom commitctx for user commands
563 # use custom commitctx for user commands
564 # other extensions can still wrap repo.commitctx directly
564 # other extensions can still wrap repo.commitctx directly
565 self.commitctx = self.kwcommitctx
565 self.commitctx = self.kwcommitctx
566 try:
566 try:
567 return super(kwrepo, self).commit(*args, **opts)
567 return super(kwrepo, self).commit(*args, **opts)
568 finally:
568 finally:
569 del self.commitctx
569 del self.commitctx
570
570
571 def kwcommitctx(self, ctx, error=False):
571 def kwcommitctx(self, ctx, error=False):
572 n = super(kwrepo, self).commitctx(ctx, error)
572 n = super(kwrepo, self).commitctx(ctx, error)
573 # no lock needed, only called from repo.commit() which already locks
573 # no lock needed, only called from repo.commit() which already locks
574 if not kwt.record:
574 if not kwt.record:
575 restrict = kwt.restrict
575 restrict = kwt.restrict
576 kwt.restrict = True
576 kwt.restrict = True
577 kwt.overwrite(self[n], sorted(ctx.added() + ctx.modified()),
577 kwt.overwrite(self[n], sorted(ctx.added() + ctx.modified()),
578 False, True)
578 False, True)
579 kwt.restrict = restrict
579 kwt.restrict = restrict
580 return n
580 return n
581
581
582 def rollback(self, dryrun=False):
582 def rollback(self, dryrun=False):
583 wlock = self.wlock()
583 wlock = self.wlock()
584 try:
584 try:
585 if not dryrun:
585 if not dryrun:
586 changed = self['.'].files()
586 changed = self['.'].files()
587 ret = super(kwrepo, self).rollback(dryrun)
587 ret = super(kwrepo, self).rollback(dryrun)
588 if not dryrun:
588 if not dryrun:
589 ctx = self['.']
589 ctx = self['.']
590 modified, added = _preselect(self[None].status(), changed)
590 modified, added = _preselect(self[None].status(), changed)
591 kwt.overwrite(ctx, modified, True, True)
591 kwt.overwrite(ctx, modified, True, True)
592 kwt.overwrite(ctx, added, True, False)
592 kwt.overwrite(ctx, added, True, False)
593 return ret
593 return ret
594 finally:
594 finally:
595 wlock.release()
595 wlock.release()
596
596
597 # monkeypatches
597 # monkeypatches
598 def kwpatchfile_init(orig, self, ui, fname, backend, mode,
598 def kwpatchfile_init(orig, self, ui, fname, backend, mode, create, remove,
599 missing=False, eolmode=None):
599 missing=False, eolmode=None):
600 '''Monkeypatch/wrap patch.patchfile.__init__ to avoid
600 '''Monkeypatch/wrap patch.patchfile.__init__ to avoid
601 rejects or conflicts due to expanded keywords in working dir.'''
601 rejects or conflicts due to expanded keywords in working dir.'''
602 orig(self, ui, fname, backend, mode, missing, eolmode)
602 orig(self, ui, fname, backend, mode, create, remove, missing, eolmode)
603 # shrink keywords read from working dir
603 # shrink keywords read from working dir
604 self.lines = kwt.shrinklines(self.fname, self.lines)
604 self.lines = kwt.shrinklines(self.fname, self.lines)
605
605
606 def kw_diff(orig, repo, node1=None, node2=None, match=None, changes=None,
606 def kw_diff(orig, repo, node1=None, node2=None, match=None, changes=None,
607 opts=None, prefix=''):
607 opts=None, prefix=''):
608 '''Monkeypatch patch.diff to avoid expansion.'''
608 '''Monkeypatch patch.diff to avoid expansion.'''
609 kwt.restrict = True
609 kwt.restrict = True
610 return orig(repo, node1, node2, match, changes, opts, prefix)
610 return orig(repo, node1, node2, match, changes, opts, prefix)
611
611
612 def kwweb_skip(orig, web, req, tmpl):
612 def kwweb_skip(orig, web, req, tmpl):
613 '''Wraps webcommands.x turning off keyword expansion.'''
613 '''Wraps webcommands.x turning off keyword expansion.'''
614 kwt.match = util.never
614 kwt.match = util.never
615 return orig(web, req, tmpl)
615 return orig(web, req, tmpl)
616
616
617 def kw_copy(orig, ui, repo, pats, opts, rename=False):
617 def kw_copy(orig, ui, repo, pats, opts, rename=False):
618 '''Wraps cmdutil.copy so that copy/rename destinations do not
618 '''Wraps cmdutil.copy so that copy/rename destinations do not
619 contain expanded keywords.
619 contain expanded keywords.
620 Note that the source of a regular file destination may also be a
620 Note that the source of a regular file destination may also be a
621 symlink:
621 symlink:
622 hg cp sym x -> x is symlink
622 hg cp sym x -> x is symlink
623 cp sym x; hg cp -A sym x -> x is file (maybe expanded keywords)
623 cp sym x; hg cp -A sym x -> x is file (maybe expanded keywords)
624 For the latter we have to follow the symlink to find out whether its
624 For the latter we have to follow the symlink to find out whether its
625 target is configured for expansion and we therefore must unexpand the
625 target is configured for expansion and we therefore must unexpand the
626 keywords in the destination.'''
626 keywords in the destination.'''
627 orig(ui, repo, pats, opts, rename)
627 orig(ui, repo, pats, opts, rename)
628 if opts.get('dry_run'):
628 if opts.get('dry_run'):
629 return
629 return
630 wctx = repo[None]
630 wctx = repo[None]
631 cwd = repo.getcwd()
631 cwd = repo.getcwd()
632
632
633 def haskwsource(dest):
633 def haskwsource(dest):
634 '''Returns true if dest is a regular file and configured for
634 '''Returns true if dest is a regular file and configured for
635 expansion or a symlink which points to a file configured for
635 expansion or a symlink which points to a file configured for
636 expansion. '''
636 expansion. '''
637 source = repo.dirstate.copied(dest)
637 source = repo.dirstate.copied(dest)
638 if 'l' in wctx.flags(source):
638 if 'l' in wctx.flags(source):
639 source = scmutil.canonpath(repo.root, cwd,
639 source = scmutil.canonpath(repo.root, cwd,
640 os.path.realpath(source))
640 os.path.realpath(source))
641 return kwt.match(source)
641 return kwt.match(source)
642
642
643 candidates = [f for f in repo.dirstate.copies() if
643 candidates = [f for f in repo.dirstate.copies() if
644 not 'l' in wctx.flags(f) and haskwsource(f)]
644 not 'l' in wctx.flags(f) and haskwsource(f)]
645 kwt.overwrite(wctx, candidates, False, False)
645 kwt.overwrite(wctx, candidates, False, False)
646
646
647 def kw_dorecord(orig, ui, repo, commitfunc, *pats, **opts):
647 def kw_dorecord(orig, ui, repo, commitfunc, *pats, **opts):
648 '''Wraps record.dorecord expanding keywords after recording.'''
648 '''Wraps record.dorecord expanding keywords after recording.'''
649 wlock = repo.wlock()
649 wlock = repo.wlock()
650 try:
650 try:
651 # record returns 0 even when nothing has changed
651 # record returns 0 even when nothing has changed
652 # therefore compare nodes before and after
652 # therefore compare nodes before and after
653 kwt.record = True
653 kwt.record = True
654 ctx = repo['.']
654 ctx = repo['.']
655 wstatus = repo[None].status()
655 wstatus = repo[None].status()
656 ret = orig(ui, repo, commitfunc, *pats, **opts)
656 ret = orig(ui, repo, commitfunc, *pats, **opts)
657 recctx = repo['.']
657 recctx = repo['.']
658 if ctx != recctx:
658 if ctx != recctx:
659 modified, added = _preselect(wstatus, recctx.files())
659 modified, added = _preselect(wstatus, recctx.files())
660 kwt.restrict = False
660 kwt.restrict = False
661 kwt.overwrite(recctx, modified, False, True)
661 kwt.overwrite(recctx, modified, False, True)
662 kwt.overwrite(recctx, added, False, True, True)
662 kwt.overwrite(recctx, added, False, True, True)
663 kwt.restrict = True
663 kwt.restrict = True
664 return ret
664 return ret
665 finally:
665 finally:
666 wlock.release()
666 wlock.release()
667
667
668 def kwfilectx_cmp(orig, self, fctx):
668 def kwfilectx_cmp(orig, self, fctx):
669 # keyword affects data size, comparing wdir and filelog size does
669 # keyword affects data size, comparing wdir and filelog size does
670 # not make sense
670 # not make sense
671 if (fctx._filerev is None and
671 if (fctx._filerev is None and
672 (self._repo._encodefilterpats or
672 (self._repo._encodefilterpats or
673 kwt.match(fctx.path()) and not 'l' in fctx.flags()) or
673 kwt.match(fctx.path()) and not 'l' in fctx.flags()) or
674 self.size() == fctx.size()):
674 self.size() == fctx.size()):
675 return self._filelog.cmp(self._filenode, fctx.data())
675 return self._filelog.cmp(self._filenode, fctx.data())
676 return True
676 return True
677
677
678 extensions.wrapfunction(context.filectx, 'cmp', kwfilectx_cmp)
678 extensions.wrapfunction(context.filectx, 'cmp', kwfilectx_cmp)
679 extensions.wrapfunction(patch.patchfile, '__init__', kwpatchfile_init)
679 extensions.wrapfunction(patch.patchfile, '__init__', kwpatchfile_init)
680 extensions.wrapfunction(patch, 'diff', kw_diff)
680 extensions.wrapfunction(patch, 'diff', kw_diff)
681 extensions.wrapfunction(cmdutil, 'copy', kw_copy)
681 extensions.wrapfunction(cmdutil, 'copy', kw_copy)
682 for c in 'annotate changeset rev filediff diff'.split():
682 for c in 'annotate changeset rev filediff diff'.split():
683 extensions.wrapfunction(webcommands, c, kwweb_skip)
683 extensions.wrapfunction(webcommands, c, kwweb_skip)
684 for name in recordextensions.split():
684 for name in recordextensions.split():
685 try:
685 try:
686 record = extensions.find(name)
686 record = extensions.find(name)
687 extensions.wrapfunction(record, 'dorecord', kw_dorecord)
687 extensions.wrapfunction(record, 'dorecord', kw_dorecord)
688 except KeyError:
688 except KeyError:
689 pass
689 pass
690
690
691 repo.__class__ = kwrepo
691 repo.__class__ = kwrepo
@@ -1,1767 +1,1751 b''
1 # patch.py - patch file parsing routines
1 # patch.py - patch file parsing routines
2 #
2 #
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 import cStringIO, email.Parser, os, errno, re
9 import cStringIO, email.Parser, os, errno, re
10 import tempfile, zlib
10 import tempfile, zlib
11
11
12 from i18n import _
12 from i18n import _
13 from node import hex, nullid, short
13 from node import hex, nullid, short
14 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding
14 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding
15
15
16 gitre = re.compile('diff --git a/(.*) b/(.*)')
16 gitre = re.compile('diff --git a/(.*) b/(.*)')
17
17
18 class PatchError(Exception):
18 class PatchError(Exception):
19 pass
19 pass
20
20
21
21
22 # public functions
22 # public functions
23
23
24 def split(stream):
24 def split(stream):
25 '''return an iterator of individual patches from a stream'''
25 '''return an iterator of individual patches from a stream'''
26 def isheader(line, inheader):
26 def isheader(line, inheader):
27 if inheader and line[0] in (' ', '\t'):
27 if inheader and line[0] in (' ', '\t'):
28 # continuation
28 # continuation
29 return True
29 return True
30 if line[0] in (' ', '-', '+'):
30 if line[0] in (' ', '-', '+'):
31 # diff line - don't check for header pattern in there
31 # diff line - don't check for header pattern in there
32 return False
32 return False
33 l = line.split(': ', 1)
33 l = line.split(': ', 1)
34 return len(l) == 2 and ' ' not in l[0]
34 return len(l) == 2 and ' ' not in l[0]
35
35
36 def chunk(lines):
36 def chunk(lines):
37 return cStringIO.StringIO(''.join(lines))
37 return cStringIO.StringIO(''.join(lines))
38
38
39 def hgsplit(stream, cur):
39 def hgsplit(stream, cur):
40 inheader = True
40 inheader = True
41
41
42 for line in stream:
42 for line in stream:
43 if not line.strip():
43 if not line.strip():
44 inheader = False
44 inheader = False
45 if not inheader and line.startswith('# HG changeset patch'):
45 if not inheader and line.startswith('# HG changeset patch'):
46 yield chunk(cur)
46 yield chunk(cur)
47 cur = []
47 cur = []
48 inheader = True
48 inheader = True
49
49
50 cur.append(line)
50 cur.append(line)
51
51
52 if cur:
52 if cur:
53 yield chunk(cur)
53 yield chunk(cur)
54
54
55 def mboxsplit(stream, cur):
55 def mboxsplit(stream, cur):
56 for line in stream:
56 for line in stream:
57 if line.startswith('From '):
57 if line.startswith('From '):
58 for c in split(chunk(cur[1:])):
58 for c in split(chunk(cur[1:])):
59 yield c
59 yield c
60 cur = []
60 cur = []
61
61
62 cur.append(line)
62 cur.append(line)
63
63
64 if cur:
64 if cur:
65 for c in split(chunk(cur[1:])):
65 for c in split(chunk(cur[1:])):
66 yield c
66 yield c
67
67
68 def mimesplit(stream, cur):
68 def mimesplit(stream, cur):
69 def msgfp(m):
69 def msgfp(m):
70 fp = cStringIO.StringIO()
70 fp = cStringIO.StringIO()
71 g = email.Generator.Generator(fp, mangle_from_=False)
71 g = email.Generator.Generator(fp, mangle_from_=False)
72 g.flatten(m)
72 g.flatten(m)
73 fp.seek(0)
73 fp.seek(0)
74 return fp
74 return fp
75
75
76 for line in stream:
76 for line in stream:
77 cur.append(line)
77 cur.append(line)
78 c = chunk(cur)
78 c = chunk(cur)
79
79
80 m = email.Parser.Parser().parse(c)
80 m = email.Parser.Parser().parse(c)
81 if not m.is_multipart():
81 if not m.is_multipart():
82 yield msgfp(m)
82 yield msgfp(m)
83 else:
83 else:
84 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
84 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
85 for part in m.walk():
85 for part in m.walk():
86 ct = part.get_content_type()
86 ct = part.get_content_type()
87 if ct not in ok_types:
87 if ct not in ok_types:
88 continue
88 continue
89 yield msgfp(part)
89 yield msgfp(part)
90
90
91 def headersplit(stream, cur):
91 def headersplit(stream, cur):
92 inheader = False
92 inheader = False
93
93
94 for line in stream:
94 for line in stream:
95 if not inheader and isheader(line, inheader):
95 if not inheader and isheader(line, inheader):
96 yield chunk(cur)
96 yield chunk(cur)
97 cur = []
97 cur = []
98 inheader = True
98 inheader = True
99 if inheader and not isheader(line, inheader):
99 if inheader and not isheader(line, inheader):
100 inheader = False
100 inheader = False
101
101
102 cur.append(line)
102 cur.append(line)
103
103
104 if cur:
104 if cur:
105 yield chunk(cur)
105 yield chunk(cur)
106
106
107 def remainder(cur):
107 def remainder(cur):
108 yield chunk(cur)
108 yield chunk(cur)
109
109
110 class fiter(object):
110 class fiter(object):
111 def __init__(self, fp):
111 def __init__(self, fp):
112 self.fp = fp
112 self.fp = fp
113
113
114 def __iter__(self):
114 def __iter__(self):
115 return self
115 return self
116
116
117 def next(self):
117 def next(self):
118 l = self.fp.readline()
118 l = self.fp.readline()
119 if not l:
119 if not l:
120 raise StopIteration
120 raise StopIteration
121 return l
121 return l
122
122
123 inheader = False
123 inheader = False
124 cur = []
124 cur = []
125
125
126 mimeheaders = ['content-type']
126 mimeheaders = ['content-type']
127
127
128 if not hasattr(stream, 'next'):
128 if not hasattr(stream, 'next'):
129 # http responses, for example, have readline but not next
129 # http responses, for example, have readline but not next
130 stream = fiter(stream)
130 stream = fiter(stream)
131
131
132 for line in stream:
132 for line in stream:
133 cur.append(line)
133 cur.append(line)
134 if line.startswith('# HG changeset patch'):
134 if line.startswith('# HG changeset patch'):
135 return hgsplit(stream, cur)
135 return hgsplit(stream, cur)
136 elif line.startswith('From '):
136 elif line.startswith('From '):
137 return mboxsplit(stream, cur)
137 return mboxsplit(stream, cur)
138 elif isheader(line, inheader):
138 elif isheader(line, inheader):
139 inheader = True
139 inheader = True
140 if line.split(':', 1)[0].lower() in mimeheaders:
140 if line.split(':', 1)[0].lower() in mimeheaders:
141 # let email parser handle this
141 # let email parser handle this
142 return mimesplit(stream, cur)
142 return mimesplit(stream, cur)
143 elif line.startswith('--- ') and inheader:
143 elif line.startswith('--- ') and inheader:
144 # No evil headers seen by diff start, split by hand
144 # No evil headers seen by diff start, split by hand
145 return headersplit(stream, cur)
145 return headersplit(stream, cur)
146 # Not enough info, keep reading
146 # Not enough info, keep reading
147
147
148 # if we are here, we have a very plain patch
148 # if we are here, we have a very plain patch
149 return remainder(cur)
149 return remainder(cur)
150
150
151 def extract(ui, fileobj):
151 def extract(ui, fileobj):
152 '''extract patch from data read from fileobj.
152 '''extract patch from data read from fileobj.
153
153
154 patch can be a normal patch or contained in an email message.
154 patch can be a normal patch or contained in an email message.
155
155
156 return tuple (filename, message, user, date, branch, node, p1, p2).
156 return tuple (filename, message, user, date, branch, node, p1, p2).
157 Any item in the returned tuple can be None. If filename is None,
157 Any item in the returned tuple can be None. If filename is None,
158 fileobj did not contain a patch. Caller must unlink filename when done.'''
158 fileobj did not contain a patch. Caller must unlink filename when done.'''
159
159
160 # attempt to detect the start of a patch
160 # attempt to detect the start of a patch
161 # (this heuristic is borrowed from quilt)
161 # (this heuristic is borrowed from quilt)
162 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
162 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
163 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
163 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
164 r'---[ \t].*?^\+\+\+[ \t]|'
164 r'---[ \t].*?^\+\+\+[ \t]|'
165 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
165 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
166
166
167 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
167 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
168 tmpfp = os.fdopen(fd, 'w')
168 tmpfp = os.fdopen(fd, 'w')
169 try:
169 try:
170 msg = email.Parser.Parser().parse(fileobj)
170 msg = email.Parser.Parser().parse(fileobj)
171
171
172 subject = msg['Subject']
172 subject = msg['Subject']
173 user = msg['From']
173 user = msg['From']
174 if not subject and not user:
174 if not subject and not user:
175 # Not an email, restore parsed headers if any
175 # Not an email, restore parsed headers if any
176 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
176 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
177
177
178 gitsendmail = 'git-send-email' in msg.get('X-Mailer', '')
178 gitsendmail = 'git-send-email' in msg.get('X-Mailer', '')
179 # should try to parse msg['Date']
179 # should try to parse msg['Date']
180 date = None
180 date = None
181 nodeid = None
181 nodeid = None
182 branch = None
182 branch = None
183 parents = []
183 parents = []
184
184
185 if subject:
185 if subject:
186 if subject.startswith('[PATCH'):
186 if subject.startswith('[PATCH'):
187 pend = subject.find(']')
187 pend = subject.find(']')
188 if pend >= 0:
188 if pend >= 0:
189 subject = subject[pend + 1:].lstrip()
189 subject = subject[pend + 1:].lstrip()
190 subject = subject.replace('\n\t', ' ')
190 subject = subject.replace('\n\t', ' ')
191 ui.debug('Subject: %s\n' % subject)
191 ui.debug('Subject: %s\n' % subject)
192 if user:
192 if user:
193 ui.debug('From: %s\n' % user)
193 ui.debug('From: %s\n' % user)
194 diffs_seen = 0
194 diffs_seen = 0
195 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
195 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
196 message = ''
196 message = ''
197 for part in msg.walk():
197 for part in msg.walk():
198 content_type = part.get_content_type()
198 content_type = part.get_content_type()
199 ui.debug('Content-Type: %s\n' % content_type)
199 ui.debug('Content-Type: %s\n' % content_type)
200 if content_type not in ok_types:
200 if content_type not in ok_types:
201 continue
201 continue
202 payload = part.get_payload(decode=True)
202 payload = part.get_payload(decode=True)
203 m = diffre.search(payload)
203 m = diffre.search(payload)
204 if m:
204 if m:
205 hgpatch = False
205 hgpatch = False
206 hgpatchheader = False
206 hgpatchheader = False
207 ignoretext = False
207 ignoretext = False
208
208
209 ui.debug('found patch at byte %d\n' % m.start(0))
209 ui.debug('found patch at byte %d\n' % m.start(0))
210 diffs_seen += 1
210 diffs_seen += 1
211 cfp = cStringIO.StringIO()
211 cfp = cStringIO.StringIO()
212 for line in payload[:m.start(0)].splitlines():
212 for line in payload[:m.start(0)].splitlines():
213 if line.startswith('# HG changeset patch') and not hgpatch:
213 if line.startswith('# HG changeset patch') and not hgpatch:
214 ui.debug('patch generated by hg export\n')
214 ui.debug('patch generated by hg export\n')
215 hgpatch = True
215 hgpatch = True
216 hgpatchheader = True
216 hgpatchheader = True
217 # drop earlier commit message content
217 # drop earlier commit message content
218 cfp.seek(0)
218 cfp.seek(0)
219 cfp.truncate()
219 cfp.truncate()
220 subject = None
220 subject = None
221 elif hgpatchheader:
221 elif hgpatchheader:
222 if line.startswith('# User '):
222 if line.startswith('# User '):
223 user = line[7:]
223 user = line[7:]
224 ui.debug('From: %s\n' % user)
224 ui.debug('From: %s\n' % user)
225 elif line.startswith("# Date "):
225 elif line.startswith("# Date "):
226 date = line[7:]
226 date = line[7:]
227 elif line.startswith("# Branch "):
227 elif line.startswith("# Branch "):
228 branch = line[9:]
228 branch = line[9:]
229 elif line.startswith("# Node ID "):
229 elif line.startswith("# Node ID "):
230 nodeid = line[10:]
230 nodeid = line[10:]
231 elif line.startswith("# Parent "):
231 elif line.startswith("# Parent "):
232 parents.append(line[10:])
232 parents.append(line[10:])
233 elif not line.startswith("# "):
233 elif not line.startswith("# "):
234 hgpatchheader = False
234 hgpatchheader = False
235 elif line == '---' and gitsendmail:
235 elif line == '---' and gitsendmail:
236 ignoretext = True
236 ignoretext = True
237 if not hgpatchheader and not ignoretext:
237 if not hgpatchheader and not ignoretext:
238 cfp.write(line)
238 cfp.write(line)
239 cfp.write('\n')
239 cfp.write('\n')
240 message = cfp.getvalue()
240 message = cfp.getvalue()
241 if tmpfp:
241 if tmpfp:
242 tmpfp.write(payload)
242 tmpfp.write(payload)
243 if not payload.endswith('\n'):
243 if not payload.endswith('\n'):
244 tmpfp.write('\n')
244 tmpfp.write('\n')
245 elif not diffs_seen and message and content_type == 'text/plain':
245 elif not diffs_seen and message and content_type == 'text/plain':
246 message += '\n' + payload
246 message += '\n' + payload
247 except:
247 except:
248 tmpfp.close()
248 tmpfp.close()
249 os.unlink(tmpname)
249 os.unlink(tmpname)
250 raise
250 raise
251
251
252 if subject and not message.startswith(subject):
252 if subject and not message.startswith(subject):
253 message = '%s\n%s' % (subject, message)
253 message = '%s\n%s' % (subject, message)
254 tmpfp.close()
254 tmpfp.close()
255 if not diffs_seen:
255 if not diffs_seen:
256 os.unlink(tmpname)
256 os.unlink(tmpname)
257 return None, message, user, date, branch, None, None, None
257 return None, message, user, date, branch, None, None, None
258 p1 = parents and parents.pop(0) or None
258 p1 = parents and parents.pop(0) or None
259 p2 = parents and parents.pop(0) or None
259 p2 = parents and parents.pop(0) or None
260 return tmpname, message, user, date, branch, nodeid, p1, p2
260 return tmpname, message, user, date, branch, nodeid, p1, p2
261
261
class patchmeta(object):
    """Patched file metadata

    'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
    or COPY.  'path' is patched file path.  'oldpath' is set to the
    origin file when 'op' is either COPY or RENAME, None otherwise.  If
    file mode is changed, 'mode' is a tuple (islink, isexec) where
    'islink' is True if the file is a symlink and 'isexec' is True if
    the file is executable.  Otherwise, 'mode' is None.
    """
    def __init__(self, path):
        self.path = path
        self.oldpath = None   # origin path for COPY/RENAME, set by readgitpatch
        self.mode = None      # (islink, isexec) once a mode line is seen
        self.op = 'MODIFY'
        self.binary = False   # True for 'GIT binary patch' entries

    def setmode(self, mode):
        # Decode a git octal file mode.  The stored pair keeps the raw
        # masked bits (truthy/falsy ints), not normalized booleans.
        # NOTE(review): 0o literals need Python >= 2.6 (original used 020000).
        islink = mode & 0o20000
        isexec = mode & 0o100
        self.mode = (islink, isexec)

    def __repr__(self):
        return "<patchmeta %s %r>" % (self.op, self.path)
286
286
def readgitpatch(lr):
    """extract git-style metadata about patches from <patchname>

    Iterates over the lines of 'lr' (any iterable of strings) and
    returns a list of patchmeta objects, one per 'diff --git' section.
    """
    # Filter patch for git information
    gp = None
    gitpatches = []
    for line in lr:
        line = line.rstrip(' \r\n')
        if line.startswith('diff --git'):
            m = gitre.match(line)
            if m:
                if gp:
                    gitpatches.append(gp)
                # group(2) is the destination path of the diff header
                dst = m.group(2)
                gp = patchmeta(dst)
        elif gp:
            if line.startswith('--- '):
                # Start of the unified diff body: metadata block is over.
                gitpatches.append(gp)
                gp = None
                continue
            if line.startswith('rename from '):
                gp.op = 'RENAME'
                gp.oldpath = line[12:]
            elif line.startswith('rename to '):
                gp.path = line[10:]
            elif line.startswith('copy from '):
                gp.op = 'COPY'
                gp.oldpath = line[10:]
            elif line.startswith('copy to '):
                gp.path = line[8:]
            elif line.startswith('deleted file'):
                gp.op = 'DELETE'
            elif line.startswith('new file mode '):
                gp.op = 'ADD'
                # Last six characters are the octal file mode.
                gp.setmode(int(line[-6:], 8))
            elif line.startswith('new mode '):
                gp.setmode(int(line[-6:], 8))
            elif line.startswith('GIT binary patch'):
                gp.binary = True
    if gp:
        gitpatches.append(gp)

    return gitpatches
330
330
class linereader(object):
    """Wrap a file-like object, allowing lines to be pushed back.

    Pushed-back lines are returned (FIFO) before any further reading
    from the underlying stream.
    """
    def __init__(self, fp):
        self.fp = fp
        self.buf = []  # pushed-back lines, consumed before fp

    def push(self, line):
        # Pushing None is a no-op, so callers may push a possibly-absent
        # lookahead line unconditionally.
        if line is not None:
            self.buf.append(line)

    def readline(self):
        if self.buf:
            return self.buf.pop(0)
        return self.fp.readline()

    def __iter__(self):
        # Yield lines until EOF (an empty string).
        while True:
            line = self.readline()
            if not line:
                break
            yield line
354
354
class abstractbackend(object):
    """Interface that a patch target must implement.

    Concrete subclasses (e.g. a filesystem backend) provide file
    access primitives used while applying hunks.
    """
    def __init__(self, ui):
        self.ui = ui

    def getfile(self, fname):
        """Return target file data and flags as a (data, (islink,
        isexec)) tuple.
        """
        raise NotImplementedError

    def setfile(self, fname, data, mode):
        """Write data to target file fname and set its mode. mode is a
        (islink, isexec) tuple. If data is None, the file content should
        be left unchanged.
        """
        raise NotImplementedError

    def unlink(self, fname):
        """Unlink target file."""
        raise NotImplementedError

    def writerej(self, fname, failed, total, lines):
        """Write rejected lines for fname. failed is the number of hunks
        which failed to apply and total the total number of hunks for this
        file.
        """
        # Optional hook: default implementation discards rejects silently.
        pass

    def copy(self, src, dst):
        """Copy src file into dst file. Create intermediate directories if
        necessary. Files are specified relatively to the patching base
        directory.
        """
        raise NotImplementedError

    def exists(self, fname):
        raise NotImplementedError
392
392
class fsbackend(abstractbackend):
    """Patch backend applying changes to files under a base directory."""
    def __init__(self, ui, basedir):
        super(fsbackend, self).__init__(ui)
        self.opener = scmutil.opener(basedir)

    def _join(self, f):
        # Absolute path of f under the patching base directory.
        return os.path.join(self.opener.base, f)

    def getfile(self, fname):
        path = self._join(fname)
        if os.path.islink(path):
            # A symlink's "data" is its target.
            return (os.readlink(path), (True, False))
        isexec, islink = False, False
        try:
            # NOTE(review): 0o literal and 'except ... as' need Python >= 2.6
            # (original used 0100 and 'except OSError, e').
            isexec = os.lstat(path).st_mode & 0o100 != 0
            islink = os.path.islink(path)
        except OSError as e:
            # Missing file: keep default flags; opener.read below raises
            # for truly absent files.
            if e.errno != errno.ENOENT:
                raise
        return (self.opener.read(fname), (islink, isexec))

    def setfile(self, fname, data, mode):
        islink, isexec = mode
        if data is None:
            # Content unchanged: only update the flags.
            util.setflags(self._join(fname), islink, isexec)
            return
        if islink:
            self.opener.symlink(data, fname)
        else:
            self.opener.write(fname, data)
            if isexec:
                util.setflags(self._join(fname), False, True)

    def unlink(self, fname):
        try:
            util.unlinkpath(self._join(fname))
        except OSError as inst:
            # Already gone is fine.
            if inst.errno != errno.ENOENT:
                raise

    def writerej(self, fname, failed, total, lines):
        # Save rejected hunks next to the target as <fname>.rej.
        fname = fname + ".rej"
        self.ui.warn(
            _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
            (failed, total, fname))
        fp = self.opener(fname, 'w')
        fp.writelines(lines)
        fp.close()

    def copy(self, src, dst):
        basedir = self.opener.base
        abssrc, absdst = [scmutil.canonpath(basedir, basedir, x)
                          for x in [src, dst]]
        if os.path.lexists(absdst):
            raise util.Abort(_("cannot create %s: destination already exists")
                             % dst)
        dstdir = os.path.dirname(absdst)
        if dstdir and not os.path.isdir(dstdir):
            try:
                os.makedirs(dstdir)
            except IOError:
                raise util.Abort(
                    _("cannot create %s: unable to create destination directory")
                    % dst)
        util.copyfile(abssrc, absdst)

    def exists(self, fname):
        return os.path.lexists(self._join(fname))
461
461
class workingbackend(fsbackend):
    """fsbackend that also records adds/removes/copies in the dirstate.

    Changes are accumulated while patching and committed to the
    dirstate by close(), which returns the sorted list of touched files.
    """
    def __init__(self, ui, repo, similarity):
        super(workingbackend, self).__init__(ui, repo.root)
        self.repo = repo
        self.similarity = similarity  # similarity threshold for addremove
        self.removed = set()
        self.changed = set()
        self.copied = []

    def setfile(self, fname, data, mode):
        super(workingbackend, self).setfile(fname, data, mode)
        self.changed.add(fname)

    def unlink(self, fname):
        super(workingbackend, self).unlink(fname)
        self.removed.add(fname)
        self.changed.add(fname)

    def copy(self, src, dst):
        super(workingbackend, self).copy(src, dst)
        self.copied.append((src, dst))
        self.changed.add(dst)

    def close(self):
        wctx = self.repo[None]
        addremoved = set(self.changed)
        for src, dst in self.copied:
            scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
            # Copy sources are tracked already, no need to addremove them.
            addremoved.discard(src)
        if (not self.similarity) and self.removed:
            # Without similarity detection, removed files are simply
            # forgotten instead of being candidates for rename matching.
            wctx.forget(sorted(self.removed))
        if addremoved:
            cwd = self.repo.getcwd()
            if cwd:
                # addremove expects paths relative to cwd.
                addremoved = [util.pathto(self.repo.root, cwd, f)
                              for f in addremoved]
            scmutil.addremove(self.repo, addremoved, similarity=self.similarity)
        return sorted(self.changed)
500
500
# @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
# Raw strings avoid relying on '\d' surviving as a literal escape.
unidesc = re.compile(r'@@ -(\d+)(,(\d+))? \+(\d+)(,(\d+))? @@')
contextdesc = re.compile(r'(---|\*\*\*) (\d+)(,(\d+))? (---|\*\*\*)')
eolmodes = ['strict', 'crlf', 'lf', 'auto']
505
505
506 class patchfile(object):
506 class patchfile(object):
507 def __init__(self, ui, fname, backend, mode, missing=False,
507 def __init__(self, ui, fname, backend, mode, create, remove, missing=False,
508 eolmode='strict'):
508 eolmode='strict'):
509 self.fname = fname
509 self.fname = fname
510 self.eolmode = eolmode
510 self.eolmode = eolmode
511 self.eol = None
511 self.eol = None
512 self.backend = backend
512 self.backend = backend
513 self.ui = ui
513 self.ui = ui
514 self.lines = []
514 self.lines = []
515 self.exists = False
515 self.exists = False
516 self.missing = missing
516 self.missing = missing
517 self.mode = mode
517 self.mode = mode
518 self.create = create
519 self.remove = remove
518 if not missing:
520 if not missing:
519 try:
521 try:
520 data, mode = self.backend.getfile(fname)
522 data, mode = self.backend.getfile(fname)
521 if data:
523 if data:
522 self.lines = data.splitlines(True)
524 self.lines = data.splitlines(True)
523 if self.mode is None:
525 if self.mode is None:
524 self.mode = mode
526 self.mode = mode
525 if self.lines:
527 if self.lines:
526 # Normalize line endings
528 # Normalize line endings
527 if self.lines[0].endswith('\r\n'):
529 if self.lines[0].endswith('\r\n'):
528 self.eol = '\r\n'
530 self.eol = '\r\n'
529 elif self.lines[0].endswith('\n'):
531 elif self.lines[0].endswith('\n'):
530 self.eol = '\n'
532 self.eol = '\n'
531 if eolmode != 'strict':
533 if eolmode != 'strict':
532 nlines = []
534 nlines = []
533 for l in self.lines:
535 for l in self.lines:
534 if l.endswith('\r\n'):
536 if l.endswith('\r\n'):
535 l = l[:-2] + '\n'
537 l = l[:-2] + '\n'
536 nlines.append(l)
538 nlines.append(l)
537 self.lines = nlines
539 self.lines = nlines
538 self.exists = True
540 self.exists = True
539 except IOError:
541 except IOError:
540 if self.mode is None:
542 if self.mode is None:
541 self.mode = (False, False)
543 self.mode = (False, False)
542 else:
544 else:
543 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
545 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
544
546
545 self.hash = {}
547 self.hash = {}
546 self.dirty = 0
548 self.dirty = 0
547 self.offset = 0
549 self.offset = 0
548 self.skew = 0
550 self.skew = 0
549 self.rej = []
551 self.rej = []
550 self.fileprinted = False
552 self.fileprinted = False
551 self.printfile(False)
553 self.printfile(False)
552 self.hunks = 0
554 self.hunks = 0
553
555
554 def writelines(self, fname, lines, mode):
556 def writelines(self, fname, lines, mode):
555 if self.eolmode == 'auto':
557 if self.eolmode == 'auto':
556 eol = self.eol
558 eol = self.eol
557 elif self.eolmode == 'crlf':
559 elif self.eolmode == 'crlf':
558 eol = '\r\n'
560 eol = '\r\n'
559 else:
561 else:
560 eol = '\n'
562 eol = '\n'
561
563
562 if self.eolmode != 'strict' and eol and eol != '\n':
564 if self.eolmode != 'strict' and eol and eol != '\n':
563 rawlines = []
565 rawlines = []
564 for l in lines:
566 for l in lines:
565 if l and l[-1] == '\n':
567 if l and l[-1] == '\n':
566 l = l[:-1] + eol
568 l = l[:-1] + eol
567 rawlines.append(l)
569 rawlines.append(l)
568 lines = rawlines
570 lines = rawlines
569
571
570 self.backend.setfile(fname, ''.join(lines), mode)
572 self.backend.setfile(fname, ''.join(lines), mode)
571
573
572 def printfile(self, warn):
574 def printfile(self, warn):
573 if self.fileprinted:
575 if self.fileprinted:
574 return
576 return
575 if warn or self.ui.verbose:
577 if warn or self.ui.verbose:
576 self.fileprinted = True
578 self.fileprinted = True
577 s = _("patching file %s\n") % self.fname
579 s = _("patching file %s\n") % self.fname
578 if warn:
580 if warn:
579 self.ui.warn(s)
581 self.ui.warn(s)
580 else:
582 else:
581 self.ui.note(s)
583 self.ui.note(s)
582
584
583
585
584 def findlines(self, l, linenum):
586 def findlines(self, l, linenum):
585 # looks through the hash and finds candidate lines. The
587 # looks through the hash and finds candidate lines. The
586 # result is a list of line numbers sorted based on distance
588 # result is a list of line numbers sorted based on distance
587 # from linenum
589 # from linenum
588
590
589 cand = self.hash.get(l, [])
591 cand = self.hash.get(l, [])
590 if len(cand) > 1:
592 if len(cand) > 1:
591 # resort our list of potentials forward then back.
593 # resort our list of potentials forward then back.
592 cand.sort(key=lambda x: abs(x - linenum))
594 cand.sort(key=lambda x: abs(x - linenum))
593 return cand
595 return cand
594
596
595 def write_rej(self):
597 def write_rej(self):
596 # our rejects are a little different from patch(1). This always
598 # our rejects are a little different from patch(1). This always
597 # creates rejects in the same form as the original patch. A file
599 # creates rejects in the same form as the original patch. A file
598 # header is inserted so that you can run the reject through patch again
600 # header is inserted so that you can run the reject through patch again
599 # without having to type the filename.
601 # without having to type the filename.
600 if not self.rej:
602 if not self.rej:
601 return
603 return
602 base = os.path.basename(self.fname)
604 base = os.path.basename(self.fname)
603 lines = ["--- %s\n+++ %s\n" % (base, base)]
605 lines = ["--- %s\n+++ %s\n" % (base, base)]
604 for x in self.rej:
606 for x in self.rej:
605 for l in x.hunk:
607 for l in x.hunk:
606 lines.append(l)
608 lines.append(l)
607 if l[-1] != '\n':
609 if l[-1] != '\n':
608 lines.append("\n\ No newline at end of file\n")
610 lines.append("\n\ No newline at end of file\n")
609 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
611 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
610
612
611 def apply(self, h):
613 def apply(self, h):
612 if not h.complete():
614 if not h.complete():
613 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
615 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
614 (h.number, h.desc, len(h.a), h.lena, len(h.b),
616 (h.number, h.desc, len(h.a), h.lena, len(h.b),
615 h.lenb))
617 h.lenb))
616
618
617 self.hunks += 1
619 self.hunks += 1
618
620
619 if self.missing:
621 if self.missing:
620 self.rej.append(h)
622 self.rej.append(h)
621 return -1
623 return -1
622
624
623 if self.exists and h.createfile():
625 if self.exists and self.create:
624 self.ui.warn(_("file %s already exists\n") % self.fname)
626 self.ui.warn(_("file %s already exists\n") % self.fname)
625 self.rej.append(h)
627 self.rej.append(h)
626 return -1
628 return -1
627
629
628 if isinstance(h, binhunk):
630 if isinstance(h, binhunk):
629 if h.rmfile():
631 if self.remove:
630 self.backend.unlink(self.fname)
632 self.backend.unlink(self.fname)
631 else:
633 else:
632 self.lines[:] = h.new()
634 self.lines[:] = h.new()
633 self.offset += len(h.new())
635 self.offset += len(h.new())
634 self.dirty = True
636 self.dirty = True
635 return 0
637 return 0
636
638
637 horig = h
639 horig = h
638 if (self.eolmode in ('crlf', 'lf')
640 if (self.eolmode in ('crlf', 'lf')
639 or self.eolmode == 'auto' and self.eol):
641 or self.eolmode == 'auto' and self.eol):
640 # If new eols are going to be normalized, then normalize
642 # If new eols are going to be normalized, then normalize
641 # hunk data before patching. Otherwise, preserve input
643 # hunk data before patching. Otherwise, preserve input
642 # line-endings.
644 # line-endings.
643 h = h.getnormalized()
645 h = h.getnormalized()
644
646
645 # fast case first, no offsets, no fuzz
647 # fast case first, no offsets, no fuzz
646 old = h.old()
648 old = h.old()
647 # patch starts counting at 1 unless we are adding the file
649 # patch starts counting at 1 unless we are adding the file
648 if h.starta == 0:
650 if h.starta == 0:
649 start = 0
651 start = 0
650 else:
652 else:
651 start = h.starta + self.offset - 1
653 start = h.starta + self.offset - 1
652 orig_start = start
654 orig_start = start
653 # if there's skew we want to emit the "(offset %d lines)" even
655 # if there's skew we want to emit the "(offset %d lines)" even
654 # when the hunk cleanly applies at start + skew, so skip the
656 # when the hunk cleanly applies at start + skew, so skip the
655 # fast case code
657 # fast case code
656 if self.skew == 0 and diffhelpers.testhunk(old, self.lines, start) == 0:
658 if self.skew == 0 and diffhelpers.testhunk(old, self.lines, start) == 0:
657 if h.rmfile():
659 if self.remove:
658 self.backend.unlink(self.fname)
660 self.backend.unlink(self.fname)
659 else:
661 else:
660 self.lines[start : start + h.lena] = h.new()
662 self.lines[start : start + h.lena] = h.new()
661 self.offset += h.lenb - h.lena
663 self.offset += h.lenb - h.lena
662 self.dirty = True
664 self.dirty = True
663 return 0
665 return 0
664
666
665 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
667 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
666 self.hash = {}
668 self.hash = {}
667 for x, s in enumerate(self.lines):
669 for x, s in enumerate(self.lines):
668 self.hash.setdefault(s, []).append(x)
670 self.hash.setdefault(s, []).append(x)
669 if h.hunk[-1][0] != ' ':
671 if h.hunk[-1][0] != ' ':
670 # if the hunk tried to put something at the bottom of the file
672 # if the hunk tried to put something at the bottom of the file
671 # override the start line and use eof here
673 # override the start line and use eof here
672 search_start = len(self.lines)
674 search_start = len(self.lines)
673 else:
675 else:
674 search_start = orig_start + self.skew
676 search_start = orig_start + self.skew
675
677
676 for fuzzlen in xrange(3):
678 for fuzzlen in xrange(3):
677 for toponly in [True, False]:
679 for toponly in [True, False]:
678 old = h.old(fuzzlen, toponly)
680 old = h.old(fuzzlen, toponly)
679
681
680 cand = self.findlines(old[0][1:], search_start)
682 cand = self.findlines(old[0][1:], search_start)
681 for l in cand:
683 for l in cand:
682 if diffhelpers.testhunk(old, self.lines, l) == 0:
684 if diffhelpers.testhunk(old, self.lines, l) == 0:
683 newlines = h.new(fuzzlen, toponly)
685 newlines = h.new(fuzzlen, toponly)
684 self.lines[l : l + len(old)] = newlines
686 self.lines[l : l + len(old)] = newlines
685 self.offset += len(newlines) - len(old)
687 self.offset += len(newlines) - len(old)
686 self.skew = l - orig_start
688 self.skew = l - orig_start
687 self.dirty = True
689 self.dirty = True
688 offset = l - orig_start - fuzzlen
690 offset = l - orig_start - fuzzlen
689 if fuzzlen:
691 if fuzzlen:
690 msg = _("Hunk #%d succeeded at %d "
692 msg = _("Hunk #%d succeeded at %d "
691 "with fuzz %d "
693 "with fuzz %d "
692 "(offset %d lines).\n")
694 "(offset %d lines).\n")
693 self.printfile(True)
695 self.printfile(True)
694 self.ui.warn(msg %
696 self.ui.warn(msg %
695 (h.number, l + 1, fuzzlen, offset))
697 (h.number, l + 1, fuzzlen, offset))
696 else:
698 else:
697 msg = _("Hunk #%d succeeded at %d "
699 msg = _("Hunk #%d succeeded at %d "
698 "(offset %d lines).\n")
700 "(offset %d lines).\n")
699 self.ui.note(msg % (h.number, l + 1, offset))
701 self.ui.note(msg % (h.number, l + 1, offset))
700 return fuzzlen
702 return fuzzlen
701 self.printfile(True)
703 self.printfile(True)
702 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
704 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
703 self.rej.append(horig)
705 self.rej.append(horig)
704 return -1
706 return -1
705
707
706 def close(self):
708 def close(self):
707 if self.dirty:
709 if self.dirty:
708 self.writelines(self.fname, self.lines, self.mode)
710 self.writelines(self.fname, self.lines, self.mode)
709 self.write_rej()
711 self.write_rej()
710 return len(self.rej)
712 return len(self.rej)
711
713
712 class hunk(object):
714 class hunk(object):
713 def __init__(self, desc, num, lr, context, create=False, remove=False):
715 def __init__(self, desc, num, lr, context):
714 self.number = num
716 self.number = num
715 self.desc = desc
717 self.desc = desc
716 self.hunk = [desc]
718 self.hunk = [desc]
717 self.a = []
719 self.a = []
718 self.b = []
720 self.b = []
719 self.starta = self.lena = None
721 self.starta = self.lena = None
720 self.startb = self.lenb = None
722 self.startb = self.lenb = None
721 if lr is not None:
723 if lr is not None:
722 if context:
724 if context:
723 self.read_context_hunk(lr)
725 self.read_context_hunk(lr)
724 else:
726 else:
725 self.read_unified_hunk(lr)
727 self.read_unified_hunk(lr)
726 self.create = create
727 self.remove = remove and not create
728
728
729 def getnormalized(self):
729 def getnormalized(self):
730 """Return a copy with line endings normalized to LF."""
730 """Return a copy with line endings normalized to LF."""
731
731
732 def normalize(lines):
732 def normalize(lines):
733 nlines = []
733 nlines = []
734 for line in lines:
734 for line in lines:
735 if line.endswith('\r\n'):
735 if line.endswith('\r\n'):
736 line = line[:-2] + '\n'
736 line = line[:-2] + '\n'
737 nlines.append(line)
737 nlines.append(line)
738 return nlines
738 return nlines
739
739
740 # Dummy object, it is rebuilt manually
740 # Dummy object, it is rebuilt manually
741 nh = hunk(self.desc, self.number, None, None, False, False)
741 nh = hunk(self.desc, self.number, None, None)
742 nh.number = self.number
742 nh.number = self.number
743 nh.desc = self.desc
743 nh.desc = self.desc
744 nh.hunk = self.hunk
744 nh.hunk = self.hunk
745 nh.a = normalize(self.a)
745 nh.a = normalize(self.a)
746 nh.b = normalize(self.b)
746 nh.b = normalize(self.b)
747 nh.starta = self.starta
747 nh.starta = self.starta
748 nh.startb = self.startb
748 nh.startb = self.startb
749 nh.lena = self.lena
749 nh.lena = self.lena
750 nh.lenb = self.lenb
750 nh.lenb = self.lenb
751 nh.create = self.create
752 nh.remove = self.remove
753 return nh
751 return nh
754
752
755 def read_unified_hunk(self, lr):
753 def read_unified_hunk(self, lr):
756 m = unidesc.match(self.desc)
754 m = unidesc.match(self.desc)
757 if not m:
755 if not m:
758 raise PatchError(_("bad hunk #%d") % self.number)
756 raise PatchError(_("bad hunk #%d") % self.number)
759 self.starta, foo, self.lena, self.startb, foo2, self.lenb = m.groups()
757 self.starta, foo, self.lena, self.startb, foo2, self.lenb = m.groups()
760 if self.lena is None:
758 if self.lena is None:
761 self.lena = 1
759 self.lena = 1
762 else:
760 else:
763 self.lena = int(self.lena)
761 self.lena = int(self.lena)
764 if self.lenb is None:
762 if self.lenb is None:
765 self.lenb = 1
763 self.lenb = 1
766 else:
764 else:
767 self.lenb = int(self.lenb)
765 self.lenb = int(self.lenb)
768 self.starta = int(self.starta)
766 self.starta = int(self.starta)
769 self.startb = int(self.startb)
767 self.startb = int(self.startb)
770 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a, self.b)
768 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a, self.b)
771 # if we hit eof before finishing out the hunk, the last line will
769 # if we hit eof before finishing out the hunk, the last line will
772 # be zero length. Lets try to fix it up.
770 # be zero length. Lets try to fix it up.
773 while len(self.hunk[-1]) == 0:
771 while len(self.hunk[-1]) == 0:
774 del self.hunk[-1]
772 del self.hunk[-1]
775 del self.a[-1]
773 del self.a[-1]
776 del self.b[-1]
774 del self.b[-1]
777 self.lena -= 1
775 self.lena -= 1
778 self.lenb -= 1
776 self.lenb -= 1
779 self._fixnewline(lr)
777 self._fixnewline(lr)
780
778
781 def read_context_hunk(self, lr):
779 def read_context_hunk(self, lr):
782 self.desc = lr.readline()
780 self.desc = lr.readline()
783 m = contextdesc.match(self.desc)
781 m = contextdesc.match(self.desc)
784 if not m:
782 if not m:
785 raise PatchError(_("bad hunk #%d") % self.number)
783 raise PatchError(_("bad hunk #%d") % self.number)
786 foo, self.starta, foo2, aend, foo3 = m.groups()
784 foo, self.starta, foo2, aend, foo3 = m.groups()
787 self.starta = int(self.starta)
785 self.starta = int(self.starta)
788 if aend is None:
786 if aend is None:
789 aend = self.starta
787 aend = self.starta
790 self.lena = int(aend) - self.starta
788 self.lena = int(aend) - self.starta
791 if self.starta:
789 if self.starta:
792 self.lena += 1
790 self.lena += 1
793 for x in xrange(self.lena):
791 for x in xrange(self.lena):
794 l = lr.readline()
792 l = lr.readline()
795 if l.startswith('---'):
793 if l.startswith('---'):
796 # lines addition, old block is empty
794 # lines addition, old block is empty
797 lr.push(l)
795 lr.push(l)
798 break
796 break
799 s = l[2:]
797 s = l[2:]
800 if l.startswith('- ') or l.startswith('! '):
798 if l.startswith('- ') or l.startswith('! '):
801 u = '-' + s
799 u = '-' + s
802 elif l.startswith(' '):
800 elif l.startswith(' '):
803 u = ' ' + s
801 u = ' ' + s
804 else:
802 else:
805 raise PatchError(_("bad hunk #%d old text line %d") %
803 raise PatchError(_("bad hunk #%d old text line %d") %
806 (self.number, x))
804 (self.number, x))
807 self.a.append(u)
805 self.a.append(u)
808 self.hunk.append(u)
806 self.hunk.append(u)
809
807
810 l = lr.readline()
808 l = lr.readline()
811 if l.startswith('\ '):
809 if l.startswith('\ '):
812 s = self.a[-1][:-1]
810 s = self.a[-1][:-1]
813 self.a[-1] = s
811 self.a[-1] = s
814 self.hunk[-1] = s
812 self.hunk[-1] = s
815 l = lr.readline()
813 l = lr.readline()
816 m = contextdesc.match(l)
814 m = contextdesc.match(l)
817 if not m:
815 if not m:
818 raise PatchError(_("bad hunk #%d") % self.number)
816 raise PatchError(_("bad hunk #%d") % self.number)
819 foo, self.startb, foo2, bend, foo3 = m.groups()
817 foo, self.startb, foo2, bend, foo3 = m.groups()
820 self.startb = int(self.startb)
818 self.startb = int(self.startb)
821 if bend is None:
819 if bend is None:
822 bend = self.startb
820 bend = self.startb
823 self.lenb = int(bend) - self.startb
821 self.lenb = int(bend) - self.startb
824 if self.startb:
822 if self.startb:
825 self.lenb += 1
823 self.lenb += 1
826 hunki = 1
824 hunki = 1
827 for x in xrange(self.lenb):
825 for x in xrange(self.lenb):
828 l = lr.readline()
826 l = lr.readline()
829 if l.startswith('\ '):
827 if l.startswith('\ '):
830 # XXX: the only way to hit this is with an invalid line range.
828 # XXX: the only way to hit this is with an invalid line range.
831 # The no-eol marker is not counted in the line range, but I
829 # The no-eol marker is not counted in the line range, but I
832 # guess there are diff(1) out there which behave differently.
830 # guess there are diff(1) out there which behave differently.
833 s = self.b[-1][:-1]
831 s = self.b[-1][:-1]
834 self.b[-1] = s
832 self.b[-1] = s
835 self.hunk[hunki - 1] = s
833 self.hunk[hunki - 1] = s
836 continue
834 continue
837 if not l:
835 if not l:
838 # line deletions, new block is empty and we hit EOF
836 # line deletions, new block is empty and we hit EOF
839 lr.push(l)
837 lr.push(l)
840 break
838 break
841 s = l[2:]
839 s = l[2:]
842 if l.startswith('+ ') or l.startswith('! '):
840 if l.startswith('+ ') or l.startswith('! '):
843 u = '+' + s
841 u = '+' + s
844 elif l.startswith(' '):
842 elif l.startswith(' '):
845 u = ' ' + s
843 u = ' ' + s
846 elif len(self.b) == 0:
844 elif len(self.b) == 0:
847 # line deletions, new block is empty
845 # line deletions, new block is empty
848 lr.push(l)
846 lr.push(l)
849 break
847 break
850 else:
848 else:
851 raise PatchError(_("bad hunk #%d old text line %d") %
849 raise PatchError(_("bad hunk #%d old text line %d") %
852 (self.number, x))
850 (self.number, x))
853 self.b.append(s)
851 self.b.append(s)
854 while True:
852 while True:
855 if hunki >= len(self.hunk):
853 if hunki >= len(self.hunk):
856 h = ""
854 h = ""
857 else:
855 else:
858 h = self.hunk[hunki]
856 h = self.hunk[hunki]
859 hunki += 1
857 hunki += 1
860 if h == u:
858 if h == u:
861 break
859 break
862 elif h.startswith('-'):
860 elif h.startswith('-'):
863 continue
861 continue
864 else:
862 else:
865 self.hunk.insert(hunki - 1, u)
863 self.hunk.insert(hunki - 1, u)
866 break
864 break
867
865
868 if not self.a:
866 if not self.a:
869 # this happens when lines were only added to the hunk
867 # this happens when lines were only added to the hunk
870 for x in self.hunk:
868 for x in self.hunk:
871 if x.startswith('-') or x.startswith(' '):
869 if x.startswith('-') or x.startswith(' '):
872 self.a.append(x)
870 self.a.append(x)
873 if not self.b:
871 if not self.b:
874 # this happens when lines were only deleted from the hunk
872 # this happens when lines were only deleted from the hunk
875 for x in self.hunk:
873 for x in self.hunk:
876 if x.startswith('+') or x.startswith(' '):
874 if x.startswith('+') or x.startswith(' '):
877 self.b.append(x[1:])
875 self.b.append(x[1:])
878 # @@ -start,len +start,len @@
876 # @@ -start,len +start,len @@
879 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
877 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
880 self.startb, self.lenb)
878 self.startb, self.lenb)
881 self.hunk[0] = self.desc
879 self.hunk[0] = self.desc
882 self._fixnewline(lr)
880 self._fixnewline(lr)
883
881
884 def _fixnewline(self, lr):
882 def _fixnewline(self, lr):
885 l = lr.readline()
883 l = lr.readline()
886 if l.startswith('\ '):
884 if l.startswith('\ '):
887 diffhelpers.fix_newline(self.hunk, self.a, self.b)
885 diffhelpers.fix_newline(self.hunk, self.a, self.b)
888 else:
886 else:
889 lr.push(l)
887 lr.push(l)
890
888
891 def complete(self):
889 def complete(self):
892 return len(self.a) == self.lena and len(self.b) == self.lenb
890 return len(self.a) == self.lena and len(self.b) == self.lenb
893
891
894 def createfile(self):
895 return self.starta == 0 and self.lena == 0 and self.create
896
897 def rmfile(self):
898 return self.startb == 0 and self.lenb == 0 and self.remove
899
900 def fuzzit(self, l, fuzz, toponly):
892 def fuzzit(self, l, fuzz, toponly):
901 # this removes context lines from the top and bottom of list 'l'. It
893 # this removes context lines from the top and bottom of list 'l'. It
902 # checks the hunk to make sure only context lines are removed, and then
894 # checks the hunk to make sure only context lines are removed, and then
903 # returns a new shortened list of lines.
895 # returns a new shortened list of lines.
904 fuzz = min(fuzz, len(l)-1)
896 fuzz = min(fuzz, len(l)-1)
905 if fuzz:
897 if fuzz:
906 top = 0
898 top = 0
907 bot = 0
899 bot = 0
908 hlen = len(self.hunk)
900 hlen = len(self.hunk)
909 for x in xrange(hlen - 1):
901 for x in xrange(hlen - 1):
910 # the hunk starts with the @@ line, so use x+1
902 # the hunk starts with the @@ line, so use x+1
911 if self.hunk[x + 1][0] == ' ':
903 if self.hunk[x + 1][0] == ' ':
912 top += 1
904 top += 1
913 else:
905 else:
914 break
906 break
915 if not toponly:
907 if not toponly:
916 for x in xrange(hlen - 1):
908 for x in xrange(hlen - 1):
917 if self.hunk[hlen - bot - 1][0] == ' ':
909 if self.hunk[hlen - bot - 1][0] == ' ':
918 bot += 1
910 bot += 1
919 else:
911 else:
920 break
912 break
921
913
922 # top and bot now count context in the hunk
914 # top and bot now count context in the hunk
923 # adjust them if either one is short
915 # adjust them if either one is short
924 context = max(top, bot, 3)
916 context = max(top, bot, 3)
925 if bot < context:
917 if bot < context:
926 bot = max(0, fuzz - (context - bot))
918 bot = max(0, fuzz - (context - bot))
927 else:
919 else:
928 bot = min(fuzz, bot)
920 bot = min(fuzz, bot)
929 if top < context:
921 if top < context:
930 top = max(0, fuzz - (context - top))
922 top = max(0, fuzz - (context - top))
931 else:
923 else:
932 top = min(fuzz, top)
924 top = min(fuzz, top)
933
925
934 return l[top:len(l)-bot]
926 return l[top:len(l)-bot]
935 return l
927 return l
936
928
937 def old(self, fuzz=0, toponly=False):
929 def old(self, fuzz=0, toponly=False):
938 return self.fuzzit(self.a, fuzz, toponly)
930 return self.fuzzit(self.a, fuzz, toponly)
939
931
940 def new(self, fuzz=0, toponly=False):
932 def new(self, fuzz=0, toponly=False):
941 return self.fuzzit(self.b, fuzz, toponly)
933 return self.fuzzit(self.b, fuzz, toponly)
942
934
943 class binhunk:
935 class binhunk:
944 'A binary patch file. Only understands literals so far.'
936 'A binary patch file. Only understands literals so far.'
945 def __init__(self, gitpatch, lr):
937 def __init__(self, lr):
946 self.gitpatch = gitpatch
947 self.text = None
938 self.text = None
948 self.hunk = ['GIT binary patch\n']
939 self.hunk = ['GIT binary patch\n']
949 self._read(lr)
940 self._read(lr)
950
941
951 def createfile(self):
952 return self.gitpatch.op == 'ADD'
953
954 def rmfile(self):
955 return self.gitpatch.op == 'DELETE'
956
957 def complete(self):
942 def complete(self):
958 return self.text is not None
943 return self.text is not None
959
944
960 def new(self):
945 def new(self):
961 return [self.text]
946 return [self.text]
962
947
963 def _read(self, lr):
948 def _read(self, lr):
964 line = lr.readline()
949 line = lr.readline()
965 self.hunk.append(line)
950 self.hunk.append(line)
966 while line and not line.startswith('literal '):
951 while line and not line.startswith('literal '):
967 line = lr.readline()
952 line = lr.readline()
968 self.hunk.append(line)
953 self.hunk.append(line)
969 if not line:
954 if not line:
970 raise PatchError(_('could not extract binary patch'))
955 raise PatchError(_('could not extract binary patch'))
971 size = int(line[8:].rstrip())
956 size = int(line[8:].rstrip())
972 dec = []
957 dec = []
973 line = lr.readline()
958 line = lr.readline()
974 self.hunk.append(line)
959 self.hunk.append(line)
975 while len(line) > 1:
960 while len(line) > 1:
976 l = line[0]
961 l = line[0]
977 if l <= 'Z' and l >= 'A':
962 if l <= 'Z' and l >= 'A':
978 l = ord(l) - ord('A') + 1
963 l = ord(l) - ord('A') + 1
979 else:
964 else:
980 l = ord(l) - ord('a') + 27
965 l = ord(l) - ord('a') + 27
981 dec.append(base85.b85decode(line[1:-1])[:l])
966 dec.append(base85.b85decode(line[1:-1])[:l])
982 line = lr.readline()
967 line = lr.readline()
983 self.hunk.append(line)
968 self.hunk.append(line)
984 text = zlib.decompress(''.join(dec))
969 text = zlib.decompress(''.join(dec))
985 if len(text) != size:
970 if len(text) != size:
986 raise PatchError(_('binary patch is %d bytes, not %d') %
971 raise PatchError(_('binary patch is %d bytes, not %d') %
987 len(text), size)
972 len(text), size)
988 self.text = text
973 self.text = text
989
974
990 def parsefilename(str):
975 def parsefilename(str):
991 # --- filename \t|space stuff
976 # --- filename \t|space stuff
992 s = str[4:].rstrip('\r\n')
977 s = str[4:].rstrip('\r\n')
993 i = s.find('\t')
978 i = s.find('\t')
994 if i < 0:
979 if i < 0:
995 i = s.find(' ')
980 i = s.find(' ')
996 if i < 0:
981 if i < 0:
997 return s
982 return s
998 return s[:i]
983 return s[:i]
999
984
1000 def pathstrip(path, strip):
985 def pathstrip(path, strip):
1001 pathlen = len(path)
986 pathlen = len(path)
1002 i = 0
987 i = 0
1003 if strip == 0:
988 if strip == 0:
1004 return '', path.rstrip()
989 return '', path.rstrip()
1005 count = strip
990 count = strip
1006 while count > 0:
991 while count > 0:
1007 i = path.find('/', i)
992 i = path.find('/', i)
1008 if i == -1:
993 if i == -1:
1009 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
994 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1010 (count, strip, path))
995 (count, strip, path))
1011 i += 1
996 i += 1
1012 # consume '//' in the path
997 # consume '//' in the path
1013 while i < pathlen - 1 and path[i] == '/':
998 while i < pathlen - 1 and path[i] == '/':
1014 i += 1
999 i += 1
1015 count -= 1
1000 count -= 1
1016 return path[:i].lstrip(), path[i:].rstrip()
1001 return path[:i].lstrip(), path[i:].rstrip()
1017
1002
1018 def selectfile(backend, afile_orig, bfile_orig, hunk, strip, gp):
1003 def selectfile(backend, afile_orig, bfile_orig, hunk, strip, gp):
1019 if gp:
1004 if gp:
1020 # Git patches do not play games. Excluding copies from the
1005 # Git patches do not play games. Excluding copies from the
1021 # following heuristic avoids a lot of confusion
1006 # following heuristic avoids a lot of confusion
1022 fname = pathstrip(gp.path, strip - 1)[1]
1007 fname = pathstrip(gp.path, strip - 1)[1]
1023 missing = not hunk.createfile() and not backend.exists(fname)
1008 create = gp.op == 'ADD'
1024 return fname, missing
1009 remove = gp.op == 'DELETE'
1010 missing = not create and not backend.exists(fname)
1011 return fname, missing, create, remove
1025 nulla = afile_orig == "/dev/null"
1012 nulla = afile_orig == "/dev/null"
1026 nullb = bfile_orig == "/dev/null"
1013 nullb = bfile_orig == "/dev/null"
1014 create = nulla and hunk.starta == 0 and hunk.lena == 0
1015 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1027 abase, afile = pathstrip(afile_orig, strip)
1016 abase, afile = pathstrip(afile_orig, strip)
1028 gooda = not nulla and backend.exists(afile)
1017 gooda = not nulla and backend.exists(afile)
1029 bbase, bfile = pathstrip(bfile_orig, strip)
1018 bbase, bfile = pathstrip(bfile_orig, strip)
1030 if afile == bfile:
1019 if afile == bfile:
1031 goodb = gooda
1020 goodb = gooda
1032 else:
1021 else:
1033 goodb = not nullb and backend.exists(bfile)
1022 goodb = not nullb and backend.exists(bfile)
1034 createfunc = hunk.createfile
1023 missing = not goodb and not gooda and not create
1035 missing = not goodb and not gooda and not createfunc()
1036
1024
1037 # some diff programs apparently produce patches where the afile is
1025 # some diff programs apparently produce patches where the afile is
1038 # not /dev/null, but afile starts with bfile
1026 # not /dev/null, but afile starts with bfile
1039 abasedir = afile[:afile.rfind('/') + 1]
1027 abasedir = afile[:afile.rfind('/') + 1]
1040 bbasedir = bfile[:bfile.rfind('/') + 1]
1028 bbasedir = bfile[:bfile.rfind('/') + 1]
1041 if missing and abasedir == bbasedir and afile.startswith(bfile):
1029 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1042 # this isn't very pretty
1030 and hunk.starta == 0 and hunk.lena == 0):
1043 hunk.create = True
1031 create = True
1044 if createfunc():
1045 missing = False
1032 missing = False
1046 else:
1047 hunk.create = False
1048
1033
1049 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1034 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1050 # diff is between a file and its backup. In this case, the original
1035 # diff is between a file and its backup. In this case, the original
1051 # file should be patched (see original mpatch code).
1036 # file should be patched (see original mpatch code).
1052 isbackup = (abase == bbase and bfile.startswith(afile))
1037 isbackup = (abase == bbase and bfile.startswith(afile))
1053 fname = None
1038 fname = None
1054 if not missing:
1039 if not missing:
1055 if gooda and goodb:
1040 if gooda and goodb:
1056 fname = isbackup and afile or bfile
1041 fname = isbackup and afile or bfile
1057 elif gooda:
1042 elif gooda:
1058 fname = afile
1043 fname = afile
1059
1044
1060 if not fname:
1045 if not fname:
1061 if not nullb:
1046 if not nullb:
1062 fname = isbackup and afile or bfile
1047 fname = isbackup and afile or bfile
1063 elif not nulla:
1048 elif not nulla:
1064 fname = afile
1049 fname = afile
1065 else:
1050 else:
1066 raise PatchError(_("undefined source and destination files"))
1051 raise PatchError(_("undefined source and destination files"))
1067
1052
1068 return fname, missing
1053 return fname, missing, create, remove
1069
1054
1070 def scangitpatch(lr, firstline):
1055 def scangitpatch(lr, firstline):
1071 """
1056 """
1072 Git patches can emit:
1057 Git patches can emit:
1073 - rename a to b
1058 - rename a to b
1074 - change b
1059 - change b
1075 - copy a to c
1060 - copy a to c
1076 - change c
1061 - change c
1077
1062
1078 We cannot apply this sequence as-is, the renamed 'a' could not be
1063 We cannot apply this sequence as-is, the renamed 'a' could not be
1079 found for it would have been renamed already. And we cannot copy
1064 found for it would have been renamed already. And we cannot copy
1080 from 'b' instead because 'b' would have been changed already. So
1065 from 'b' instead because 'b' would have been changed already. So
1081 we scan the git patch for copy and rename commands so we can
1066 we scan the git patch for copy and rename commands so we can
1082 perform the copies ahead of time.
1067 perform the copies ahead of time.
1083 """
1068 """
1084 pos = 0
1069 pos = 0
1085 try:
1070 try:
1086 pos = lr.fp.tell()
1071 pos = lr.fp.tell()
1087 fp = lr.fp
1072 fp = lr.fp
1088 except IOError:
1073 except IOError:
1089 fp = cStringIO.StringIO(lr.fp.read())
1074 fp = cStringIO.StringIO(lr.fp.read())
1090 gitlr = linereader(fp)
1075 gitlr = linereader(fp)
1091 gitlr.push(firstline)
1076 gitlr.push(firstline)
1092 gitpatches = readgitpatch(gitlr)
1077 gitpatches = readgitpatch(gitlr)
1093 fp.seek(pos)
1078 fp.seek(pos)
1094 return gitpatches
1079 return gitpatches
1095
1080
1096 def iterhunks(fp):
1081 def iterhunks(fp):
1097 """Read a patch and yield the following events:
1082 """Read a patch and yield the following events:
1098 - ("file", afile, bfile, firsthunk): select a new target file.
1083 - ("file", afile, bfile, firsthunk): select a new target file.
1099 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1084 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1100 "file" event.
1085 "file" event.
1101 - ("git", gitchanges): current diff is in git format, gitchanges
1086 - ("git", gitchanges): current diff is in git format, gitchanges
1102 maps filenames to gitpatch records. Unique event.
1087 maps filenames to gitpatch records. Unique event.
1103 """
1088 """
1104 afile = ""
1089 afile = ""
1105 bfile = ""
1090 bfile = ""
1106 state = None
1091 state = None
1107 hunknum = 0
1092 hunknum = 0
1108 emitfile = newfile = False
1093 emitfile = newfile = False
1109 gitpatches = None
1094 gitpatches = None
1110
1095
1111 # our states
1096 # our states
1112 BFILE = 1
1097 BFILE = 1
1113 context = None
1098 context = None
1114 lr = linereader(fp)
1099 lr = linereader(fp)
1115
1100
1116 while True:
1101 while True:
1117 x = lr.readline()
1102 x = lr.readline()
1118 if not x:
1103 if not x:
1119 break
1104 break
1120 if state == BFILE and (
1105 if state == BFILE and (
1121 (not context and x[0] == '@')
1106 (not context and x[0] == '@')
1122 or (context is not False and x.startswith('***************'))
1107 or (context is not False and x.startswith('***************'))
1123 or x.startswith('GIT binary patch')):
1108 or x.startswith('GIT binary patch')):
1124 gp = None
1109 gp = None
1125 if gitpatches and gitpatches[-1][0] == bfile:
1110 if gitpatches and gitpatches[-1][0] == bfile:
1126 gp = gitpatches.pop()[1]
1111 gp = gitpatches.pop()[1]
1127 if x.startswith('GIT binary patch'):
1112 if x.startswith('GIT binary patch'):
1128 h = binhunk(gp, lr)
1113 h = binhunk(lr)
1129 else:
1114 else:
1130 if context is None and x.startswith('***************'):
1115 if context is None and x.startswith('***************'):
1131 context = True
1116 context = True
1132 create = afile == '/dev/null' or gp and gp.op == 'ADD'
1117 h = hunk(x, hunknum + 1, lr, context)
1133 remove = bfile == '/dev/null' or gp and gp.op == 'DELETE'
1134 h = hunk(x, hunknum + 1, lr, context, create, remove)
1135 hunknum += 1
1118 hunknum += 1
1136 if emitfile:
1119 if emitfile:
1137 emitfile = False
1120 emitfile = False
1138 yield 'file', (afile, bfile, h, gp)
1121 yield 'file', (afile, bfile, h, gp)
1139 yield 'hunk', h
1122 yield 'hunk', h
1140 elif x.startswith('diff --git'):
1123 elif x.startswith('diff --git'):
1141 m = gitre.match(x)
1124 m = gitre.match(x)
1142 if not m:
1125 if not m:
1143 continue
1126 continue
1144 if gitpatches is None:
1127 if gitpatches is None:
1145 # scan whole input for git metadata
1128 # scan whole input for git metadata
1146 gitpatches = [('b/' + gp.path, gp) for gp
1129 gitpatches = [('b/' + gp.path, gp) for gp
1147 in scangitpatch(lr, x)]
1130 in scangitpatch(lr, x)]
1148 yield 'git', [g[1] for g in gitpatches
1131 yield 'git', [g[1] for g in gitpatches
1149 if g[1].op in ('COPY', 'RENAME')]
1132 if g[1].op in ('COPY', 'RENAME')]
1150 gitpatches.reverse()
1133 gitpatches.reverse()
1151 afile = 'a/' + m.group(1)
1134 afile = 'a/' + m.group(1)
1152 bfile = 'b/' + m.group(2)
1135 bfile = 'b/' + m.group(2)
1153 while bfile != gitpatches[-1][0]:
1136 while bfile != gitpatches[-1][0]:
1154 gp = gitpatches.pop()[1]
1137 gp = gitpatches.pop()[1]
1155 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp)
1138 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp)
1156 gp = gitpatches[-1][1]
1139 gp = gitpatches[-1][1]
1157 # copy/rename + modify should modify target, not source
1140 # copy/rename + modify should modify target, not source
1158 if gp.op in ('COPY', 'DELETE', 'RENAME', 'ADD') or gp.mode:
1141 if gp.op in ('COPY', 'DELETE', 'RENAME', 'ADD') or gp.mode:
1159 afile = bfile
1142 afile = bfile
1160 newfile = True
1143 newfile = True
1161 elif x.startswith('---'):
1144 elif x.startswith('---'):
1162 # check for a unified diff
1145 # check for a unified diff
1163 l2 = lr.readline()
1146 l2 = lr.readline()
1164 if not l2.startswith('+++'):
1147 if not l2.startswith('+++'):
1165 lr.push(l2)
1148 lr.push(l2)
1166 continue
1149 continue
1167 newfile = True
1150 newfile = True
1168 context = False
1151 context = False
1169 afile = parsefilename(x)
1152 afile = parsefilename(x)
1170 bfile = parsefilename(l2)
1153 bfile = parsefilename(l2)
1171 elif x.startswith('***'):
1154 elif x.startswith('***'):
1172 # check for a context diff
1155 # check for a context diff
1173 l2 = lr.readline()
1156 l2 = lr.readline()
1174 if not l2.startswith('---'):
1157 if not l2.startswith('---'):
1175 lr.push(l2)
1158 lr.push(l2)
1176 continue
1159 continue
1177 l3 = lr.readline()
1160 l3 = lr.readline()
1178 lr.push(l3)
1161 lr.push(l3)
1179 if not l3.startswith("***************"):
1162 if not l3.startswith("***************"):
1180 lr.push(l2)
1163 lr.push(l2)
1181 continue
1164 continue
1182 newfile = True
1165 newfile = True
1183 context = True
1166 context = True
1184 afile = parsefilename(x)
1167 afile = parsefilename(x)
1185 bfile = parsefilename(l2)
1168 bfile = parsefilename(l2)
1186
1169
1187 if newfile:
1170 if newfile:
1188 newfile = False
1171 newfile = False
1189 emitfile = True
1172 emitfile = True
1190 state = BFILE
1173 state = BFILE
1191 hunknum = 0
1174 hunknum = 0
1192
1175
1193 while gitpatches:
1176 while gitpatches:
1194 gp = gitpatches.pop()[1]
1177 gp = gitpatches.pop()[1]
1195 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp)
1178 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp)
1196
1179
1197 def applydiff(ui, fp, changed, backend, strip=1, eolmode='strict'):
1180 def applydiff(ui, fp, changed, backend, strip=1, eolmode='strict'):
1198 """Reads a patch from fp and tries to apply it.
1181 """Reads a patch from fp and tries to apply it.
1199
1182
1200 The dict 'changed' is filled in with all of the filenames changed
1183 The dict 'changed' is filled in with all of the filenames changed
1201 by the patch. Returns 0 for a clean patch, -1 if any rejects were
1184 by the patch. Returns 0 for a clean patch, -1 if any rejects were
1202 found and 1 if there was any fuzz.
1185 found and 1 if there was any fuzz.
1203
1186
1204 If 'eolmode' is 'strict', the patch content and patched file are
1187 If 'eolmode' is 'strict', the patch content and patched file are
1205 read in binary mode. Otherwise, line endings are ignored when
1188 read in binary mode. Otherwise, line endings are ignored when
1206 patching then normalized according to 'eolmode'.
1189 patching then normalized according to 'eolmode'.
1207 """
1190 """
1208 return _applydiff(ui, fp, patchfile, backend, changed, strip=strip,
1191 return _applydiff(ui, fp, patchfile, backend, changed, strip=strip,
1209 eolmode=eolmode)
1192 eolmode=eolmode)
1210
1193
1211 def _applydiff(ui, fp, patcher, backend, changed, strip=1, eolmode='strict'):
1194 def _applydiff(ui, fp, patcher, backend, changed, strip=1, eolmode='strict'):
1212
1195
1213 def pstrip(p):
1196 def pstrip(p):
1214 return pathstrip(p, strip - 1)[1]
1197 return pathstrip(p, strip - 1)[1]
1215
1198
1216 rejects = 0
1199 rejects = 0
1217 err = 0
1200 err = 0
1218 current_file = None
1201 current_file = None
1219
1202
1220 for state, values in iterhunks(fp):
1203 for state, values in iterhunks(fp):
1221 if state == 'hunk':
1204 if state == 'hunk':
1222 if not current_file:
1205 if not current_file:
1223 continue
1206 continue
1224 ret = current_file.apply(values)
1207 ret = current_file.apply(values)
1225 if ret >= 0:
1208 if ret >= 0:
1226 changed.setdefault(current_file.fname, None)
1209 changed.setdefault(current_file.fname, None)
1227 if ret > 0:
1210 if ret > 0:
1228 err = 1
1211 err = 1
1229 elif state == 'file':
1212 elif state == 'file':
1230 if current_file:
1213 if current_file:
1231 rejects += current_file.close()
1214 rejects += current_file.close()
1232 current_file = None
1215 current_file = None
1233 afile, bfile, first_hunk, gp = values
1216 afile, bfile, first_hunk, gp = values
1234 if gp:
1217 if gp:
1235 path = pstrip(gp.path)
1218 path = pstrip(gp.path)
1236 changed[path] = gp
1219 changed[path] = gp
1237 if gp.op == 'DELETE':
1220 if gp.op == 'DELETE':
1238 backend.unlink(path)
1221 backend.unlink(path)
1239 continue
1222 continue
1240 if gp.op == 'RENAME':
1223 if gp.op == 'RENAME':
1241 backend.unlink(pstrip(gp.oldpath))
1224 backend.unlink(pstrip(gp.oldpath))
1242 if gp.mode and not first_hunk:
1225 if gp.mode and not first_hunk:
1243 data = None
1226 data = None
1244 if gp.op == 'ADD':
1227 if gp.op == 'ADD':
1245 # Added files without content have no hunk and
1228 # Added files without content have no hunk and
1246 # must be created
1229 # must be created
1247 data = ''
1230 data = ''
1248 backend.setfile(path, data, gp.mode)
1231 backend.setfile(path, data, gp.mode)
1249 if not first_hunk:
1232 if not first_hunk:
1250 continue
1233 continue
1251 try:
1234 try:
1252 mode = gp and gp.mode or None
1235 mode = gp and gp.mode or None
1253 current_file, missing = selectfile(backend, afile, bfile,
1236 current_file, missing, create, remove = selectfile(
1254 first_hunk, strip, gp)
1237 backend, afile, bfile, first_hunk, strip, gp)
1255 current_file = patcher(ui, current_file, backend, mode,
1238 current_file = patcher(ui, current_file, backend, mode,
1256 missing=missing, eolmode=eolmode)
1239 create, remove, missing=missing,
1240 eolmode=eolmode)
1257 except PatchError, inst:
1241 except PatchError, inst:
1258 ui.warn(str(inst) + '\n')
1242 ui.warn(str(inst) + '\n')
1259 current_file = None
1243 current_file = None
1260 rejects += 1
1244 rejects += 1
1261 continue
1245 continue
1262 elif state == 'git':
1246 elif state == 'git':
1263 for gp in values:
1247 for gp in values:
1264 backend.copy(pstrip(gp.oldpath), pstrip(gp.path))
1248 backend.copy(pstrip(gp.oldpath), pstrip(gp.path))
1265 else:
1249 else:
1266 raise util.Abort(_('unsupported parser state: %s') % state)
1250 raise util.Abort(_('unsupported parser state: %s') % state)
1267
1251
1268 if current_file:
1252 if current_file:
1269 rejects += current_file.close()
1253 rejects += current_file.close()
1270
1254
1271 if rejects:
1255 if rejects:
1272 return -1
1256 return -1
1273 return err
1257 return err
1274
1258
1275 def _externalpatch(ui, repo, patcher, patchname, strip, files,
1259 def _externalpatch(ui, repo, patcher, patchname, strip, files,
1276 similarity):
1260 similarity):
1277 """use <patcher> to apply <patchname> to the working directory.
1261 """use <patcher> to apply <patchname> to the working directory.
1278 returns whether patch was applied with fuzz factor."""
1262 returns whether patch was applied with fuzz factor."""
1279
1263
1280 fuzz = False
1264 fuzz = False
1281 args = []
1265 args = []
1282 cwd = repo.root
1266 cwd = repo.root
1283 if cwd:
1267 if cwd:
1284 args.append('-d %s' % util.shellquote(cwd))
1268 args.append('-d %s' % util.shellquote(cwd))
1285 fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
1269 fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
1286 util.shellquote(patchname)))
1270 util.shellquote(patchname)))
1287 try:
1271 try:
1288 for line in fp:
1272 for line in fp:
1289 line = line.rstrip()
1273 line = line.rstrip()
1290 ui.note(line + '\n')
1274 ui.note(line + '\n')
1291 if line.startswith('patching file '):
1275 if line.startswith('patching file '):
1292 pf = util.parsepatchoutput(line)
1276 pf = util.parsepatchoutput(line)
1293 printed_file = False
1277 printed_file = False
1294 files.setdefault(pf, None)
1278 files.setdefault(pf, None)
1295 elif line.find('with fuzz') >= 0:
1279 elif line.find('with fuzz') >= 0:
1296 fuzz = True
1280 fuzz = True
1297 if not printed_file:
1281 if not printed_file:
1298 ui.warn(pf + '\n')
1282 ui.warn(pf + '\n')
1299 printed_file = True
1283 printed_file = True
1300 ui.warn(line + '\n')
1284 ui.warn(line + '\n')
1301 elif line.find('saving rejects to file') >= 0:
1285 elif line.find('saving rejects to file') >= 0:
1302 ui.warn(line + '\n')
1286 ui.warn(line + '\n')
1303 elif line.find('FAILED') >= 0:
1287 elif line.find('FAILED') >= 0:
1304 if not printed_file:
1288 if not printed_file:
1305 ui.warn(pf + '\n')
1289 ui.warn(pf + '\n')
1306 printed_file = True
1290 printed_file = True
1307 ui.warn(line + '\n')
1291 ui.warn(line + '\n')
1308 finally:
1292 finally:
1309 if files:
1293 if files:
1310 cfiles = list(files)
1294 cfiles = list(files)
1311 cwd = repo.getcwd()
1295 cwd = repo.getcwd()
1312 if cwd:
1296 if cwd:
1313 cfiles = [util.pathto(repo.root, cwd, f)
1297 cfiles = [util.pathto(repo.root, cwd, f)
1314 for f in cfile]
1298 for f in cfile]
1315 scmutil.addremove(repo, cfiles, similarity=similarity)
1299 scmutil.addremove(repo, cfiles, similarity=similarity)
1316 code = fp.close()
1300 code = fp.close()
1317 if code:
1301 if code:
1318 raise PatchError(_("patch command failed: %s") %
1302 raise PatchError(_("patch command failed: %s") %
1319 util.explainexit(code)[0])
1303 util.explainexit(code)[0])
1320 return fuzz
1304 return fuzz
1321
1305
1322 def internalpatch(ui, repo, patchobj, strip, files=None, eolmode='strict',
1306 def internalpatch(ui, repo, patchobj, strip, files=None, eolmode='strict',
1323 similarity=0):
1307 similarity=0):
1324 """use builtin patch to apply <patchobj> to the working directory.
1308 """use builtin patch to apply <patchobj> to the working directory.
1325 returns whether patch was applied with fuzz factor."""
1309 returns whether patch was applied with fuzz factor."""
1326
1310
1327 if files is None:
1311 if files is None:
1328 files = {}
1312 files = {}
1329 if eolmode is None:
1313 if eolmode is None:
1330 eolmode = ui.config('patch', 'eol', 'strict')
1314 eolmode = ui.config('patch', 'eol', 'strict')
1331 if eolmode.lower() not in eolmodes:
1315 if eolmode.lower() not in eolmodes:
1332 raise util.Abort(_('unsupported line endings type: %s') % eolmode)
1316 raise util.Abort(_('unsupported line endings type: %s') % eolmode)
1333 eolmode = eolmode.lower()
1317 eolmode = eolmode.lower()
1334
1318
1335 backend = workingbackend(ui, repo, similarity)
1319 backend = workingbackend(ui, repo, similarity)
1336 try:
1320 try:
1337 fp = open(patchobj, 'rb')
1321 fp = open(patchobj, 'rb')
1338 except TypeError:
1322 except TypeError:
1339 fp = patchobj
1323 fp = patchobj
1340 try:
1324 try:
1341 ret = applydiff(ui, fp, files, backend, strip=strip, eolmode=eolmode)
1325 ret = applydiff(ui, fp, files, backend, strip=strip, eolmode=eolmode)
1342 finally:
1326 finally:
1343 if fp != patchobj:
1327 if fp != patchobj:
1344 fp.close()
1328 fp.close()
1345 files.update(dict.fromkeys(backend.close()))
1329 files.update(dict.fromkeys(backend.close()))
1346 if ret < 0:
1330 if ret < 0:
1347 raise PatchError(_('patch failed to apply'))
1331 raise PatchError(_('patch failed to apply'))
1348 return ret > 0
1332 return ret > 0
1349
1333
1350 def patch(ui, repo, patchname, strip=1, files=None, eolmode='strict',
1334 def patch(ui, repo, patchname, strip=1, files=None, eolmode='strict',
1351 similarity=0):
1335 similarity=0):
1352 """Apply <patchname> to the working directory.
1336 """Apply <patchname> to the working directory.
1353
1337
1354 'eolmode' specifies how end of lines should be handled. It can be:
1338 'eolmode' specifies how end of lines should be handled. It can be:
1355 - 'strict': inputs are read in binary mode, EOLs are preserved
1339 - 'strict': inputs are read in binary mode, EOLs are preserved
1356 - 'crlf': EOLs are ignored when patching and reset to CRLF
1340 - 'crlf': EOLs are ignored when patching and reset to CRLF
1357 - 'lf': EOLs are ignored when patching and reset to LF
1341 - 'lf': EOLs are ignored when patching and reset to LF
1358 - None: get it from user settings, default to 'strict'
1342 - None: get it from user settings, default to 'strict'
1359 'eolmode' is ignored when using an external patcher program.
1343 'eolmode' is ignored when using an external patcher program.
1360
1344
1361 Returns whether patch was applied with fuzz factor.
1345 Returns whether patch was applied with fuzz factor.
1362 """
1346 """
1363 patcher = ui.config('ui', 'patch')
1347 patcher = ui.config('ui', 'patch')
1364 if files is None:
1348 if files is None:
1365 files = {}
1349 files = {}
1366 try:
1350 try:
1367 if patcher:
1351 if patcher:
1368 return _externalpatch(ui, repo, patcher, patchname, strip,
1352 return _externalpatch(ui, repo, patcher, patchname, strip,
1369 files, similarity)
1353 files, similarity)
1370 return internalpatch(ui, repo, patchname, strip, files, eolmode,
1354 return internalpatch(ui, repo, patchname, strip, files, eolmode,
1371 similarity)
1355 similarity)
1372 except PatchError, err:
1356 except PatchError, err:
1373 raise util.Abort(str(err))
1357 raise util.Abort(str(err))
1374
1358
1375 def changedfiles(ui, repo, patchpath, strip=1):
1359 def changedfiles(ui, repo, patchpath, strip=1):
1376 backend = fsbackend(ui, repo.root)
1360 backend = fsbackend(ui, repo.root)
1377 fp = open(patchpath, 'rb')
1361 fp = open(patchpath, 'rb')
1378 try:
1362 try:
1379 changed = set()
1363 changed = set()
1380 for state, values in iterhunks(fp):
1364 for state, values in iterhunks(fp):
1381 if state == 'file':
1365 if state == 'file':
1382 afile, bfile, first_hunk, gp = values
1366 afile, bfile, first_hunk, gp = values
1383 if gp:
1367 if gp:
1384 changed.add(pathstrip(gp.path, strip - 1)[1])
1368 changed.add(pathstrip(gp.path, strip - 1)[1])
1385 if gp.op == 'RENAME':
1369 if gp.op == 'RENAME':
1386 changed.add(pathstrip(gp.oldpath, strip - 1)[1])
1370 changed.add(pathstrip(gp.oldpath, strip - 1)[1])
1387 if not first_hunk:
1371 if not first_hunk:
1388 continue
1372 continue
1389 current_file, missing = selectfile(backend, afile, bfile,
1373 current_file, missing, create, remove = selectfile(
1390 first_hunk, strip, gp)
1374 backend, afile, bfile, first_hunk, strip, gp)
1391 changed.add(current_file)
1375 changed.add(current_file)
1392 elif state not in ('hunk', 'git'):
1376 elif state not in ('hunk', 'git'):
1393 raise util.Abort(_('unsupported parser state: %s') % state)
1377 raise util.Abort(_('unsupported parser state: %s') % state)
1394 return changed
1378 return changed
1395 finally:
1379 finally:
1396 fp.close()
1380 fp.close()
1397
1381
1398 def b85diff(to, tn):
1382 def b85diff(to, tn):
1399 '''print base85-encoded binary diff'''
1383 '''print base85-encoded binary diff'''
1400 def gitindex(text):
1384 def gitindex(text):
1401 if not text:
1385 if not text:
1402 return hex(nullid)
1386 return hex(nullid)
1403 l = len(text)
1387 l = len(text)
1404 s = util.sha1('blob %d\0' % l)
1388 s = util.sha1('blob %d\0' % l)
1405 s.update(text)
1389 s.update(text)
1406 return s.hexdigest()
1390 return s.hexdigest()
1407
1391
1408 def fmtline(line):
1392 def fmtline(line):
1409 l = len(line)
1393 l = len(line)
1410 if l <= 26:
1394 if l <= 26:
1411 l = chr(ord('A') + l - 1)
1395 l = chr(ord('A') + l - 1)
1412 else:
1396 else:
1413 l = chr(l - 26 + ord('a') - 1)
1397 l = chr(l - 26 + ord('a') - 1)
1414 return '%c%s\n' % (l, base85.b85encode(line, True))
1398 return '%c%s\n' % (l, base85.b85encode(line, True))
1415
1399
1416 def chunk(text, csize=52):
1400 def chunk(text, csize=52):
1417 l = len(text)
1401 l = len(text)
1418 i = 0
1402 i = 0
1419 while i < l:
1403 while i < l:
1420 yield text[i:i + csize]
1404 yield text[i:i + csize]
1421 i += csize
1405 i += csize
1422
1406
1423 tohash = gitindex(to)
1407 tohash = gitindex(to)
1424 tnhash = gitindex(tn)
1408 tnhash = gitindex(tn)
1425 if tohash == tnhash:
1409 if tohash == tnhash:
1426 return ""
1410 return ""
1427
1411
1428 # TODO: deltas
1412 # TODO: deltas
1429 ret = ['index %s..%s\nGIT binary patch\nliteral %s\n' %
1413 ret = ['index %s..%s\nGIT binary patch\nliteral %s\n' %
1430 (tohash, tnhash, len(tn))]
1414 (tohash, tnhash, len(tn))]
1431 for l in chunk(zlib.compress(tn)):
1415 for l in chunk(zlib.compress(tn)):
1432 ret.append(fmtline(l))
1416 ret.append(fmtline(l))
1433 ret.append('\n')
1417 ret.append('\n')
1434 return ''.join(ret)
1418 return ''.join(ret)
1435
1419
1436 class GitDiffRequired(Exception):
1420 class GitDiffRequired(Exception):
1437 pass
1421 pass
1438
1422
1439 def diffopts(ui, opts=None, untrusted=False):
1423 def diffopts(ui, opts=None, untrusted=False):
1440 def get(key, name=None, getter=ui.configbool):
1424 def get(key, name=None, getter=ui.configbool):
1441 return ((opts and opts.get(key)) or
1425 return ((opts and opts.get(key)) or
1442 getter('diff', name or key, None, untrusted=untrusted))
1426 getter('diff', name or key, None, untrusted=untrusted))
1443 return mdiff.diffopts(
1427 return mdiff.diffopts(
1444 text=opts and opts.get('text'),
1428 text=opts and opts.get('text'),
1445 git=get('git'),
1429 git=get('git'),
1446 nodates=get('nodates'),
1430 nodates=get('nodates'),
1447 showfunc=get('show_function', 'showfunc'),
1431 showfunc=get('show_function', 'showfunc'),
1448 ignorews=get('ignore_all_space', 'ignorews'),
1432 ignorews=get('ignore_all_space', 'ignorews'),
1449 ignorewsamount=get('ignore_space_change', 'ignorewsamount'),
1433 ignorewsamount=get('ignore_space_change', 'ignorewsamount'),
1450 ignoreblanklines=get('ignore_blank_lines', 'ignoreblanklines'),
1434 ignoreblanklines=get('ignore_blank_lines', 'ignoreblanklines'),
1451 context=get('unified', getter=ui.config))
1435 context=get('unified', getter=ui.config))
1452
1436
1453 def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
1437 def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
1454 losedatafn=None, prefix=''):
1438 losedatafn=None, prefix=''):
1455 '''yields diff of changes to files between two nodes, or node and
1439 '''yields diff of changes to files between two nodes, or node and
1456 working directory.
1440 working directory.
1457
1441
1458 if node1 is None, use first dirstate parent instead.
1442 if node1 is None, use first dirstate parent instead.
1459 if node2 is None, compare node1 with working directory.
1443 if node2 is None, compare node1 with working directory.
1460
1444
1461 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
1445 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
1462 every time some change cannot be represented with the current
1446 every time some change cannot be represented with the current
1463 patch format. Return False to upgrade to git patch format, True to
1447 patch format. Return False to upgrade to git patch format, True to
1464 accept the loss or raise an exception to abort the diff. It is
1448 accept the loss or raise an exception to abort the diff. It is
1465 called with the name of current file being diffed as 'fn'. If set
1449 called with the name of current file being diffed as 'fn'. If set
1466 to None, patches will always be upgraded to git format when
1450 to None, patches will always be upgraded to git format when
1467 necessary.
1451 necessary.
1468
1452
1469 prefix is a filename prefix that is prepended to all filenames on
1453 prefix is a filename prefix that is prepended to all filenames on
1470 display (used for subrepos).
1454 display (used for subrepos).
1471 '''
1455 '''
1472
1456
1473 if opts is None:
1457 if opts is None:
1474 opts = mdiff.defaultopts
1458 opts = mdiff.defaultopts
1475
1459
1476 if not node1 and not node2:
1460 if not node1 and not node2:
1477 node1 = repo.dirstate.p1()
1461 node1 = repo.dirstate.p1()
1478
1462
1479 def lrugetfilectx():
1463 def lrugetfilectx():
1480 cache = {}
1464 cache = {}
1481 order = []
1465 order = []
1482 def getfilectx(f, ctx):
1466 def getfilectx(f, ctx):
1483 fctx = ctx.filectx(f, filelog=cache.get(f))
1467 fctx = ctx.filectx(f, filelog=cache.get(f))
1484 if f not in cache:
1468 if f not in cache:
1485 if len(cache) > 20:
1469 if len(cache) > 20:
1486 del cache[order.pop(0)]
1470 del cache[order.pop(0)]
1487 cache[f] = fctx.filelog()
1471 cache[f] = fctx.filelog()
1488 else:
1472 else:
1489 order.remove(f)
1473 order.remove(f)
1490 order.append(f)
1474 order.append(f)
1491 return fctx
1475 return fctx
1492 return getfilectx
1476 return getfilectx
1493 getfilectx = lrugetfilectx()
1477 getfilectx = lrugetfilectx()
1494
1478
1495 ctx1 = repo[node1]
1479 ctx1 = repo[node1]
1496 ctx2 = repo[node2]
1480 ctx2 = repo[node2]
1497
1481
1498 if not changes:
1482 if not changes:
1499 changes = repo.status(ctx1, ctx2, match=match)
1483 changes = repo.status(ctx1, ctx2, match=match)
1500 modified, added, removed = changes[:3]
1484 modified, added, removed = changes[:3]
1501
1485
1502 if not modified and not added and not removed:
1486 if not modified and not added and not removed:
1503 return []
1487 return []
1504
1488
1505 revs = None
1489 revs = None
1506 if not repo.ui.quiet:
1490 if not repo.ui.quiet:
1507 hexfunc = repo.ui.debugflag and hex or short
1491 hexfunc = repo.ui.debugflag and hex or short
1508 revs = [hexfunc(node) for node in [node1, node2] if node]
1492 revs = [hexfunc(node) for node in [node1, node2] if node]
1509
1493
1510 copy = {}
1494 copy = {}
1511 if opts.git or opts.upgrade:
1495 if opts.git or opts.upgrade:
1512 copy = copies.copies(repo, ctx1, ctx2, repo[nullid])[0]
1496 copy = copies.copies(repo, ctx1, ctx2, repo[nullid])[0]
1513
1497
1514 difffn = lambda opts, losedata: trydiff(repo, revs, ctx1, ctx2,
1498 difffn = lambda opts, losedata: trydiff(repo, revs, ctx1, ctx2,
1515 modified, added, removed, copy, getfilectx, opts, losedata, prefix)
1499 modified, added, removed, copy, getfilectx, opts, losedata, prefix)
1516 if opts.upgrade and not opts.git:
1500 if opts.upgrade and not opts.git:
1517 try:
1501 try:
1518 def losedata(fn):
1502 def losedata(fn):
1519 if not losedatafn or not losedatafn(fn=fn):
1503 if not losedatafn or not losedatafn(fn=fn):
1520 raise GitDiffRequired()
1504 raise GitDiffRequired()
1521 # Buffer the whole output until we are sure it can be generated
1505 # Buffer the whole output until we are sure it can be generated
1522 return list(difffn(opts.copy(git=False), losedata))
1506 return list(difffn(opts.copy(git=False), losedata))
1523 except GitDiffRequired:
1507 except GitDiffRequired:
1524 return difffn(opts.copy(git=True), None)
1508 return difffn(opts.copy(git=True), None)
1525 else:
1509 else:
1526 return difffn(opts, None)
1510 return difffn(opts, None)
1527
1511
1528 def difflabel(func, *args, **kw):
1512 def difflabel(func, *args, **kw):
1529 '''yields 2-tuples of (output, label) based on the output of func()'''
1513 '''yields 2-tuples of (output, label) based on the output of func()'''
1530 prefixes = [('diff', 'diff.diffline'),
1514 prefixes = [('diff', 'diff.diffline'),
1531 ('copy', 'diff.extended'),
1515 ('copy', 'diff.extended'),
1532 ('rename', 'diff.extended'),
1516 ('rename', 'diff.extended'),
1533 ('old', 'diff.extended'),
1517 ('old', 'diff.extended'),
1534 ('new', 'diff.extended'),
1518 ('new', 'diff.extended'),
1535 ('deleted', 'diff.extended'),
1519 ('deleted', 'diff.extended'),
1536 ('---', 'diff.file_a'),
1520 ('---', 'diff.file_a'),
1537 ('+++', 'diff.file_b'),
1521 ('+++', 'diff.file_b'),
1538 ('@@', 'diff.hunk'),
1522 ('@@', 'diff.hunk'),
1539 ('-', 'diff.deleted'),
1523 ('-', 'diff.deleted'),
1540 ('+', 'diff.inserted')]
1524 ('+', 'diff.inserted')]
1541
1525
1542 for chunk in func(*args, **kw):
1526 for chunk in func(*args, **kw):
1543 lines = chunk.split('\n')
1527 lines = chunk.split('\n')
1544 for i, line in enumerate(lines):
1528 for i, line in enumerate(lines):
1545 if i != 0:
1529 if i != 0:
1546 yield ('\n', '')
1530 yield ('\n', '')
1547 stripline = line
1531 stripline = line
1548 if line and line[0] in '+-':
1532 if line and line[0] in '+-':
1549 # highlight trailing whitespace, but only in changed lines
1533 # highlight trailing whitespace, but only in changed lines
1550 stripline = line.rstrip()
1534 stripline = line.rstrip()
1551 for prefix, label in prefixes:
1535 for prefix, label in prefixes:
1552 if stripline.startswith(prefix):
1536 if stripline.startswith(prefix):
1553 yield (stripline, label)
1537 yield (stripline, label)
1554 break
1538 break
1555 else:
1539 else:
1556 yield (line, '')
1540 yield (line, '')
1557 if line != stripline:
1541 if line != stripline:
1558 yield (line[len(stripline):], 'diff.trailingwhitespace')
1542 yield (line[len(stripline):], 'diff.trailingwhitespace')
1559
1543
1560 def diffui(*args, **kw):
1544 def diffui(*args, **kw):
1561 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
1545 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
1562 return difflabel(diff, *args, **kw)
1546 return difflabel(diff, *args, **kw)
1563
1547
1564
1548
1565 def _addmodehdr(header, omode, nmode):
1549 def _addmodehdr(header, omode, nmode):
1566 if omode != nmode:
1550 if omode != nmode:
1567 header.append('old mode %s\n' % omode)
1551 header.append('old mode %s\n' % omode)
1568 header.append('new mode %s\n' % nmode)
1552 header.append('new mode %s\n' % nmode)
1569
1553
1570 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
1554 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
1571 copy, getfilectx, opts, losedatafn, prefix):
1555 copy, getfilectx, opts, losedatafn, prefix):
1572
1556
1573 def join(f):
1557 def join(f):
1574 return os.path.join(prefix, f)
1558 return os.path.join(prefix, f)
1575
1559
1576 date1 = util.datestr(ctx1.date())
1560 date1 = util.datestr(ctx1.date())
1577 man1 = ctx1.manifest()
1561 man1 = ctx1.manifest()
1578
1562
1579 gone = set()
1563 gone = set()
1580 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
1564 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
1581
1565
1582 copyto = dict([(v, k) for k, v in copy.items()])
1566 copyto = dict([(v, k) for k, v in copy.items()])
1583
1567
1584 if opts.git:
1568 if opts.git:
1585 revs = None
1569 revs = None
1586
1570
1587 for f in sorted(modified + added + removed):
1571 for f in sorted(modified + added + removed):
1588 to = None
1572 to = None
1589 tn = None
1573 tn = None
1590 dodiff = True
1574 dodiff = True
1591 header = []
1575 header = []
1592 if f in man1:
1576 if f in man1:
1593 to = getfilectx(f, ctx1).data()
1577 to = getfilectx(f, ctx1).data()
1594 if f not in removed:
1578 if f not in removed:
1595 tn = getfilectx(f, ctx2).data()
1579 tn = getfilectx(f, ctx2).data()
1596 a, b = f, f
1580 a, b = f, f
1597 if opts.git or losedatafn:
1581 if opts.git or losedatafn:
1598 if f in added:
1582 if f in added:
1599 mode = gitmode[ctx2.flags(f)]
1583 mode = gitmode[ctx2.flags(f)]
1600 if f in copy or f in copyto:
1584 if f in copy or f in copyto:
1601 if opts.git:
1585 if opts.git:
1602 if f in copy:
1586 if f in copy:
1603 a = copy[f]
1587 a = copy[f]
1604 else:
1588 else:
1605 a = copyto[f]
1589 a = copyto[f]
1606 omode = gitmode[man1.flags(a)]
1590 omode = gitmode[man1.flags(a)]
1607 _addmodehdr(header, omode, mode)
1591 _addmodehdr(header, omode, mode)
1608 if a in removed and a not in gone:
1592 if a in removed and a not in gone:
1609 op = 'rename'
1593 op = 'rename'
1610 gone.add(a)
1594 gone.add(a)
1611 else:
1595 else:
1612 op = 'copy'
1596 op = 'copy'
1613 header.append('%s from %s\n' % (op, join(a)))
1597 header.append('%s from %s\n' % (op, join(a)))
1614 header.append('%s to %s\n' % (op, join(f)))
1598 header.append('%s to %s\n' % (op, join(f)))
1615 to = getfilectx(a, ctx1).data()
1599 to = getfilectx(a, ctx1).data()
1616 else:
1600 else:
1617 losedatafn(f)
1601 losedatafn(f)
1618 else:
1602 else:
1619 if opts.git:
1603 if opts.git:
1620 header.append('new file mode %s\n' % mode)
1604 header.append('new file mode %s\n' % mode)
1621 elif ctx2.flags(f):
1605 elif ctx2.flags(f):
1622 losedatafn(f)
1606 losedatafn(f)
1623 # In theory, if tn was copied or renamed we should check
1607 # In theory, if tn was copied or renamed we should check
1624 # if the source is binary too but the copy record already
1608 # if the source is binary too but the copy record already
1625 # forces git mode.
1609 # forces git mode.
1626 if util.binary(tn):
1610 if util.binary(tn):
1627 if opts.git:
1611 if opts.git:
1628 dodiff = 'binary'
1612 dodiff = 'binary'
1629 else:
1613 else:
1630 losedatafn(f)
1614 losedatafn(f)
1631 if not opts.git and not tn:
1615 if not opts.git and not tn:
1632 # regular diffs cannot represent new empty file
1616 # regular diffs cannot represent new empty file
1633 losedatafn(f)
1617 losedatafn(f)
1634 elif f in removed:
1618 elif f in removed:
1635 if opts.git:
1619 if opts.git:
1636 # have we already reported a copy above?
1620 # have we already reported a copy above?
1637 if ((f in copy and copy[f] in added
1621 if ((f in copy and copy[f] in added
1638 and copyto[copy[f]] == f) or
1622 and copyto[copy[f]] == f) or
1639 (f in copyto and copyto[f] in added
1623 (f in copyto and copyto[f] in added
1640 and copy[copyto[f]] == f)):
1624 and copy[copyto[f]] == f)):
1641 dodiff = False
1625 dodiff = False
1642 else:
1626 else:
1643 header.append('deleted file mode %s\n' %
1627 header.append('deleted file mode %s\n' %
1644 gitmode[man1.flags(f)])
1628 gitmode[man1.flags(f)])
1645 elif not to or util.binary(to):
1629 elif not to or util.binary(to):
1646 # regular diffs cannot represent empty file deletion
1630 # regular diffs cannot represent empty file deletion
1647 losedatafn(f)
1631 losedatafn(f)
1648 else:
1632 else:
1649 oflag = man1.flags(f)
1633 oflag = man1.flags(f)
1650 nflag = ctx2.flags(f)
1634 nflag = ctx2.flags(f)
1651 binary = util.binary(to) or util.binary(tn)
1635 binary = util.binary(to) or util.binary(tn)
1652 if opts.git:
1636 if opts.git:
1653 _addmodehdr(header, gitmode[oflag], gitmode[nflag])
1637 _addmodehdr(header, gitmode[oflag], gitmode[nflag])
1654 if binary:
1638 if binary:
1655 dodiff = 'binary'
1639 dodiff = 'binary'
1656 elif binary or nflag != oflag:
1640 elif binary or nflag != oflag:
1657 losedatafn(f)
1641 losedatafn(f)
1658 if opts.git:
1642 if opts.git:
1659 header.insert(0, mdiff.diffline(revs, join(a), join(b), opts))
1643 header.insert(0, mdiff.diffline(revs, join(a), join(b), opts))
1660
1644
1661 if dodiff:
1645 if dodiff:
1662 if dodiff == 'binary':
1646 if dodiff == 'binary':
1663 text = b85diff(to, tn)
1647 text = b85diff(to, tn)
1664 else:
1648 else:
1665 text = mdiff.unidiff(to, date1,
1649 text = mdiff.unidiff(to, date1,
1666 # ctx2 date may be dynamic
1650 # ctx2 date may be dynamic
1667 tn, util.datestr(ctx2.date()),
1651 tn, util.datestr(ctx2.date()),
1668 join(a), join(b), revs, opts=opts)
1652 join(a), join(b), revs, opts=opts)
1669 if header and (text or len(header) > 1):
1653 if header and (text or len(header) > 1):
1670 yield ''.join(header)
1654 yield ''.join(header)
1671 if text:
1655 if text:
1672 yield text
1656 yield text
1673
1657
1674 def diffstatsum(stats):
1658 def diffstatsum(stats):
1675 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
1659 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
1676 for f, a, r, b in stats:
1660 for f, a, r, b in stats:
1677 maxfile = max(maxfile, encoding.colwidth(f))
1661 maxfile = max(maxfile, encoding.colwidth(f))
1678 maxtotal = max(maxtotal, a + r)
1662 maxtotal = max(maxtotal, a + r)
1679 addtotal += a
1663 addtotal += a
1680 removetotal += r
1664 removetotal += r
1681 binary = binary or b
1665 binary = binary or b
1682
1666
1683 return maxfile, maxtotal, addtotal, removetotal, binary
1667 return maxfile, maxtotal, addtotal, removetotal, binary
1684
1668
1685 def diffstatdata(lines):
1669 def diffstatdata(lines):
1686 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
1670 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
1687
1671
1688 results = []
1672 results = []
1689 filename, adds, removes = None, 0, 0
1673 filename, adds, removes = None, 0, 0
1690
1674
1691 def addresult():
1675 def addresult():
1692 if filename:
1676 if filename:
1693 isbinary = adds == 0 and removes == 0
1677 isbinary = adds == 0 and removes == 0
1694 results.append((filename, adds, removes, isbinary))
1678 results.append((filename, adds, removes, isbinary))
1695
1679
1696 for line in lines:
1680 for line in lines:
1697 if line.startswith('diff'):
1681 if line.startswith('diff'):
1698 addresult()
1682 addresult()
1699 # set numbers to 0 anyway when starting new file
1683 # set numbers to 0 anyway when starting new file
1700 adds, removes = 0, 0
1684 adds, removes = 0, 0
1701 if line.startswith('diff --git'):
1685 if line.startswith('diff --git'):
1702 filename = gitre.search(line).group(1)
1686 filename = gitre.search(line).group(1)
1703 elif line.startswith('diff -r'):
1687 elif line.startswith('diff -r'):
1704 # format: "diff -r ... -r ... filename"
1688 # format: "diff -r ... -r ... filename"
1705 filename = diffre.search(line).group(1)
1689 filename = diffre.search(line).group(1)
1706 elif line.startswith('+') and not line.startswith('+++'):
1690 elif line.startswith('+') and not line.startswith('+++'):
1707 adds += 1
1691 adds += 1
1708 elif line.startswith('-') and not line.startswith('---'):
1692 elif line.startswith('-') and not line.startswith('---'):
1709 removes += 1
1693 removes += 1
1710 addresult()
1694 addresult()
1711 return results
1695 return results
1712
1696
1713 def diffstat(lines, width=80, git=False):
1697 def diffstat(lines, width=80, git=False):
1714 output = []
1698 output = []
1715 stats = diffstatdata(lines)
1699 stats = diffstatdata(lines)
1716 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
1700 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
1717
1701
1718 countwidth = len(str(maxtotal))
1702 countwidth = len(str(maxtotal))
1719 if hasbinary and countwidth < 3:
1703 if hasbinary and countwidth < 3:
1720 countwidth = 3
1704 countwidth = 3
1721 graphwidth = width - countwidth - maxname - 6
1705 graphwidth = width - countwidth - maxname - 6
1722 if graphwidth < 10:
1706 if graphwidth < 10:
1723 graphwidth = 10
1707 graphwidth = 10
1724
1708
1725 def scale(i):
1709 def scale(i):
1726 if maxtotal <= graphwidth:
1710 if maxtotal <= graphwidth:
1727 return i
1711 return i
1728 # If diffstat runs out of room it doesn't print anything,
1712 # If diffstat runs out of room it doesn't print anything,
1729 # which isn't very useful, so always print at least one + or -
1713 # which isn't very useful, so always print at least one + or -
1730 # if there were at least some changes.
1714 # if there were at least some changes.
1731 return max(i * graphwidth // maxtotal, int(bool(i)))
1715 return max(i * graphwidth // maxtotal, int(bool(i)))
1732
1716
1733 for filename, adds, removes, isbinary in stats:
1717 for filename, adds, removes, isbinary in stats:
1734 if git and isbinary:
1718 if git and isbinary:
1735 count = 'Bin'
1719 count = 'Bin'
1736 else:
1720 else:
1737 count = adds + removes
1721 count = adds + removes
1738 pluses = '+' * scale(adds)
1722 pluses = '+' * scale(adds)
1739 minuses = '-' * scale(removes)
1723 minuses = '-' * scale(removes)
1740 output.append(' %s%s | %*s %s%s\n' %
1724 output.append(' %s%s | %*s %s%s\n' %
1741 (filename, ' ' * (maxname - encoding.colwidth(filename)),
1725 (filename, ' ' * (maxname - encoding.colwidth(filename)),
1742 countwidth, count, pluses, minuses))
1726 countwidth, count, pluses, minuses))
1743
1727
1744 if stats:
1728 if stats:
1745 output.append(_(' %d files changed, %d insertions(+), %d deletions(-)\n')
1729 output.append(_(' %d files changed, %d insertions(+), %d deletions(-)\n')
1746 % (len(stats), totaladds, totalremoves))
1730 % (len(stats), totaladds, totalremoves))
1747
1731
1748 return ''.join(output)
1732 return ''.join(output)
1749
1733
1750 def diffstatui(*args, **kw):
1734 def diffstatui(*args, **kw):
1751 '''like diffstat(), but yields 2-tuples of (output, label) for
1735 '''like diffstat(), but yields 2-tuples of (output, label) for
1752 ui.write()
1736 ui.write()
1753 '''
1737 '''
1754
1738
1755 for line in diffstat(*args, **kw).splitlines():
1739 for line in diffstat(*args, **kw).splitlines():
1756 if line and line[-1] in '+-':
1740 if line and line[-1] in '+-':
1757 name, graph = line.rsplit(' ', 1)
1741 name, graph = line.rsplit(' ', 1)
1758 yield (name + ' ', '')
1742 yield (name + ' ', '')
1759 m = re.search(r'\++', graph)
1743 m = re.search(r'\++', graph)
1760 if m:
1744 if m:
1761 yield (m.group(0), 'diffstat.inserted')
1745 yield (m.group(0), 'diffstat.inserted')
1762 m = re.search(r'-+', graph)
1746 m = re.search(r'-+', graph)
1763 if m:
1747 if m:
1764 yield (m.group(0), 'diffstat.deleted')
1748 yield (m.group(0), 'diffstat.deleted')
1765 else:
1749 else:
1766 yield (line, '')
1750 yield (line, '')
1767 yield ('\n', '')
1751 yield ('\n', '')
General Comments 0
You need to be logged in to leave comments. Login now