##// END OF EJS Templates
merge with stable
Matt Mackall -
r15591:97fc5eec merge default
parent child Browse files
Show More
@@ -1,477 +1,475 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''High-level command function for lfconvert, plus the cmdtable.'''
9 '''High-level command function for lfconvert, plus the cmdtable.'''
10
10
11 import os
11 import os
12 import shutil
12 import shutil
13
13
14 from mercurial import util, match as match_, hg, node, context, error
14 from mercurial import util, match as match_, hg, node, context, error
15 from mercurial.i18n import _
15 from mercurial.i18n import _
16
16
17 import lfutil
17 import lfutil
18 import basestore
18 import basestore
19
19
20 # -- Commands ----------------------------------------------------------
20 # -- Commands ----------------------------------------------------------
21
21
def lfconvert(ui, src, dest, *pats, **opts):
    '''convert a normal repository to a largefiles repository

    Convert repository SOURCE to a new repository DEST, identical to
    SOURCE except that certain files will be converted as largefiles:
    specifically, any file that matches any PATTERN *or* whose size is
    above the minimum size threshold is converted as a largefile. The
    size used to determine whether or not to track a file as a
    largefile is the size of the first version of the file. The
    minimum size can be specified either with --size or in
    configuration as ``largefiles.size``.

    After running this command you will need to make sure that
    largefiles is enabled anywhere you intend to push the new
    repository.

    Use --to-normal to convert largefiles back to normal files; after
    this, the DEST repository can be used without largefiles at all.'''

    tolfile = not opts['to_normal']
    if tolfile:
        size = lfutil.getminsize(ui, True, opts.get('size'), default=None)

    # both ends must be local repositories for this conversion
    for path in (src, dest):
        if not hg.islocal(path):
            raise util.Abort(_('%s is not a local Mercurial repo') % path)

    rsrc = hg.repository(ui, src)
    ui.status(_('initializing destination %s\n') % dest)
    rdst = hg.repository(ui, dest, create=True)

    success = False
    try:
        # Lock destination to prevent modification while it is converted to.
        # The source needs no lock: we only read its immutable history.
        dst_lock = rdst.lock()

        # Walk every changeset in the source via changelog.nodesbetween().
        # A generator (not a list) keeps memory usage down.
        ctxs = (rsrc[ctx] for ctx in rsrc.changelog.nodesbetween(None,
            rsrc.heads())[0])
        revmap = {node.nullid: node.nullid}
        if tolfile:
            lfiles = set()
            normalfiles = set()
            if not pats:
                pats = ui.configlist(lfutil.longname, 'patterns', default=[])
            matcher = match_.match(rsrc.root, '', list(pats)) if pats else None

            lfiletohash = {}
            for ctx in ctxs:
                ui.progress(_('converting revisions'), ctx.rev(),
                    unit=_('revision'), total=rsrc['tip'].rev())
                _lfconvert_addchangeset(rsrc, rdst, ctx, revmap,
                    lfiles, normalfiles, matcher, size, lfiletohash)
            ui.progress(_('converting revisions'), None)

            # discard the working-copy largefile store created as a
            # side effect of the conversion commits
            if os.path.exists(rdst.wjoin(lfutil.shortname)):
                shutil.rmtree(rdst.wjoin(lfutil.shortname))

            # remove the raw largefiles from the destination working copy;
            # only their standins belong there
            for f in lfiletohash.keys():
                if os.path.isfile(rdst.wjoin(f)):
                    os.unlink(rdst.wjoin(f))
                try:
                    os.removedirs(os.path.dirname(rdst.wjoin(f)))
                except OSError:
                    pass

            # If there were any files converted to largefiles, add largefiles
            # to the destination repository's requirements.
            if lfiles:
                rdst.requirements.add('largefiles')
                rdst._writerequirements()
        else:
            for ctx in ctxs:
                ui.progress(_('converting revisions'), ctx.rev(),
                    unit=_('revision'), total=rsrc['tip'].rev())
                _addchangeset(ui, rsrc, rdst, ctx, revmap)

            ui.progress(_('converting revisions'), None)
        success = True
    finally:
        if not success:
            # the conversion failed: discard the half-built destination
            shutil.rmtree(rdst.root)
        dst_lock.release()
def _addchangeset(ui, rsrc, rdst, ctx, revmap):
    '''Recommit changeset ctx into rdst with its largefile standins
    expanded back into normal file contents (--to-normal direction).'''
    # Convert src parents to dst parents
    parents = [revmap[p.node()] for p in ctx.parents()]
    while len(parents) < 2:
        parents.append(node.nullid)

    # Generate the set of changed files; for a merge, also pick up any
    # file that differs from either parent's manifest.
    files = set(ctx.files())
    if node.nullid not in parents:
        mc = ctx.manifest()
        mp1 = ctx.parents()[0].manifest()
        mp2 = ctx.parents()[1].manifest()
        files |= (set(mp1) | set(mp2)) - set(mc)
        files.update(f for f in mc
                     if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None))

    def getfilectx(repo, memctx, f):
        standin = lfutil.standin(f)
        if standin in files:
            # if the file isn't in the manifest then it was removed
            # or renamed, raise IOError to indicate this
            try:
                fctx = ctx.filectx(standin)
            except error.LookupError:
                raise IOError()
            renamed = fctx.renamed()
            if renamed:
                renamed = lfutil.splitstandin(renamed[0])

            # the standin stores the hash; fetch the real contents from
            # the local largefile store
            hash = fctx.data().strip()
            path = lfutil.findfile(rsrc, hash)
            ### TODO: What if the file is not cached?
            data = ''
            fd = None
            try:
                fd = open(path, 'rb')
                data = fd.read()
            finally:
                if fd:
                    fd.close()
            return context.memfilectx(f, data, 'l' in fctx.flags(),
                                      'x' in fctx.flags(), renamed)
        else:
            try:
                fctx = ctx.filectx(f)
            except error.LookupError:
                raise IOError()
            renamed = fctx.renamed()
            if renamed:
                renamed = renamed[0]
            data = fctx.data()
            if f == '.hgtags':
                # rewrite tag targets through the revision map so tags
                # point at converted changesets
                newdata = []
                for line in data.splitlines():
                    id, name = line.split(' ', 1)
                    newdata.append('%s %s\n' % (node.hex(revmap[node.bin(id)]),
                                                name))
                data = ''.join(newdata)
            return context.memfilectx(f, data, 'l' in fctx.flags(),
                                      'x' in fctx.flags(), renamed)

    # standins commit under their real (split) name in the destination
    dstfiles = [lfutil.splitstandin(fn) if lfutil.isstandin(fn) else fn
                for fn in files]
    # Commit
    mctx = context.memctx(rdst, parents, ctx.description(), dstfiles,
                          getfilectx, ctx.user(), ctx.date(), ctx.extra())
    ret = rdst.commitctx(mctx)
    rdst.dirstate.setparents(ret)
    revmap[ctx.node()] = rdst.changelog.tip()
def _lfconvert_addchangeset(rsrc, rdst, ctx, revmap, lfiles, normalfiles,
        matcher, size, lfiletohash):
    '''Commit changeset ctx from rsrc into rdst, turning files that match
    *matcher* or exceed *size* MB into largefile standins.

    revmap maps source nodes to converted nodes; lfiles, normalfiles and
    lfiletohash accumulate classification state and are shared across all
    changesets of one conversion run.'''
    # Convert src parents to dst parents
    parents = []
    for p in ctx.parents():
        parents.append(revmap[p.node()])
    while len(parents) < 2:
        parents.append(node.nullid)

    # Generate list of changed files; for a merge, also include files
    # that differ from either parent's manifest.
    files = set(ctx.files())
    if node.nullid not in parents:
        mc = ctx.manifest()
        mp1 = ctx.parents()[0].manifest()
        mp2 = ctx.parents()[1].manifest()
        files |= (set(mp1) | set(mp2)) - set(mc)
        for f in mc:
            if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                files.add(f)

    dstfiles = []
    for f in files:
        if f not in lfiles and f not in normalfiles:
            islfile = _islfile(f, ctx, matcher, size)
            # If this file was renamed or copied then copy
            # the largefile-ness of its predecessor
            if f in ctx.manifest():
                fctx = ctx.filectx(f)
                renamed = fctx.renamed()
                renamedlfile = renamed and renamed[0] in lfiles
                islfile |= renamedlfile
                if 'l' in fctx.flags():
                    if renamedlfile:
                        raise util.Abort(
                            _('renamed/copied largefile %s becomes symlink')
                            % f)
                    islfile = False
            if islfile:
                lfiles.add(f)
            else:
                normalfiles.add(f)

        if f in lfiles:
            dstfiles.append(lfutil.standin(f))
            # largefile in manifest if it has not been removed/renamed
            if f in ctx.manifest():
                fctx = ctx.filectx(f)
                if 'l' in fctx.flags():
                    # BUGFIX: recompute rename info here. The 'renamed'
                    # local from the classification branch above may refer
                    # to a different file -- or be unbound entirely when f
                    # was already known as a largefile.
                    renamed = fctx.renamed()
                    if renamed and renamed[0] in lfiles:
                        raise util.Abort(_('largefile %s becomes symlink') % f)

                # largefile was modified, update standins
                fullpath = rdst.wjoin(f)
                util.makedirs(os.path.dirname(fullpath))
                m = util.sha1('')
                m.update(ctx[f].data())
                hash = m.hexdigest()
                if f not in lfiletohash or lfiletohash[f] != hash:
                    # BUGFIX: initialize fd before the try block so the
                    # finally clause cannot raise NameError when open()
                    # itself fails (same pattern as _addchangeset).
                    fd = None
                    try:
                        fd = open(fullpath, 'wb')
                        fd.write(ctx[f].data())
                    finally:
                        if fd:
                            fd.close()
                    executable = 'x' in ctx[f].flags()
                    os.chmod(fullpath, lfutil.getmode(executable))
                    lfutil.writestandin(rdst, lfutil.standin(f), hash,
                        executable)
                    lfiletohash[f] = hash
        else:
            # normal file
            dstfiles.append(f)

    def getfilectx(repo, memctx, f):
        if lfutil.isstandin(f):
            # if the file isn't in the manifest then it was removed
            # or renamed, raise IOError to indicate this
            srcfname = lfutil.splitstandin(f)
            try:
                fctx = ctx.filectx(srcfname)
            except error.LookupError:
                raise IOError()
            renamed = fctx.renamed()
            if renamed:
                # standin is always a largefile because largefile-ness
                # doesn't change after rename or copy
                renamed = lfutil.standin(renamed[0])

            return context.memfilectx(f, lfiletohash[srcfname] + '\n', 'l' in
                fctx.flags(), 'x' in fctx.flags(), renamed)
        else:
            try:
                fctx = ctx.filectx(f)
            except error.LookupError:
                raise IOError()
            renamed = fctx.renamed()
            if renamed:
                renamed = renamed[0]

            data = fctx.data()
            if f == '.hgtags':
                # rewrite tag targets through revmap so tags point at
                # converted changesets
                newdata = []
                for line in data.splitlines():
                    id, name = line.split(' ', 1)
                    newdata.append('%s %s\n' % (node.hex(revmap[node.bin(id)]),
                        name))
                data = ''.join(newdata)
            return context.memfilectx(f, data, 'l' in fctx.flags(),
                'x' in fctx.flags(), renamed)

    # Commit
    mctx = context.memctx(rdst, parents, ctx.description(), dstfiles,
        getfilectx, ctx.user(), ctx.date(), ctx.extra())
    ret = rdst.commitctx(mctx)
    rdst.dirstate.setparents(ret)
    revmap[ctx.node()] = rdst.changelog.tip()
312 def _islfile(file, ctx, matcher, size):
310 def _islfile(file, ctx, matcher, size):
313 '''Return true if file should be considered a largefile, i.e.
311 '''Return true if file should be considered a largefile, i.e.
314 matcher matches it or it is larger than size.'''
312 matcher matches it or it is larger than size.'''
315 # never store special .hg* files as largefiles
313 # never store special .hg* files as largefiles
316 if file == '.hgtags' or file == '.hgignore' or file == '.hgsigs':
314 if file == '.hgtags' or file == '.hgignore' or file == '.hgsigs':
317 return False
315 return False
318 if matcher and matcher(file):
316 if matcher and matcher(file):
319 return True
317 return True
320 try:
318 try:
321 return ctx.filectx(file).size() >= size * 1024 * 1024
319 return ctx.filectx(file).size() >= size * 1024 * 1024
322 except error.LookupError:
320 except error.LookupError:
323 return False
321 return False
324
322
def uploadlfiles(ui, rsrc, rdst, files):
    '''upload largefiles to the central store'''

    if not files:
        return

    store = basestore._openstore(rsrc, rdst, put=True)

    # skip hashes the remote store already holds
    files = [h for h in files if not store.exists(h)]
    at = 0
    for hash in files:
        ui.progress(_('uploading largefiles'), at, unit='largefile',
                    total=len(files))
        source = lfutil.findfile(rsrc, hash)
        if not source:
            raise util.Abort(_('largefile %s missing from store'
                               ' (needs to be uploaded)') % hash)
        # XXX check for errors here
        store.put(source, hash)
        at += 1
    ui.progress(_('uploading largefiles'), None)
def verifylfiles(ui, repo, all=False, contents=False):
    '''Verify that every big file revision in the current changeset
    exists in the central store. With --contents, also verify that
    the contents of each big file revision are correct (SHA-1 hash
    matches the revision ID). With --all, check every changeset in
    this repository.'''
    # Pass a concrete list rather than an iterator: store.verify is
    # known to work with a list.
    revs = range(len(repo)) if all else ['.']

    store = basestore._openstore(repo)
    return store.verify(revs, contents=contents)
def cachelfiles(ui, repo, node):
    '''cachelfiles ensures that all largefiles needed by the specified revision
    are present in the repository's largefile cache.

    returns a tuple (cached, missing). cached is the list of files downloaded
    by this operation; missing is the list of files that were needed but could
    not be found.'''
    lfiles = lfutil.listlfiles(repo, node)
    toget = []

    for lfile in lfiles:
        # the standin's contents are the expected hash of the largefile
        expectedhash = repo[node][lfutil.standin(lfile)].data().strip()
        # if it exists and its hash matches, it might have been locally
        # modified before updating and the user chose 'local'. in this case,
        # it will not be in any store, so don't look for it.
        wfile = repo.wjoin(lfile)
        uptodate = (os.path.exists(wfile)
                    and expectedhash == lfutil.hashfile(wfile))
        if not uptodate and not lfutil.findfile(repo, expectedhash):
            toget.append((lfile, expectedhash))

    if not toget:
        return ([], [])

    store = basestore._openstore(repo)
    return store.get(toget)
def updatelfiles(ui, repo, filelist=None, printmessage=True):
    '''Refresh working-copy largefiles from their standins, optionally
    restricted to filelist, and report how many were updated/removed.'''
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)

        if filelist is not None:
            lfiles = [f for f in lfiles if f in filelist]

        printed = False
        if printmessage and lfiles:
            ui.status(_('getting changed largefiles\n'))
            printed = True
        cachelfiles(ui, repo, '.')

        updated, removed = 0, 0
        for lfile in lfiles:
            # _updatelfile returns 1 (updated), -1 (removed), 0
            # (unchanged) or None (missing from cache)
            rv = _updatelfile(repo, lfdirstate, lfile)
            if rv:
                if rv > 0:
                    updated += rv
                else:
                    removed -= rv
            if printmessage and (removed or updated) and not printed:
                ui.status(_('getting changed largefiles\n'))
                printed = True

        lfdirstate.write()
        if printed and printmessage:
            ui.status(_('%d largefiles updated, %d removed\n') % (updated,
                removed))
    finally:
        wlock.release()
def _updatelfile(repo, lfdirstate, lfile):
    '''updates a single largefile and copies the state of its standin from
    the repository's dirstate to its state in the lfdirstate.

    returns 1 if the file was modified, -1 if the file was removed, 0 if the
    file was unchanged, and None if the needed largefile was missing from the
    cache.'''
    ret = 0
    abslfile = repo.wjoin(lfile)
    absstandin = repo.wjoin(lfutil.standin(lfile))
    if os.path.exists(absstandin):
        if os.path.exists(absstandin + '.orig'):
            # the standin has a merge backup; keep a matching backup of
            # the largefile contents as well
            shutil.copyfile(abslfile, abslfile + '.orig')
        expecthash = lfutil.readstandin(repo, lfile)
        if expecthash != '':
            outofdate = (not os.path.exists(abslfile) or
                         expecthash != lfutil.hashfile(abslfile))
            if outofdate:
                if not lfutil.copyfromcache(repo, expecthash, lfile):
                    # use normallookup() to allocate entry in largefiles
                    # dirstate, because lack of it misleads
                    # lfiles_repo.status() into recognition that such cache
                    # missing files are REMOVED.
                    lfdirstate.normallookup(lfile)
                    return None # don't try to set the mode
                ret = 1
        # propagate the standin's permission bits to the largefile
        mode = os.stat(absstandin).st_mode
        if mode != os.stat(abslfile).st_mode:
            os.chmod(abslfile, mode)
            ret = 1
    else:
        # no standin: the largefile was removed in this revision
        if os.path.exists(abslfile):
            os.unlink(abslfile)
            ret = -1
    # mirror the standin's dirstate status into the largefiles dirstate
    state = repo.dirstate[lfutil.standin(lfile)]
    handler = {'n': lfdirstate.normal,
               'r': lfdirstate.remove,
               'a': lfdirstate.add,
               '?': lfdirstate.drop}.get(state)
    if handler is not None:
        handler(lfile)
    return ret
# -- hg commands declarations ------------------------------------------------

# command name -> (function, options, synopsis), in the format hg's
# command dispatcher expects
cmdtable = {
    'lfconvert': (lfconvert,
                  [('s', 'size', '',
                    _('minimum size (MB) for files to be converted '
                      'as largefiles'),
                    'SIZE'),
                   ('', 'to-normal', False,
                    _('convert from a largefiles repo to a normal repo')),
                   ],
                  _('hg lfconvert SOURCE DEST [FILE ...]')),
    }
@@ -1,1869 +1,1869 b''
1 # patch.py - patch file parsing routines
1 # patch.py - patch file parsing routines
2 #
2 #
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 import cStringIO, email.Parser, os, errno, re
9 import cStringIO, email.Parser, os, errno, re
10 import tempfile, zlib, shutil
10 import tempfile, zlib, shutil
11
11
12 from i18n import _
12 from i18n import _
13 from node import hex, nullid, short
13 from node import hex, nullid, short
14 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding, error
14 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding, error
15 import context
15 import context
16
16
17 gitre = re.compile('diff --git a/(.*) b/(.*)')
17 gitre = re.compile('diff --git a/(.*) b/(.*)')
18
18
19 class PatchError(Exception):
19 class PatchError(Exception):
20 pass
20 pass
21
21
22
22
23 # public functions
23 # public functions
24
24
25 def split(stream):
25 def split(stream):
26 '''return an iterator of individual patches from a stream'''
26 '''return an iterator of individual patches from a stream'''
27 def isheader(line, inheader):
27 def isheader(line, inheader):
28 if inheader and line[0] in (' ', '\t'):
28 if inheader and line[0] in (' ', '\t'):
29 # continuation
29 # continuation
30 return True
30 return True
31 if line[0] in (' ', '-', '+'):
31 if line[0] in (' ', '-', '+'):
32 # diff line - don't check for header pattern in there
32 # diff line - don't check for header pattern in there
33 return False
33 return False
34 l = line.split(': ', 1)
34 l = line.split(': ', 1)
35 return len(l) == 2 and ' ' not in l[0]
35 return len(l) == 2 and ' ' not in l[0]
36
36
37 def chunk(lines):
37 def chunk(lines):
38 return cStringIO.StringIO(''.join(lines))
38 return cStringIO.StringIO(''.join(lines))
39
39
40 def hgsplit(stream, cur):
40 def hgsplit(stream, cur):
41 inheader = True
41 inheader = True
42
42
43 for line in stream:
43 for line in stream:
44 if not line.strip():
44 if not line.strip():
45 inheader = False
45 inheader = False
46 if not inheader and line.startswith('# HG changeset patch'):
46 if not inheader and line.startswith('# HG changeset patch'):
47 yield chunk(cur)
47 yield chunk(cur)
48 cur = []
48 cur = []
49 inheader = True
49 inheader = True
50
50
51 cur.append(line)
51 cur.append(line)
52
52
53 if cur:
53 if cur:
54 yield chunk(cur)
54 yield chunk(cur)
55
55
56 def mboxsplit(stream, cur):
56 def mboxsplit(stream, cur):
57 for line in stream:
57 for line in stream:
58 if line.startswith('From '):
58 if line.startswith('From '):
59 for c in split(chunk(cur[1:])):
59 for c in split(chunk(cur[1:])):
60 yield c
60 yield c
61 cur = []
61 cur = []
62
62
63 cur.append(line)
63 cur.append(line)
64
64
65 if cur:
65 if cur:
66 for c in split(chunk(cur[1:])):
66 for c in split(chunk(cur[1:])):
67 yield c
67 yield c
68
68
69 def mimesplit(stream, cur):
69 def mimesplit(stream, cur):
70 def msgfp(m):
70 def msgfp(m):
71 fp = cStringIO.StringIO()
71 fp = cStringIO.StringIO()
72 g = email.Generator.Generator(fp, mangle_from_=False)
72 g = email.Generator.Generator(fp, mangle_from_=False)
73 g.flatten(m)
73 g.flatten(m)
74 fp.seek(0)
74 fp.seek(0)
75 return fp
75 return fp
76
76
77 for line in stream:
77 for line in stream:
78 cur.append(line)
78 cur.append(line)
79 c = chunk(cur)
79 c = chunk(cur)
80
80
81 m = email.Parser.Parser().parse(c)
81 m = email.Parser.Parser().parse(c)
82 if not m.is_multipart():
82 if not m.is_multipart():
83 yield msgfp(m)
83 yield msgfp(m)
84 else:
84 else:
85 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
85 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
86 for part in m.walk():
86 for part in m.walk():
87 ct = part.get_content_type()
87 ct = part.get_content_type()
88 if ct not in ok_types:
88 if ct not in ok_types:
89 continue
89 continue
90 yield msgfp(part)
90 yield msgfp(part)
91
91
92 def headersplit(stream, cur):
92 def headersplit(stream, cur):
93 inheader = False
93 inheader = False
94
94
95 for line in stream:
95 for line in stream:
96 if not inheader and isheader(line, inheader):
96 if not inheader and isheader(line, inheader):
97 yield chunk(cur)
97 yield chunk(cur)
98 cur = []
98 cur = []
99 inheader = True
99 inheader = True
100 if inheader and not isheader(line, inheader):
100 if inheader and not isheader(line, inheader):
101 inheader = False
101 inheader = False
102
102
103 cur.append(line)
103 cur.append(line)
104
104
105 if cur:
105 if cur:
106 yield chunk(cur)
106 yield chunk(cur)
107
107
108 def remainder(cur):
108 def remainder(cur):
109 yield chunk(cur)
109 yield chunk(cur)
110
110
111 class fiter(object):
111 class fiter(object):
112 def __init__(self, fp):
112 def __init__(self, fp):
113 self.fp = fp
113 self.fp = fp
114
114
115 def __iter__(self):
115 def __iter__(self):
116 return self
116 return self
117
117
118 def next(self):
118 def next(self):
119 l = self.fp.readline()
119 l = self.fp.readline()
120 if not l:
120 if not l:
121 raise StopIteration
121 raise StopIteration
122 return l
122 return l
123
123
124 inheader = False
124 inheader = False
125 cur = []
125 cur = []
126
126
127 mimeheaders = ['content-type']
127 mimeheaders = ['content-type']
128
128
129 if not util.safehasattr(stream, 'next'):
129 if not util.safehasattr(stream, 'next'):
130 # http responses, for example, have readline but not next
130 # http responses, for example, have readline but not next
131 stream = fiter(stream)
131 stream = fiter(stream)
132
132
133 for line in stream:
133 for line in stream:
134 cur.append(line)
134 cur.append(line)
135 if line.startswith('# HG changeset patch'):
135 if line.startswith('# HG changeset patch'):
136 return hgsplit(stream, cur)
136 return hgsplit(stream, cur)
137 elif line.startswith('From '):
137 elif line.startswith('From '):
138 return mboxsplit(stream, cur)
138 return mboxsplit(stream, cur)
139 elif isheader(line, inheader):
139 elif isheader(line, inheader):
140 inheader = True
140 inheader = True
141 if line.split(':', 1)[0].lower() in mimeheaders:
141 if line.split(':', 1)[0].lower() in mimeheaders:
142 # let email parser handle this
142 # let email parser handle this
143 return mimesplit(stream, cur)
143 return mimesplit(stream, cur)
144 elif line.startswith('--- ') and inheader:
144 elif line.startswith('--- ') and inheader:
145 # No evil headers seen by diff start, split by hand
145 # No evil headers seen by diff start, split by hand
146 return headersplit(stream, cur)
146 return headersplit(stream, cur)
147 # Not enough info, keep reading
147 # Not enough info, keep reading
148
148
149 # if we are here, we have a very plain patch
149 # if we are here, we have a very plain patch
150 return remainder(cur)
150 return remainder(cur)
151
151
152 def extract(ui, fileobj):
152 def extract(ui, fileobj):
153 '''extract patch from data read from fileobj.
153 '''extract patch from data read from fileobj.
154
154
155 patch can be a normal patch or contained in an email message.
155 patch can be a normal patch or contained in an email message.
156
156
157 return tuple (filename, message, user, date, branch, node, p1, p2).
157 return tuple (filename, message, user, date, branch, node, p1, p2).
158 Any item in the returned tuple can be None. If filename is None,
158 Any item in the returned tuple can be None. If filename is None,
159 fileobj did not contain a patch. Caller must unlink filename when done.'''
159 fileobj did not contain a patch. Caller must unlink filename when done.'''
160
160
161 # attempt to detect the start of a patch
161 # attempt to detect the start of a patch
162 # (this heuristic is borrowed from quilt)
162 # (this heuristic is borrowed from quilt)
163 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
163 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
164 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
164 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
165 r'---[ \t].*?^\+\+\+[ \t]|'
165 r'---[ \t].*?^\+\+\+[ \t]|'
166 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
166 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
167
167
168 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
168 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
169 tmpfp = os.fdopen(fd, 'w')
169 tmpfp = os.fdopen(fd, 'w')
170 try:
170 try:
171 msg = email.Parser.Parser().parse(fileobj)
171 msg = email.Parser.Parser().parse(fileobj)
172
172
173 subject = msg['Subject']
173 subject = msg['Subject']
174 user = msg['From']
174 user = msg['From']
175 if not subject and not user:
175 if not subject and not user:
176 # Not an email, restore parsed headers if any
176 # Not an email, restore parsed headers if any
177 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
177 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
178
178
179 gitsendmail = 'git-send-email' in msg.get('X-Mailer', '')
179 gitsendmail = 'git-send-email' in msg.get('X-Mailer', '')
180 # should try to parse msg['Date']
180 # should try to parse msg['Date']
181 date = None
181 date = None
182 nodeid = None
182 nodeid = None
183 branch = None
183 branch = None
184 parents = []
184 parents = []
185
185
186 if subject:
186 if subject:
187 if subject.startswith('[PATCH'):
187 if subject.startswith('[PATCH'):
188 pend = subject.find(']')
188 pend = subject.find(']')
189 if pend >= 0:
189 if pend >= 0:
190 subject = subject[pend + 1:].lstrip()
190 subject = subject[pend + 1:].lstrip()
191 subject = re.sub(r'\n[ \t]+', ' ', subject)
191 subject = re.sub(r'\n[ \t]+', ' ', subject)
192 ui.debug('Subject: %s\n' % subject)
192 ui.debug('Subject: %s\n' % subject)
193 if user:
193 if user:
194 ui.debug('From: %s\n' % user)
194 ui.debug('From: %s\n' % user)
195 diffs_seen = 0
195 diffs_seen = 0
196 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
196 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
197 message = ''
197 message = ''
198 for part in msg.walk():
198 for part in msg.walk():
199 content_type = part.get_content_type()
199 content_type = part.get_content_type()
200 ui.debug('Content-Type: %s\n' % content_type)
200 ui.debug('Content-Type: %s\n' % content_type)
201 if content_type not in ok_types:
201 if content_type not in ok_types:
202 continue
202 continue
203 payload = part.get_payload(decode=True)
203 payload = part.get_payload(decode=True)
204 m = diffre.search(payload)
204 m = diffre.search(payload)
205 if m:
205 if m:
206 hgpatch = False
206 hgpatch = False
207 hgpatchheader = False
207 hgpatchheader = False
208 ignoretext = False
208 ignoretext = False
209
209
210 ui.debug('found patch at byte %d\n' % m.start(0))
210 ui.debug('found patch at byte %d\n' % m.start(0))
211 diffs_seen += 1
211 diffs_seen += 1
212 cfp = cStringIO.StringIO()
212 cfp = cStringIO.StringIO()
213 for line in payload[:m.start(0)].splitlines():
213 for line in payload[:m.start(0)].splitlines():
214 if line.startswith('# HG changeset patch') and not hgpatch:
214 if line.startswith('# HG changeset patch') and not hgpatch:
215 ui.debug('patch generated by hg export\n')
215 ui.debug('patch generated by hg export\n')
216 hgpatch = True
216 hgpatch = True
217 hgpatchheader = True
217 hgpatchheader = True
218 # drop earlier commit message content
218 # drop earlier commit message content
219 cfp.seek(0)
219 cfp.seek(0)
220 cfp.truncate()
220 cfp.truncate()
221 subject = None
221 subject = None
222 elif hgpatchheader:
222 elif hgpatchheader:
223 if line.startswith('# User '):
223 if line.startswith('# User '):
224 user = line[7:]
224 user = line[7:]
225 ui.debug('From: %s\n' % user)
225 ui.debug('From: %s\n' % user)
226 elif line.startswith("# Date "):
226 elif line.startswith("# Date "):
227 date = line[7:]
227 date = line[7:]
228 elif line.startswith("# Branch "):
228 elif line.startswith("# Branch "):
229 branch = line[9:]
229 branch = line[9:]
230 elif line.startswith("# Node ID "):
230 elif line.startswith("# Node ID "):
231 nodeid = line[10:]
231 nodeid = line[10:]
232 elif line.startswith("# Parent "):
232 elif line.startswith("# Parent "):
233 parents.append(line[10:])
233 parents.append(line[10:])
234 elif not line.startswith("# "):
234 elif not line.startswith("# "):
235 hgpatchheader = False
235 hgpatchheader = False
236 elif line == '---' and gitsendmail:
236 elif line == '---' and gitsendmail:
237 ignoretext = True
237 ignoretext = True
238 if not hgpatchheader and not ignoretext:
238 if not hgpatchheader and not ignoretext:
239 cfp.write(line)
239 cfp.write(line)
240 cfp.write('\n')
240 cfp.write('\n')
241 message = cfp.getvalue()
241 message = cfp.getvalue()
242 if tmpfp:
242 if tmpfp:
243 tmpfp.write(payload)
243 tmpfp.write(payload)
244 if not payload.endswith('\n'):
244 if not payload.endswith('\n'):
245 tmpfp.write('\n')
245 tmpfp.write('\n')
246 elif not diffs_seen and message and content_type == 'text/plain':
246 elif not diffs_seen and message and content_type == 'text/plain':
247 message += '\n' + payload
247 message += '\n' + payload
248 except:
248 except:
249 tmpfp.close()
249 tmpfp.close()
250 os.unlink(tmpname)
250 os.unlink(tmpname)
251 raise
251 raise
252
252
253 if subject and not message.startswith(subject):
253 if subject and not message.startswith(subject):
254 message = '%s\n%s' % (subject, message)
254 message = '%s\n%s' % (subject, message)
255 tmpfp.close()
255 tmpfp.close()
256 if not diffs_seen:
256 if not diffs_seen:
257 os.unlink(tmpname)
257 os.unlink(tmpname)
258 return None, message, user, date, branch, None, None, None
258 return None, message, user, date, branch, None, None, None
259 p1 = parents and parents.pop(0) or None
259 p1 = parents and parents.pop(0) or None
260 p2 = parents and parents.pop(0) or None
260 p2 = parents and parents.pop(0) or None
261 return tmpname, message, user, date, branch, nodeid, p1, p2
261 return tmpname, message, user, date, branch, nodeid, p1, p2
262
262
263 class patchmeta(object):
263 class patchmeta(object):
264 """Patched file metadata
264 """Patched file metadata
265
265
266 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
266 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
267 or COPY. 'path' is patched file path. 'oldpath' is set to the
267 or COPY. 'path' is patched file path. 'oldpath' is set to the
268 origin file when 'op' is either COPY or RENAME, None otherwise. If
268 origin file when 'op' is either COPY or RENAME, None otherwise. If
269 file mode is changed, 'mode' is a tuple (islink, isexec) where
269 file mode is changed, 'mode' is a tuple (islink, isexec) where
270 'islink' is True if the file is a symlink and 'isexec' is True if
270 'islink' is True if the file is a symlink and 'isexec' is True if
271 the file is executable. Otherwise, 'mode' is None.
271 the file is executable. Otherwise, 'mode' is None.
272 """
272 """
273 def __init__(self, path):
273 def __init__(self, path):
274 self.path = path
274 self.path = path
275 self.oldpath = None
275 self.oldpath = None
276 self.mode = None
276 self.mode = None
277 self.op = 'MODIFY'
277 self.op = 'MODIFY'
278 self.binary = False
278 self.binary = False
279
279
280 def setmode(self, mode):
280 def setmode(self, mode):
281 islink = mode & 020000
281 islink = mode & 020000
282 isexec = mode & 0100
282 isexec = mode & 0100
283 self.mode = (islink, isexec)
283 self.mode = (islink, isexec)
284
284
285 def copy(self):
285 def copy(self):
286 other = patchmeta(self.path)
286 other = patchmeta(self.path)
287 other.oldpath = self.oldpath
287 other.oldpath = self.oldpath
288 other.mode = self.mode
288 other.mode = self.mode
289 other.op = self.op
289 other.op = self.op
290 other.binary = self.binary
290 other.binary = self.binary
291 return other
291 return other
292
292
293 def __repr__(self):
293 def __repr__(self):
294 return "<patchmeta %s %r>" % (self.op, self.path)
294 return "<patchmeta %s %r>" % (self.op, self.path)
295
295
296 def readgitpatch(lr):
296 def readgitpatch(lr):
297 """extract git-style metadata about patches from <patchname>"""
297 """extract git-style metadata about patches from <patchname>"""
298
298
299 # Filter patch for git information
299 # Filter patch for git information
300 gp = None
300 gp = None
301 gitpatches = []
301 gitpatches = []
302 for line in lr:
302 for line in lr:
303 line = line.rstrip(' \r\n')
303 line = line.rstrip(' \r\n')
304 if line.startswith('diff --git'):
304 if line.startswith('diff --git'):
305 m = gitre.match(line)
305 m = gitre.match(line)
306 if m:
306 if m:
307 if gp:
307 if gp:
308 gitpatches.append(gp)
308 gitpatches.append(gp)
309 dst = m.group(2)
309 dst = m.group(2)
310 gp = patchmeta(dst)
310 gp = patchmeta(dst)
311 elif gp:
311 elif gp:
312 if line.startswith('--- '):
312 if line.startswith('--- '):
313 gitpatches.append(gp)
313 gitpatches.append(gp)
314 gp = None
314 gp = None
315 continue
315 continue
316 if line.startswith('rename from '):
316 if line.startswith('rename from '):
317 gp.op = 'RENAME'
317 gp.op = 'RENAME'
318 gp.oldpath = line[12:]
318 gp.oldpath = line[12:]
319 elif line.startswith('rename to '):
319 elif line.startswith('rename to '):
320 gp.path = line[10:]
320 gp.path = line[10:]
321 elif line.startswith('copy from '):
321 elif line.startswith('copy from '):
322 gp.op = 'COPY'
322 gp.op = 'COPY'
323 gp.oldpath = line[10:]
323 gp.oldpath = line[10:]
324 elif line.startswith('copy to '):
324 elif line.startswith('copy to '):
325 gp.path = line[8:]
325 gp.path = line[8:]
326 elif line.startswith('deleted file'):
326 elif line.startswith('deleted file'):
327 gp.op = 'DELETE'
327 gp.op = 'DELETE'
328 elif line.startswith('new file mode '):
328 elif line.startswith('new file mode '):
329 gp.op = 'ADD'
329 gp.op = 'ADD'
330 gp.setmode(int(line[-6:], 8))
330 gp.setmode(int(line[-6:], 8))
331 elif line.startswith('new mode '):
331 elif line.startswith('new mode '):
332 gp.setmode(int(line[-6:], 8))
332 gp.setmode(int(line[-6:], 8))
333 elif line.startswith('GIT binary patch'):
333 elif line.startswith('GIT binary patch'):
334 gp.binary = True
334 gp.binary = True
335 if gp:
335 if gp:
336 gitpatches.append(gp)
336 gitpatches.append(gp)
337
337
338 return gitpatches
338 return gitpatches
339
339
340 class linereader(object):
340 class linereader(object):
341 # simple class to allow pushing lines back into the input stream
341 # simple class to allow pushing lines back into the input stream
342 def __init__(self, fp):
342 def __init__(self, fp):
343 self.fp = fp
343 self.fp = fp
344 self.buf = []
344 self.buf = []
345
345
346 def push(self, line):
346 def push(self, line):
347 if line is not None:
347 if line is not None:
348 self.buf.append(line)
348 self.buf.append(line)
349
349
350 def readline(self):
350 def readline(self):
351 if self.buf:
351 if self.buf:
352 l = self.buf[0]
352 l = self.buf[0]
353 del self.buf[0]
353 del self.buf[0]
354 return l
354 return l
355 return self.fp.readline()
355 return self.fp.readline()
356
356
357 def __iter__(self):
357 def __iter__(self):
358 while True:
358 while True:
359 l = self.readline()
359 l = self.readline()
360 if not l:
360 if not l:
361 break
361 break
362 yield l
362 yield l
363
363
364 class abstractbackend(object):
364 class abstractbackend(object):
365 def __init__(self, ui):
365 def __init__(self, ui):
366 self.ui = ui
366 self.ui = ui
367
367
368 def getfile(self, fname):
368 def getfile(self, fname):
369 """Return target file data and flags as a (data, (islink,
369 """Return target file data and flags as a (data, (islink,
370 isexec)) tuple.
370 isexec)) tuple.
371 """
371 """
372 raise NotImplementedError
372 raise NotImplementedError
373
373
374 def setfile(self, fname, data, mode, copysource):
374 def setfile(self, fname, data, mode, copysource):
375 """Write data to target file fname and set its mode. mode is a
375 """Write data to target file fname and set its mode. mode is a
376 (islink, isexec) tuple. If data is None, the file content should
376 (islink, isexec) tuple. If data is None, the file content should
377 be left unchanged. If the file is modified after being copied,
377 be left unchanged. If the file is modified after being copied,
378 copysource is set to the original file name.
378 copysource is set to the original file name.
379 """
379 """
380 raise NotImplementedError
380 raise NotImplementedError
381
381
382 def unlink(self, fname):
382 def unlink(self, fname):
383 """Unlink target file."""
383 """Unlink target file."""
384 raise NotImplementedError
384 raise NotImplementedError
385
385
386 def writerej(self, fname, failed, total, lines):
386 def writerej(self, fname, failed, total, lines):
387 """Write rejected lines for fname. total is the number of hunks
387 """Write rejected lines for fname. total is the number of hunks
388 which failed to apply and total the total number of hunks for this
388 which failed to apply and total the total number of hunks for this
389 files.
389 files.
390 """
390 """
391 pass
391 pass
392
392
393 def exists(self, fname):
393 def exists(self, fname):
394 raise NotImplementedError
394 raise NotImplementedError
395
395
396 class fsbackend(abstractbackend):
396 class fsbackend(abstractbackend):
397 def __init__(self, ui, basedir):
397 def __init__(self, ui, basedir):
398 super(fsbackend, self).__init__(ui)
398 super(fsbackend, self).__init__(ui)
399 self.opener = scmutil.opener(basedir)
399 self.opener = scmutil.opener(basedir)
400
400
401 def _join(self, f):
401 def _join(self, f):
402 return os.path.join(self.opener.base, f)
402 return os.path.join(self.opener.base, f)
403
403
404 def getfile(self, fname):
404 def getfile(self, fname):
405 path = self._join(fname)
405 path = self._join(fname)
406 if os.path.islink(path):
406 if os.path.islink(path):
407 return (os.readlink(path), (True, False))
407 return (os.readlink(path), (True, False))
408 isexec = False
408 isexec = False
409 try:
409 try:
410 isexec = os.lstat(path).st_mode & 0100 != 0
410 isexec = os.lstat(path).st_mode & 0100 != 0
411 except OSError, e:
411 except OSError, e:
412 if e.errno != errno.ENOENT:
412 if e.errno != errno.ENOENT:
413 raise
413 raise
414 return (self.opener.read(fname), (False, isexec))
414 return (self.opener.read(fname), (False, isexec))
415
415
416 def setfile(self, fname, data, mode, copysource):
416 def setfile(self, fname, data, mode, copysource):
417 islink, isexec = mode
417 islink, isexec = mode
418 if data is None:
418 if data is None:
419 util.setflags(self._join(fname), islink, isexec)
419 util.setflags(self._join(fname), islink, isexec)
420 return
420 return
421 if islink:
421 if islink:
422 self.opener.symlink(data, fname)
422 self.opener.symlink(data, fname)
423 else:
423 else:
424 self.opener.write(fname, data)
424 self.opener.write(fname, data)
425 if isexec:
425 if isexec:
426 util.setflags(self._join(fname), False, True)
426 util.setflags(self._join(fname), False, True)
427
427
428 def unlink(self, fname):
428 def unlink(self, fname):
429 try:
429 try:
430 util.unlinkpath(self._join(fname))
430 util.unlinkpath(self._join(fname))
431 except OSError, inst:
431 except OSError, inst:
432 if inst.errno != errno.ENOENT:
432 if inst.errno != errno.ENOENT:
433 raise
433 raise
434
434
435 def writerej(self, fname, failed, total, lines):
435 def writerej(self, fname, failed, total, lines):
436 fname = fname + ".rej"
436 fname = fname + ".rej"
437 self.ui.warn(
437 self.ui.warn(
438 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
438 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
439 (failed, total, fname))
439 (failed, total, fname))
440 fp = self.opener(fname, 'w')
440 fp = self.opener(fname, 'w')
441 fp.writelines(lines)
441 fp.writelines(lines)
442 fp.close()
442 fp.close()
443
443
444 def exists(self, fname):
444 def exists(self, fname):
445 return os.path.lexists(self._join(fname))
445 return os.path.lexists(self._join(fname))
446
446
447 class workingbackend(fsbackend):
447 class workingbackend(fsbackend):
448 def __init__(self, ui, repo, similarity):
448 def __init__(self, ui, repo, similarity):
449 super(workingbackend, self).__init__(ui, repo.root)
449 super(workingbackend, self).__init__(ui, repo.root)
450 self.repo = repo
450 self.repo = repo
451 self.similarity = similarity
451 self.similarity = similarity
452 self.removed = set()
452 self.removed = set()
453 self.changed = set()
453 self.changed = set()
454 self.copied = []
454 self.copied = []
455
455
456 def _checkknown(self, fname):
456 def _checkknown(self, fname):
457 if self.repo.dirstate[fname] == '?' and self.exists(fname):
457 if self.repo.dirstate[fname] == '?' and self.exists(fname):
458 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
458 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
459
459
460 def setfile(self, fname, data, mode, copysource):
460 def setfile(self, fname, data, mode, copysource):
461 self._checkknown(fname)
461 self._checkknown(fname)
462 super(workingbackend, self).setfile(fname, data, mode, copysource)
462 super(workingbackend, self).setfile(fname, data, mode, copysource)
463 if copysource is not None:
463 if copysource is not None:
464 self.copied.append((copysource, fname))
464 self.copied.append((copysource, fname))
465 self.changed.add(fname)
465 self.changed.add(fname)
466
466
467 def unlink(self, fname):
467 def unlink(self, fname):
468 self._checkknown(fname)
468 self._checkknown(fname)
469 super(workingbackend, self).unlink(fname)
469 super(workingbackend, self).unlink(fname)
470 self.removed.add(fname)
470 self.removed.add(fname)
471 self.changed.add(fname)
471 self.changed.add(fname)
472
472
473 def close(self):
473 def close(self):
474 wctx = self.repo[None]
474 wctx = self.repo[None]
475 addremoved = set(self.changed)
475 addremoved = set(self.changed)
476 for src, dst in self.copied:
476 for src, dst in self.copied:
477 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
477 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
478 addremoved.discard(src)
478 addremoved.discard(src)
479 if (not self.similarity) and self.removed:
479 if (not self.similarity) and self.removed:
480 wctx.forget(sorted(self.removed))
480 wctx.forget(sorted(self.removed))
481 if addremoved:
481 if addremoved:
482 cwd = self.repo.getcwd()
482 cwd = self.repo.getcwd()
483 if cwd:
483 if cwd:
484 addremoved = [util.pathto(self.repo.root, cwd, f)
484 addremoved = [util.pathto(self.repo.root, cwd, f)
485 for f in addremoved]
485 for f in addremoved]
486 scmutil.addremove(self.repo, addremoved, similarity=self.similarity)
486 scmutil.addremove(self.repo, addremoved, similarity=self.similarity)
487 return sorted(self.changed)
487 return sorted(self.changed)
488
488
489 class filestore(object):
489 class filestore(object):
490 def __init__(self, maxsize=None):
490 def __init__(self, maxsize=None):
491 self.opener = None
491 self.opener = None
492 self.files = {}
492 self.files = {}
493 self.created = 0
493 self.created = 0
494 self.maxsize = maxsize
494 self.maxsize = maxsize
495 if self.maxsize is None:
495 if self.maxsize is None:
496 self.maxsize = 4*(2**20)
496 self.maxsize = 4*(2**20)
497 self.size = 0
497 self.size = 0
498 self.data = {}
498 self.data = {}
499
499
500 def setfile(self, fname, data, mode, copied=None):
500 def setfile(self, fname, data, mode, copied=None):
501 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
501 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
502 self.data[fname] = (data, mode, copied)
502 self.data[fname] = (data, mode, copied)
503 self.size += len(data)
503 self.size += len(data)
504 else:
504 else:
505 if self.opener is None:
505 if self.opener is None:
506 root = tempfile.mkdtemp(prefix='hg-patch-')
506 root = tempfile.mkdtemp(prefix='hg-patch-')
507 self.opener = scmutil.opener(root)
507 self.opener = scmutil.opener(root)
508 # Avoid filename issues with these simple names
508 # Avoid filename issues with these simple names
509 fn = str(self.created)
509 fn = str(self.created)
510 self.opener.write(fn, data)
510 self.opener.write(fn, data)
511 self.created += 1
511 self.created += 1
512 self.files[fname] = (fn, mode, copied)
512 self.files[fname] = (fn, mode, copied)
513
513
514 def getfile(self, fname):
514 def getfile(self, fname):
515 if fname in self.data:
515 if fname in self.data:
516 return self.data[fname]
516 return self.data[fname]
517 if not self.opener or fname not in self.files:
517 if not self.opener or fname not in self.files:
518 raise IOError()
518 raise IOError()
519 fn, mode, copied = self.files[fname]
519 fn, mode, copied = self.files[fname]
520 return self.opener.read(fn), mode, copied
520 return self.opener.read(fn), mode, copied
521
521
522 def close(self):
522 def close(self):
523 if self.opener:
523 if self.opener:
524 shutil.rmtree(self.opener.base)
524 shutil.rmtree(self.opener.base)
525
525
526 class repobackend(abstractbackend):
526 class repobackend(abstractbackend):
527 def __init__(self, ui, repo, ctx, store):
527 def __init__(self, ui, repo, ctx, store):
528 super(repobackend, self).__init__(ui)
528 super(repobackend, self).__init__(ui)
529 self.repo = repo
529 self.repo = repo
530 self.ctx = ctx
530 self.ctx = ctx
531 self.store = store
531 self.store = store
532 self.changed = set()
532 self.changed = set()
533 self.removed = set()
533 self.removed = set()
534 self.copied = {}
534 self.copied = {}
535
535
536 def _checkknown(self, fname):
536 def _checkknown(self, fname):
537 if fname not in self.ctx:
537 if fname not in self.ctx:
538 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
538 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
539
539
540 def getfile(self, fname):
540 def getfile(self, fname):
541 try:
541 try:
542 fctx = self.ctx[fname]
542 fctx = self.ctx[fname]
543 except error.LookupError:
543 except error.LookupError:
544 raise IOError()
544 raise IOError()
545 flags = fctx.flags()
545 flags = fctx.flags()
546 return fctx.data(), ('l' in flags, 'x' in flags)
546 return fctx.data(), ('l' in flags, 'x' in flags)
547
547
548 def setfile(self, fname, data, mode, copysource):
548 def setfile(self, fname, data, mode, copysource):
549 if copysource:
549 if copysource:
550 self._checkknown(copysource)
550 self._checkknown(copysource)
551 if data is None:
551 if data is None:
552 data = self.ctx[fname].data()
552 data = self.ctx[fname].data()
553 self.store.setfile(fname, data, mode, copysource)
553 self.store.setfile(fname, data, mode, copysource)
554 self.changed.add(fname)
554 self.changed.add(fname)
555 if copysource:
555 if copysource:
556 self.copied[fname] = copysource
556 self.copied[fname] = copysource
557
557
558 def unlink(self, fname):
558 def unlink(self, fname):
559 self._checkknown(fname)
559 self._checkknown(fname)
560 self.removed.add(fname)
560 self.removed.add(fname)
561
561
562 def exists(self, fname):
562 def exists(self, fname):
563 return fname in self.ctx
563 return fname in self.ctx
564
564
565 def close(self):
565 def close(self):
566 return self.changed | self.removed
566 return self.changed | self.removed
567
567
568 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
568 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
569 unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
569 unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
570 contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
570 contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
571 eolmodes = ['strict', 'crlf', 'lf', 'auto']
571 eolmodes = ['strict', 'crlf', 'lf', 'auto']
572
572
class patchfile(object):
    """Applies the hunks of a patch to a single target file.

    Content is read from and written through ``backend`` (working dir,
    repository, ...); ``store`` supplies copy-source data.  Hunks are
    applied in memory via apply(); close() flushes the result and writes
    any rejects.
    """

    def __init__(self, ui, gp, backend, store, eolmode='strict'):
        self.fname = gp.path
        self.eolmode = eolmode
        self.eol = None
        self.backend = backend
        self.ui = ui
        self.lines = []
        self.exists = False
        self.missing = True
        self.mode = gp.mode
        self.copysource = gp.oldpath
        self.create = gp.op in ('ADD', 'COPY', 'RENAME')
        self.remove = gp.op == 'DELETE'
        try:
            if self.copysource is None:
                data, mode = backend.getfile(self.fname)
                self.exists = True
            else:
                data, mode = store.getfile(self.copysource)[:2]
                self.exists = backend.exists(self.fname)
            self.missing = False
            if data:
                self.lines = mdiff.splitnewlines(data)
            if self.mode is None:
                self.mode = mode
            if self.lines:
                # Normalize line endings
                if self.lines[0].endswith('\r\n'):
                    self.eol = '\r\n'
                elif self.lines[0].endswith('\n'):
                    self.eol = '\n'
                if eolmode != 'strict':
                    nlines = []
                    for l in self.lines:
                        if l.endswith('\r\n'):
                            l = l[:-2] + '\n'
                        nlines.append(l)
                    self.lines = nlines
        except IOError:
            # getfile() raises IOError for a missing file; that is only OK
            # when the patch is going to create it.
            if self.create:
                self.missing = False
            if self.mode is None:
                self.mode = (False, False)
        if self.missing:
            self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)

        self.hash = {}
        self.dirty = 0
        self.offset = 0
        self.skew = 0
        self.rej = []
        self.fileprinted = False
        self.printfile(False)
        self.hunks = 0

    def writelines(self, fname, lines, mode):
        """Write *lines* out through the backend, converting EOLs first."""
        if self.eolmode == 'auto':
            eol = self.eol
        elif self.eolmode == 'crlf':
            eol = '\r\n'
        else:
            eol = '\n'

        if self.eolmode != 'strict' and eol and eol != '\n':
            rawlines = []
            for l in lines:
                if l and l[-1] == '\n':
                    l = l[:-1] + eol
                rawlines.append(l)
            lines = rawlines

        self.backend.setfile(fname, ''.join(lines), mode, self.copysource)

    def printfile(self, warn):
        """Print 'patching file X' once, as a warning or a note."""
        if self.fileprinted:
            return
        if warn or self.ui.verbose:
            self.fileprinted = True
        s = _("patching file %s\n") % self.fname
        if warn:
            self.ui.warn(s)
        else:
            self.ui.note(s)

    def findlines(self, l, linenum):
        # looks through the hash and finds candidate lines. The
        # result is a list of line numbers sorted based on distance
        # from linenum
        cand = self.hash.get(l, [])
        if len(cand) > 1:
            # resort our list of potentials forward then back.
            cand.sort(key=lambda x: abs(x - linenum))
        return cand

    def write_rej(self):
        # our rejects are a little different from patch(1). This always
        # creates rejects in the same form as the original patch. A file
        # header is inserted so that you can run the reject through patch again
        # without having to type the filename.
        if not self.rej:
            return
        base = os.path.basename(self.fname)
        lines = ["--- %s\n+++ %s\n" % (base, base)]
        for x in self.rej:
            for l in x.hunk:
                lines.append(l)
                if l[-1] != '\n':
                    lines.append("\n\ No newline at end of file\n")
        self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)

    def apply(self, h):
        """Apply one hunk; return 0/fuzz-amount on success, -1 on reject."""
        if not h.complete():
            raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
                             (h.number, h.desc, len(h.a), h.lena, len(h.b),
                              h.lenb))

        self.hunks += 1

        if self.missing:
            self.rej.append(h)
            return -1

        if self.exists and self.create:
            if self.copysource:
                # BUGFIX: interpolate after translation, not inside _();
                # formatting first would defeat the gettext catalog lookup.
                self.ui.warn(_("cannot create %s: destination already "
                               "exists\n") % self.fname)
            else:
                self.ui.warn(_("file %s already exists\n") % self.fname)
            self.rej.append(h)
            return -1

        if isinstance(h, binhunk):
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                self.lines[:] = h.new()
                self.offset += len(h.new())
                self.dirty = True
            return 0

        horig = h
        if (self.eolmode in ('crlf', 'lf')
            or self.eolmode == 'auto' and self.eol):
            # If new eols are going to be normalized, then normalize
            # hunk data before patching. Otherwise, preserve input
            # line-endings.
            h = h.getnormalized()

        # fast case first, no offsets, no fuzz
        old = h.old()
        start = h.starta + self.offset
        # zero length hunk ranges already have their start decremented
        if h.lena:
            start -= 1
        orig_start = start
        # if there's skew we want to emit the "(offset %d lines)" even
        # when the hunk cleanly applies at start + skew, so skip the
        # fast case code
        if self.skew == 0 and diffhelpers.testhunk(old, self.lines, start) == 0:
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                self.lines[start : start + h.lena] = h.new()
                self.offset += h.lenb - h.lena
                self.dirty = True
            return 0

        # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
        self.hash = {}
        for x, s in enumerate(self.lines):
            self.hash.setdefault(s, []).append(x)
        if h.hunk[-1][0] != ' ':
            # if the hunk tried to put something at the bottom of the file
            # override the start line and use eof here
            search_start = len(self.lines)
        else:
            search_start = orig_start + self.skew

        for fuzzlen in xrange(3):
            for toponly in [True, False]:
                old = h.old(fuzzlen, toponly)

                cand = self.findlines(old[0][1:], search_start)
                for l in cand:
                    if diffhelpers.testhunk(old, self.lines, l) == 0:
                        newlines = h.new(fuzzlen, toponly)
                        self.lines[l : l + len(old)] = newlines
                        self.offset += len(newlines) - len(old)
                        self.skew = l - orig_start
                        self.dirty = True
                        offset = l - orig_start - fuzzlen
                        if fuzzlen:
                            msg = _("Hunk #%d succeeded at %d "
                                    "with fuzz %d "
                                    "(offset %d lines).\n")
                            self.printfile(True)
                            self.ui.warn(msg %
                                (h.number, l + 1, fuzzlen, offset))
                        else:
                            msg = _("Hunk #%d succeeded at %d "
                                    "(offset %d lines).\n")
                            self.ui.note(msg % (h.number, l + 1, offset))
                        return fuzzlen
        self.printfile(True)
        self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
        self.rej.append(horig)
        return -1

    def close(self):
        """Flush pending changes and rejects; return the reject count."""
        if self.dirty:
            self.writelines(self.fname, self.lines, self.mode)
        self.write_rej()
        return len(self.rej)
789
789
class hunk(object):
    """One parsed text hunk.

    ``self.a`` holds the old-side lines ('-'/' ' prefixed), ``self.b`` the
    new-side lines (prefix already stripped), ``self.hunk`` the raw unified
    rendering starting with the "@@" description line.  Parsing happens in
    read_unified_hunk()/read_context_hunk() depending on the ``context``
    flag passed to the constructor.
    """

    def __init__(self, desc, num, lr, context):
        self.number = num
        self.desc = desc
        self.hunk = [desc]
        self.a = []
        self.b = []
        self.starta = self.lena = None
        self.startb = self.lenb = None
        if lr is not None:
            if context:
                self.read_context_hunk(lr)
            else:
                self.read_unified_hunk(lr)

    def getnormalized(self):
        """Return a copy with line endings normalized to LF."""

        def normalize(lines):
            nlines = []
            for line in lines:
                if line.endswith('\r\n'):
                    line = line[:-2] + '\n'
                nlines.append(line)
            return nlines

        # Dummy object, it is rebuilt manually
        nh = hunk(self.desc, self.number, None, None)
        nh.number = self.number
        nh.desc = self.desc
        nh.hunk = self.hunk
        nh.a = normalize(self.a)
        nh.b = normalize(self.b)
        nh.starta = self.starta
        nh.startb = self.startb
        nh.lena = self.lena
        nh.lenb = self.lenb
        return nh

    def read_unified_hunk(self, lr):
        """Parse a unified-format hunk body from linereader *lr*."""
        m = unidesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, self.lena, self.startb, self.lenb = m.groups()
        # The ",len" parts of the @@ header are optional and default to 1.
        if self.lena is None:
            self.lena = 1
        else:
            self.lena = int(self.lena)
        if self.lenb is None:
            self.lenb = 1
        else:
            self.lenb = int(self.lenb)
        self.starta = int(self.starta)
        self.startb = int(self.startb)
        diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a, self.b)
        # if we hit eof before finishing out the hunk, the last line will
        # be zero length. Lets try to fix it up.
        while len(self.hunk[-1]) == 0:
            del self.hunk[-1]
            del self.a[-1]
            del self.b[-1]
            self.lena -= 1
            self.lenb -= 1
        self._fixnewline(lr)

    def read_context_hunk(self, lr):
        """Parse a context-format hunk and rebuild it in unified form."""
        self.desc = lr.readline()
        m = contextdesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, aend = m.groups()
        self.starta = int(self.starta)
        if aend is None:
            aend = self.starta
        self.lena = int(aend) - self.starta
        if self.starta:
            self.lena += 1
        # Old-side block: context lines carry a two-character '  ' prefix,
        # removals '- ', changes '! ' (hence s = l[2:] below).
        for x in xrange(self.lena):
            l = lr.readline()
            if l.startswith('---'):
                # lines addition, old block is empty
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('- ') or l.startswith('! '):
                u = '-' + s
            elif l.startswith('  '):
                u = ' ' + s
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.a.append(u)
            self.hunk.append(u)

        l = lr.readline()
        if l.startswith('\ '):
            # "\ No newline at end of file": trim the trailing newline we
            # recorded for the last old-side line.
            s = self.a[-1][:-1]
            self.a[-1] = s
            self.hunk[-1] = s
            l = lr.readline()
        m = contextdesc.match(l)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.startb, bend = m.groups()
        self.startb = int(self.startb)
        if bend is None:
            bend = self.startb
        self.lenb = int(bend) - self.startb
        if self.startb:
            self.lenb += 1
        hunki = 1
        # New-side block, merged into self.hunk in unified order.
        for x in xrange(self.lenb):
            l = lr.readline()
            if l.startswith('\ '):
                # XXX: the only way to hit this is with an invalid line range.
                # The no-eol marker is not counted in the line range, but I
                # guess there are diff(1) out there which behave differently.
                s = self.b[-1][:-1]
                self.b[-1] = s
                self.hunk[hunki - 1] = s
                continue
            if not l:
                # line deletions, new block is empty and we hit EOF
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('+ ') or l.startswith('! '):
                u = '+' + s
            elif l.startswith('  '):
                u = ' ' + s
            elif len(self.b) == 0:
                # line deletions, new block is empty
                lr.push(l)
                break
            else:
                # NOTE(review): message says "old text" although this is the
                # new-side parse — wording kept as upstream has it.
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.b.append(s)
            # Advance through self.hunk to find where this new-side line
            # belongs, skipping '-' lines; insert it if not already there.
            while True:
                if hunki >= len(self.hunk):
                    h = ""
                else:
                    h = self.hunk[hunki]
                hunki += 1
                if h == u:
                    break
                elif h.startswith('-'):
                    continue
                else:
                    self.hunk.insert(hunki - 1, u)
                    break

        if not self.a:
            # this happens when lines were only added to the hunk
            for x in self.hunk:
                if x.startswith('-') or x.startswith(' '):
                    self.a.append(x)
        if not self.b:
            # this happens when lines were only deleted from the hunk
            for x in self.hunk:
                if x.startswith('+') or x.startswith(' '):
                    self.b.append(x[1:])
        # @@ -start,len +start,len @@
        self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
                                               self.startb, self.lenb)
        self.hunk[0] = self.desc
        self._fixnewline(lr)

    def _fixnewline(self, lr):
        """Consume a trailing '\\ No newline ...' marker if present."""
        l = lr.readline()
        if l.startswith('\ '):
            diffhelpers.fix_newline(self.hunk, self.a, self.b)
        else:
            lr.push(l)

    def complete(self):
        # True when both sides hold exactly as many lines as the header said.
        return len(self.a) == self.lena and len(self.b) == self.lenb

    def fuzzit(self, l, fuzz, toponly):
        # this removes context lines from the top and bottom of list 'l'. It
        # checks the hunk to make sure only context lines are removed, and then
        # returns a new shortened list of lines.
        fuzz = min(fuzz, len(l)-1)
        if fuzz:
            top = 0
            bot = 0
            hlen = len(self.hunk)
            for x in xrange(hlen - 1):
                # the hunk starts with the @@ line, so use x+1
                if self.hunk[x + 1][0] == ' ':
                    top += 1
                else:
                    break
            if not toponly:
                for x in xrange(hlen - 1):
                    if self.hunk[hlen - bot - 1][0] == ' ':
                        bot += 1
                    else:
                        break

            # top and bot now count context in the hunk
            # adjust them if either one is short
            context = max(top, bot, 3)
            if bot < context:
                bot = max(0, fuzz - (context - bot))
            else:
                bot = min(fuzz, bot)
            if top < context:
                top = max(0, fuzz - (context - top))
            else:
                top = min(fuzz, top)

            return l[top:len(l)-bot]
        return l

    def old(self, fuzz=0, toponly=False):
        # Old-side lines, optionally shortened by fuzz context trimming.
        return self.fuzzit(self.a, fuzz, toponly)

    def new(self, fuzz=0, toponly=False):
        # New-side lines, optionally shortened by fuzz context trimming.
        return self.fuzzit(self.b, fuzz, toponly)
1010
1010
class binhunk(object):
    'A binary patch file. Only understands literals so far.'

    def __init__(self, lr):
        self.text = None
        self.hunk = ['GIT binary patch\n']
        self._read(lr)

    def complete(self):
        # Parsing succeeded only if _read() reached the final assignment.
        return self.text is not None

    def new(self):
        # The whole file content as a single "line", mirroring hunk.new().
        return [self.text]

    def _read(self, lr):
        """Decode a 'literal N' git binary hunk from linereader *lr*.

        Each data line starts with a length character (A-Z -> 1..26,
        a-z -> 27..52) followed by base85 data; the concatenated payload is
        zlib-compressed file content whose inflated size must equal N.
        """
        line = lr.readline()
        self.hunk.append(line)
        while line and not line.startswith('literal '):
            line = lr.readline()
            self.hunk.append(line)
        if not line:
            raise PatchError(_('could not extract binary patch'))
        size = int(line[8:].rstrip())
        dec = []
        line = lr.readline()
        self.hunk.append(line)
        while len(line) > 1:
            l = line[0]
            if l <= 'Z' and l >= 'A':
                l = ord(l) - ord('A') + 1
            else:
                l = ord(l) - ord('a') + 27
            dec.append(base85.b85decode(line[1:-1])[:l])
            line = lr.readline()
            self.hunk.append(line)
        text = zlib.decompress(''.join(dec))
        if len(text) != size:
            # BUGFIX: the two values must be a tuple for the %-format;
            # previously 'size' was passed as a second PatchError argument
            # and the format itself raised TypeError.
            raise PatchError(_('binary patch is %d bytes, not %d') %
                             (len(text), size))
        self.text = text
1050
1050
def parsefilename(str):
    """Extract the file name from a '--- ' / '+++ ' diff header line.

    The four-character marker is skipped, trailing CR/LF are stripped, and
    anything after the first tab — or, failing that, the first space — is
    treated as trailing junk (typically a timestamp) and dropped.
    """
    # --- filename \t|space stuff
    s = str[4:].rstrip('\r\n')
    for sep in ('\t', ' '):
        cut = s.find(sep)
        if cut >= 0:
            return s[:cut]
    return s
1060
1060
def pathstrip(path, strip):
    """Drop *strip* leading directory components, like patch(1) -p.

    Returns ``(stripped_prefix, remainder)``; the prefix keeps its trailing
    slash(es).  Raises PatchError when the path has fewer components than
    requested.
    """
    if strip == 0:
        return '', path.rstrip()
    pathlen = len(path)
    i = 0
    remaining = strip
    while remaining > 0:
        i = path.find('/', i)
        if i == -1:
            raise PatchError(_("unable to strip away %d of %d dirs from %s") %
                             (remaining, strip, path))
        i += 1
        # consume '//' in the path
        while i < pathlen - 1 and path[i] == '/':
            i += 1
        remaining -= 1
    return path[:i].lstrip(), path[i:].rstrip()
1078
1078
def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip):
    """Build a patchmeta (target name + ADD/DELETE op) for a plain diff.

    Decides, from the ---/+++ names, the first hunk's ranges and which of
    the stripped paths actually exist in *backend*, which file the hunk
    should be applied to and whether it is a creation or a deletion.
    """
    nulla = afile_orig == "/dev/null"
    nullb = bfile_orig == "/dev/null"
    # A creation/deletion shows /dev/null on one side and a 0,0 range.
    create = nulla and hunk.starta == 0 and hunk.lena == 0
    remove = nullb and hunk.startb == 0 and hunk.lenb == 0
    abase, afile = pathstrip(afile_orig, strip)
    gooda = not nulla and backend.exists(afile)
    bbase, bfile = pathstrip(bfile_orig, strip)
    if afile == bfile:
        goodb = gooda
    else:
        goodb = not nullb and backend.exists(bfile)
    missing = not goodb and not gooda and not create

    # some diff programs apparently produce patches where the afile is
    # not /dev/null, but afile starts with bfile
    abasedir = afile[:afile.rfind('/') + 1]
    bbasedir = bfile[:bfile.rfind('/') + 1]
    if (missing and abasedir == bbasedir and afile.startswith(bfile)
        and hunk.starta == 0 and hunk.lena == 0):
        create = True
        missing = False

    # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
    # diff is between a file and its backup. In this case, the original
    # file should be patched (see original mpatch code).
    isbackup = (abase == bbase and bfile.startswith(afile))
    fname = None
    if not missing:
        if gooda and goodb:
            # Legacy and/or idiom: afile when it's a backup diff, else bfile.
            fname = isbackup and afile or bfile
        elif gooda:
            fname = afile

    if not fname:
        # Fall back purely on the header names when existence didn't decide.
        if not nullb:
            fname = isbackup and afile or bfile
        elif not nulla:
            fname = afile
        else:
            raise PatchError(_("undefined source and destination files"))

    gp = patchmeta(fname)
    if create:
        gp.op = 'ADD'
    elif remove:
        gp.op = 'DELETE'
    return gp
1127
1127
def scangitpatch(lr, firstline):
    """Pre-scan a git patch for copy/rename commands.

    Git patches can emit:
    - rename a to b
    - change b
    - copy a to c
    - change c

    Such a sequence cannot be applied as-is: 'a' would already be renamed
    when its change arrives, and 'b' already modified.  So the whole patch
    is scanned up front with readgitpatch() so copies can be performed
    ahead of time, then the stream is rewound for the real pass.
    """
    try:
        fp = lr.fp
        pos = fp.tell()
    except IOError:
        # Unseekable input: slurp everything into a rewindable buffer.
        fp = cStringIO.StringIO(lr.fp.read())
        pos = 0
    gitlr = linereader(fp)
    gitlr.push(firstline)
    gitpatches = readgitpatch(gitlr)
    fp.seek(pos)
    return gitpatches
1153
1153
def iterhunks(fp):
    """Read a patch and yield the following events:
    - ("file", afile, bfile, firsthunk): select a new target file.
    - ("hunk", hunk): a new hunk is ready to be applied, follows a
    "file" event.
    - ("git", gitchanges): current diff is in git format, gitchanges
    maps filenames to gitpatch records. Unique event.
    """
    afile = ""
    bfile = ""
    state = None
    hunknum = 0
    emitfile = newfile = False
    # stack of pending (afile, bfile, patchmeta) from a git patch scan,
    # stored in reverse order so .pop() yields them in patch order
    gitpatches = None

    # our states
    BFILE = 1
    # tri-state: None = diff style unknown yet, True = context diff,
    # False = unified diff
    context = None
    lr = linereader(fp)

    while True:
        x = lr.readline()
        if not x:
            break
        if state == BFILE and (
            (not context and x[0] == '@')
            or (context is not False and x.startswith('***************'))
            or x.startswith('GIT binary patch')):
            gp = None
            # consume the matching git metadata entry, if any, so it can
            # be attached to the "file" event below
            if (gitpatches and
                (gitpatches[-1][0] == afile or gitpatches[-1][1] == bfile)):
                gp = gitpatches.pop()[2]
            if x.startswith('GIT binary patch'):
                h = binhunk(lr)
            else:
                if context is None and x.startswith('***************'):
                    context = True
                h = hunk(x, hunknum + 1, lr, context)
            hunknum += 1
            if emitfile:
                # first hunk of a new target: announce the file first
                emitfile = False
                yield 'file', (afile, bfile, h, gp and gp.copy() or None)
            yield 'hunk', h
        elif x.startswith('diff --git'):
            m = gitre.match(x)
            if not m:
                continue
            if not gitpatches:
                # scan whole input for git metadata
                gitpatches = [('a/' + gp.path, 'b/' + gp.path, gp) for gp
                              in scangitpatch(lr, x)]
                yield 'git', [g[2].copy() for g in gitpatches
                              if g[2].op in ('COPY', 'RENAME')]
                gitpatches.reverse()
            afile = 'a/' + m.group(1)
            bfile = 'b/' + m.group(2)
            # flush metadata-only entries (no hunks) preceding this file
            while afile != gitpatches[-1][0] and bfile != gitpatches[-1][1]:
                gp = gitpatches.pop()[2]
                yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
            gp = gitpatches[-1][2]
            # copy/rename + modify should modify target, not source
            if gp.op in ('COPY', 'DELETE', 'RENAME', 'ADD') or gp.mode:
                afile = bfile
                newfile = True
        elif x.startswith('---'):
            # check for a unified diff
            l2 = lr.readline()
            if not l2.startswith('+++'):
                lr.push(l2)
                continue
            newfile = True
            context = False
            afile = parsefilename(x)
            bfile = parsefilename(l2)
        elif x.startswith('***'):
            # check for a context diff
            l2 = lr.readline()
            if not l2.startswith('---'):
                lr.push(l2)
                continue
            l3 = lr.readline()
            lr.push(l3)
            if not l3.startswith("***************"):
                lr.push(l2)
                continue
            newfile = True
            context = True
            afile = parsefilename(x)
            bfile = parsefilename(l2)

        if newfile:
            newfile = False
            emitfile = True
            state = BFILE
            hunknum = 0

    # emit any trailing git entries that carried no hunks at all
    # (pure renames, mode changes, ...)
    while gitpatches:
        gp = gitpatches.pop()[2]
        yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1253
1253
def applydiff(ui, fp, backend, store, strip=1, eolmode='strict'):
    """Read a patch from fp and try to apply it via backend.

    Returns 0 for a clean patch, -1 if any rejects were found and 1
    if there was any fuzz.

    With 'strict' eolmode, patch content and patched files are read
    and written in binary mode. Otherwise line endings are ignored
    while patching and then normalized according to 'eolmode'.
    """
    return _applydiff(ui, fp, patchfile, backend, store,
                      strip=strip, eolmode=eolmode)
1266
1266
1267 def _applydiff(ui, fp, patcher, backend, store, strip=1,
1267 def _applydiff(ui, fp, patcher, backend, store, strip=1,
1268 eolmode='strict'):
1268 eolmode='strict'):
1269
1269
1270 def pstrip(p):
1270 def pstrip(p):
1271 return pathstrip(p, strip - 1)[1]
1271 return pathstrip(p, strip - 1)[1]
1272
1272
1273 rejects = 0
1273 rejects = 0
1274 err = 0
1274 err = 0
1275 current_file = None
1275 current_file = None
1276
1276
1277 for state, values in iterhunks(fp):
1277 for state, values in iterhunks(fp):
1278 if state == 'hunk':
1278 if state == 'hunk':
1279 if not current_file:
1279 if not current_file:
1280 continue
1280 continue
1281 ret = current_file.apply(values)
1281 ret = current_file.apply(values)
1282 if ret > 0:
1282 if ret > 0:
1283 err = 1
1283 err = 1
1284 elif state == 'file':
1284 elif state == 'file':
1285 if current_file:
1285 if current_file:
1286 rejects += current_file.close()
1286 rejects += current_file.close()
1287 current_file = None
1287 current_file = None
1288 afile, bfile, first_hunk, gp = values
1288 afile, bfile, first_hunk, gp = values
1289 if gp:
1289 if gp:
1290 path = pstrip(gp.path)
1290 path = pstrip(gp.path)
1291 gp.path = pstrip(gp.path)
1291 gp.path = pstrip(gp.path)
1292 if gp.oldpath:
1292 if gp.oldpath:
1293 gp.oldpath = pstrip(gp.oldpath)
1293 gp.oldpath = pstrip(gp.oldpath)
1294 else:
1294 else:
1295 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip)
1295 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip)
1296 if gp.op == 'RENAME':
1296 if gp.op == 'RENAME':
1297 backend.unlink(gp.oldpath)
1297 backend.unlink(gp.oldpath)
1298 if not first_hunk:
1298 if not first_hunk:
1299 if gp.op == 'DELETE':
1299 if gp.op == 'DELETE':
1300 backend.unlink(gp.path)
1300 backend.unlink(gp.path)
1301 continue
1301 continue
1302 data, mode = None, None
1302 data, mode = None, None
1303 if gp.op in ('RENAME', 'COPY'):
1303 if gp.op in ('RENAME', 'COPY'):
1304 data, mode = store.getfile(gp.oldpath)[:2]
1304 data, mode = store.getfile(gp.oldpath)[:2]
1305 if gp.mode:
1305 if gp.mode:
1306 mode = gp.mode
1306 mode = gp.mode
1307 if gp.op == 'ADD':
1307 if gp.op == 'ADD':
1308 # Added files without content have no hunk and
1308 # Added files without content have no hunk and
1309 # must be created
1309 # must be created
1310 data = ''
1310 data = ''
1311 if data or mode:
1311 if data or mode:
1312 if (gp.op in ('ADD', 'RENAME', 'COPY')
1312 if (gp.op in ('ADD', 'RENAME', 'COPY')
1313 and backend.exists(gp.path)):
1313 and backend.exists(gp.path)):
1314 raise PatchError(_("cannot create %s: destination "
1314 raise PatchError(_("cannot create %s: destination "
1315 "already exists") % gp.path)
1315 "already exists") % gp.path)
1316 backend.setfile(gp.path, data, mode, gp.oldpath)
1316 backend.setfile(gp.path, data, mode, gp.oldpath)
1317 continue
1317 continue
1318 try:
1318 try:
1319 current_file = patcher(ui, gp, backend, store,
1319 current_file = patcher(ui, gp, backend, store,
1320 eolmode=eolmode)
1320 eolmode=eolmode)
1321 except PatchError, inst:
1321 except PatchError, inst:
1322 ui.warn(str(inst) + '\n')
1322 ui.warn(str(inst) + '\n')
1323 current_file = None
1323 current_file = None
1324 rejects += 1
1324 rejects += 1
1325 continue
1325 continue
1326 elif state == 'git':
1326 elif state == 'git':
1327 for gp in values:
1327 for gp in values:
1328 path = pstrip(gp.oldpath)
1328 path = pstrip(gp.oldpath)
1329 data, mode = backend.getfile(path)
1329 data, mode = backend.getfile(path)
1330 store.setfile(path, data, mode)
1330 store.setfile(path, data, mode)
1331 else:
1331 else:
1332 raise util.Abort(_('unsupported parser state: %s') % state)
1332 raise util.Abort(_('unsupported parser state: %s') % state)
1333
1333
1334 if current_file:
1334 if current_file:
1335 rejects += current_file.close()
1335 rejects += current_file.close()
1336
1336
1337 if rejects:
1337 if rejects:
1338 return -1
1338 return -1
1339 return err
1339 return err
1340
1340
def _externalpatch(ui, repo, patcher, patchname, strip, files,
                   similarity):
    """use <patcher> to apply <patchname> to the working directory.

    Parses the patcher's output to collect touched files (added to the
    'files' set) and to report fuzz/reject/failure lines. Raises
    PatchError if the patcher exits non-zero.
    returns whether patch was applied with fuzz factor."""

    fuzz = False
    args = []
    # fix: initialize before the loop; the original only bound these in
    # the 'patching file' branch, so a 'with fuzz' or 'FAILED' line
    # seen first raised UnboundLocalError
    pf = None
    printed_file = False
    cwd = repo.root
    if cwd:
        args.append('-d %s' % util.shellquote(cwd))
    fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
                                         util.shellquote(patchname)))
    try:
        for line in fp:
            line = line.rstrip()
            ui.note(line + '\n')
            if line.startswith('patching file '):
                pf = util.parsepatchoutput(line)
                printed_file = False
                files.add(pf)
            elif line.find('with fuzz') >= 0:
                fuzz = True
                if not printed_file:
                    if pf is not None:
                        ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
            elif line.find('saving rejects to file') >= 0:
                ui.warn(line + '\n')
            elif line.find('FAILED') >= 0:
                if not printed_file:
                    if pf is not None:
                        ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
    finally:
        if files:
            cfiles = list(files)
            cwd = repo.getcwd()
            if cwd:
                # report paths relative to the user's cwd, not repo root
                cfiles = [util.pathto(repo.root, cwd, f)
                          for f in cfiles]
            scmutil.addremove(repo, cfiles, similarity=similarity)
    code = fp.close()
    if code:
        raise PatchError(_("patch command failed: %s") %
                         util.explainexit(code)[0])
    return fuzz
1387
1387
def patchbackend(ui, backend, patchobj, strip, files=None, eolmode='strict'):
    """Apply patchobj (a file name or an open file object) via backend.

    Names reported by the backend on close are added to 'files'.
    Returns True when the patch applied with fuzz; raises PatchError
    if any hunk was rejected.
    """
    if files is None:
        files = set()
    if eolmode is None:
        eolmode = ui.config('patch', 'eol', 'strict')
    if eolmode.lower() not in eolmodes:
        raise util.Abort(_('unsupported line endings type: %s') % eolmode)
    eolmode = eolmode.lower()

    filedata = filestore()
    try:
        patchf = open(patchobj, 'rb')
    except TypeError:
        # not a file name - assume an already-open file object
        patchf = patchobj
    try:
        status = applydiff(ui, patchf, backend, filedata, strip=strip,
                           eolmode=eolmode)
    finally:
        if patchf != patchobj:
            patchf.close()
        files.update(backend.close())
        filedata.close()
    if status < 0:
        raise PatchError(_('patch failed to apply'))
    return status > 0
1413
1413
def internalpatch(ui, repo, patchobj, strip, files=None, eolmode='strict',
                  similarity=0):
    """Apply patchobj to the working directory with the builtin patcher.

    returns whether patch was applied with fuzz factor."""
    return patchbackend(ui, workingbackend(ui, repo, similarity),
                        patchobj, strip, files, eolmode)
1420
1420
def patchrepo(ui, repo, ctx, store, patchobj, strip, files=None,
              eolmode='strict'):
    """Apply patchobj on top of changectx ctx, writing results into store."""
    return patchbackend(ui, repobackend(ui, repo, ctx, store),
                        patchobj, strip, files, eolmode)
1425
1425
def makememctx(repo, parents, text, user, date, branch, files, store,
               editor=None):
    """Build an in-memory changectx whose file contents come from store.

    If 'editor' is given it is invoked to (re)write the commit text.
    """
    def fctxfn(repo, memctx, path):
        data, (islink, isexec), copied = store.getfile(path)
        return context.memfilectx(path, data, islink=islink,
                                  isexec=isexec, copied=copied)
    extra = {}
    if branch:
        extra['branch'] = encoding.fromlocal(branch)
    ctx = context.memctx(repo, parents, text, files, fctxfn, user,
                         date, extra)
    if editor:
        ctx._text = editor(repo, ctx, [])
    return ctx
1440
1440
1441 def patch(ui, repo, patchname, strip=1, files=None, eolmode='strict',
1441 def patch(ui, repo, patchname, strip=1, files=None, eolmode='strict',
1442 similarity=0):
1442 similarity=0):
1443 """Apply <patchname> to the working directory.
1443 """Apply <patchname> to the working directory.
1444
1444
1445 'eolmode' specifies how end of lines should be handled. It can be:
1445 'eolmode' specifies how end of lines should be handled. It can be:
1446 - 'strict': inputs are read in binary mode, EOLs are preserved
1446 - 'strict': inputs are read in binary mode, EOLs are preserved
1447 - 'crlf': EOLs are ignored when patching and reset to CRLF
1447 - 'crlf': EOLs are ignored when patching and reset to CRLF
1448 - 'lf': EOLs are ignored when patching and reset to LF
1448 - 'lf': EOLs are ignored when patching and reset to LF
1449 - None: get it from user settings, default to 'strict'
1449 - None: get it from user settings, default to 'strict'
1450 'eolmode' is ignored when using an external patcher program.
1450 'eolmode' is ignored when using an external patcher program.
1451
1451
1452 Returns whether patch was applied with fuzz factor.
1452 Returns whether patch was applied with fuzz factor.
1453 """
1453 """
1454 patcher = ui.config('ui', 'patch')
1454 patcher = ui.config('ui', 'patch')
1455 if files is None:
1455 if files is None:
1456 files = set()
1456 files = set()
1457 try:
1457 try:
1458 if patcher:
1458 if patcher:
1459 return _externalpatch(ui, repo, patcher, patchname, strip,
1459 return _externalpatch(ui, repo, patcher, patchname, strip,
1460 files, similarity)
1460 files, similarity)
1461 return internalpatch(ui, repo, patchname, strip, files, eolmode,
1461 return internalpatch(ui, repo, patchname, strip, files, eolmode,
1462 similarity)
1462 similarity)
1463 except PatchError, err:
1463 except PatchError, err:
1464 raise util.Abort(str(err))
1464 raise util.Abort(str(err))
1465
1465
def changedfiles(ui, repo, patchpath, strip=1):
    """Return the set of repository paths touched by the patch at patchpath."""
    backend = fsbackend(ui, repo.root)
    patchf = open(patchpath, 'rb')
    try:
        touched = set()
        for event, data in iterhunks(patchf):
            if event == 'file':
                afile, bfile, firsthunk, gp = data
                if gp:
                    gp.path = pathstrip(gp.path, strip - 1)[1]
                    if gp.oldpath:
                        gp.oldpath = pathstrip(gp.oldpath, strip - 1)[1]
                else:
                    gp = makepatchmeta(backend, afile, bfile, firsthunk,
                                       strip)
                touched.add(gp.path)
                if gp.op == 'RENAME':
                    # a rename touches its source too
                    touched.add(gp.oldpath)
            elif event not in ('hunk', 'git'):
                raise util.Abort(_('unsupported parser state: %s') % event)
        return touched
    finally:
        patchf.close()
1488
1488
def b85diff(to, tn):
    '''print base85-encoded binary diff'''
    def gitindex(text):
        # blob id as git computes it: sha1 over a 'blob <len>\0' header
        # plus the content; empty content maps to the null id
        if not text:
            return hex(nullid)
        s = util.sha1('blob %d\0' % len(text))
        s.update(text)
        return s.hexdigest()

    def fmtline(line):
        # git encodes each chunk's byte length as one letter:
        # 'A'-'Z' for 1..26 bytes, 'a'-'z' for 27..52
        n = len(line)
        if n <= 26:
            lenchar = chr(ord('A') + n - 1)
        else:
            lenchar = chr(ord('a') + n - 27)
        return '%c%s\n' % (lenchar, base85.b85encode(line, True))

    def chunk(text, csize=52):
        for i in xrange(0, len(text), csize):
            yield text[i:i + csize]

    oldhash = gitindex(to)
    newhash = gitindex(tn)
    if oldhash == newhash:
        return ""

    # TODO: deltas - for now always emit a full 'literal' blob
    pieces = ['index %s..%s\nGIT binary patch\nliteral %s\n' %
              (oldhash, newhash, len(tn))]
    for part in chunk(zlib.compress(tn)):
        pieces.append(fmtline(part))
    pieces.append('\n')
    return ''.join(pieces)
1526
1526
class GitDiffRequired(Exception):
    """Raised when a change cannot be represented in plain diff format
    and git extended diff format is required (see diff()'s losedata)."""
    pass
1529
1529
def diffopts(ui, opts=None, untrusted=False, section='diff'):
    """Build mdiff.diffopts from command options and ui configuration.

    A value set in 'opts' wins; otherwise the named key is read from
    the given config section.
    """
    def lookup(key, name=None, getter=ui.configbool):
        if opts and opts.get(key):
            return opts.get(key)
        return getter(section, name or key, None, untrusted=untrusted)
    return mdiff.diffopts(
        text=opts and opts.get('text'),
        git=lookup('git'),
        nodates=lookup('nodates'),
        showfunc=lookup('show_function', 'showfunc'),
        ignorews=lookup('ignore_all_space', 'ignorews'),
        ignorewsamount=lookup('ignore_space_change', 'ignorewsamount'),
        ignoreblanklines=lookup('ignore_blank_lines', 'ignoreblanklines'),
        context=lookup('unified', getter=ui.config))
1543
1543
def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
         losedatafn=None, prefix=''):
    '''yields diff of changes to files between two nodes, or node and
    working directory.

    if node1 is None, use first dirstate parent instead.
    if node2 is None, compare node1 with working directory.

    losedatafn(**kwarg) is a callable run when opts.upgrade=True and
    every time some change cannot be represented with the current
    patch format. Return False to upgrade to git patch format, True to
    accept the loss or raise an exception to abort the diff. It is
    called with the name of current file being diffed as 'fn'. If set
    to None, patches will always be upgraded to git format when
    necessary.

    prefix is a filename prefix that is prepended to all filenames on
    display (used for subrepos).
    '''

    if opts is None:
        opts = mdiff.defaultopts

    if not node1 and not node2:
        node1 = repo.dirstate.p1()

    def lrugetfilectx():
        # small LRU cache (20 entries) of filelogs keyed by file name,
        # so repeated filectx lookups of the same file reuse its revlog
        cache = {}
        order = []
        def getfilectx(f, ctx):
            fctx = ctx.filectx(f, filelog=cache.get(f))
            if f not in cache:
                if len(cache) > 20:
                    del cache[order.pop(0)]
                cache[f] = fctx.filelog()
            else:
                order.remove(f)
            order.append(f)
            return fctx
        return getfilectx
    getfilectx = lrugetfilectx()

    ctx1 = repo[node1]
    ctx2 = repo[node2]

    if not changes:
        changes = repo.status(ctx1, ctx2, match=match)
    modified, added, removed = changes[:3]

    if not modified and not added and not removed:
        return []

    # revision ids shown in the "diff -r A -r B" header (suppressed in
    # quiet mode and in git-format diffs below)
    revs = None
    if not repo.ui.quiet:
        hexfunc = repo.ui.debugflag and hex or short
        revs = [hexfunc(node) for node in [node1, node2] if node]

    copy = {}
    if opts.git or opts.upgrade:
        copy = copies.copies(repo, ctx1, ctx2, repo[nullid])[0]

    difffn = lambda opts, losedata: trydiff(repo, revs, ctx1, ctx2,
                modified, added, removed, copy, getfilectx, opts, losedata, prefix)
    if opts.upgrade and not opts.git:
        try:
            def losedata(fn):
                # losedatafn decides whether losing data for fn is
                # acceptable; if not, restart in git format
                if not losedatafn or not losedatafn(fn=fn):
                    raise GitDiffRequired()
            # Buffer the whole output until we are sure it can be generated
            return list(difffn(opts.copy(git=False), losedata))
        except GitDiffRequired:
            return difffn(opts.copy(git=True), None)
    else:
        return difffn(opts, None)
1618
1618
def difflabel(func, *args, **kw):
    '''yields 2-tuples of (output, label) based on the output of func()'''
    # labels for lines that belong to a file header
    headprefixes = [('diff', 'diff.diffline'),
                    ('copy', 'diff.extended'),
                    ('rename', 'diff.extended'),
                    ('old', 'diff.extended'),
                    ('new', 'diff.extended'),
                    ('deleted', 'diff.extended'),
                    ('---', 'diff.file_a'),
                    ('+++', 'diff.file_b')]
    # labels for lines inside hunks
    textprefixes = [('@', 'diff.hunk'),
                    ('-', 'diff.deleted'),
                    ('+', 'diff.inserted')]
    head = False
    for chunk in func(*args, **kw):
        lines = chunk.split('\n')
        for i, line in enumerate(lines):
            if i != 0:
                yield ('\n', '')
            if head:
                if line.startswith('@'):
                    head = False
            else:
                # '\\' presumably covers '\ No newline at end of file'
                # markers, which belong to the hunk body, not a header
                if line and not line[0] in ' +-@\\':
                    head = True
            stripline = line
            if not head and line and line[0] in '+-':
                # highlight trailing whitespace, but only in changed lines
                stripline = line.rstrip()
            prefixes = textprefixes
            if head:
                prefixes = headprefixes
            for prefix, label in prefixes:
                if stripline.startswith(prefix):
                    yield (stripline, label)
                    break
            else:
                yield (line, '')
            if line != stripline:
                # emit the stripped trailing whitespace separately so it
                # can be rendered with its own label
                yield (line[len(stripline):], 'diff.trailingwhitespace')
1659
1659
1660 def diffui(*args, **kw):
1660 def diffui(*args, **kw):
1661 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
1661 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
1662 return difflabel(diff, *args, **kw)
1662 return difflabel(diff, *args, **kw)
1663
1663
1664
1664
1665 def _addmodehdr(header, omode, nmode):
1665 def _addmodehdr(header, omode, nmode):
1666 if omode != nmode:
1666 if omode != nmode:
1667 header.append('old mode %s\n' % omode)
1667 header.append('old mode %s\n' % omode)
1668 header.append('new mode %s\n' % nmode)
1668 header.append('new mode %s\n' % nmode)
1669
1669
1670 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
1670 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
1671 copy, getfilectx, opts, losedatafn, prefix):
1671 copy, getfilectx, opts, losedatafn, prefix):
1672
1672
1673 def join(f):
1673 def join(f):
1674 return os.path.join(prefix, f)
1674 return os.path.join(prefix, f)
1675
1675
1676 date1 = util.datestr(ctx1.date())
1676 date1 = util.datestr(ctx1.date())
1677 man1 = ctx1.manifest()
1677 man1 = ctx1.manifest()
1678
1678
1679 gone = set()
1679 gone = set()
1680 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
1680 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
1681
1681
1682 copyto = dict([(v, k) for k, v in copy.items()])
1682 copyto = dict([(v, k) for k, v in copy.items()])
1683
1683
1684 if opts.git:
1684 if opts.git:
1685 revs = None
1685 revs = None
1686
1686
1687 for f in sorted(modified + added + removed):
1687 for f in sorted(modified + added + removed):
1688 to = None
1688 to = None
1689 tn = None
1689 tn = None
1690 dodiff = True
1690 dodiff = True
1691 header = []
1691 header = []
1692 if f in man1:
1692 if f in man1:
1693 to = getfilectx(f, ctx1).data()
1693 to = getfilectx(f, ctx1).data()
1694 if f not in removed:
1694 if f not in removed:
1695 tn = getfilectx(f, ctx2).data()
1695 tn = getfilectx(f, ctx2).data()
1696 a, b = f, f
1696 a, b = f, f
1697 if opts.git or losedatafn:
1697 if opts.git or losedatafn:
1698 if f in added:
1698 if f in added:
1699 mode = gitmode[ctx2.flags(f)]
1699 mode = gitmode[ctx2.flags(f)]
1700 if f in copy or f in copyto:
1700 if f in copy or f in copyto:
1701 if opts.git:
1701 if opts.git:
1702 if f in copy:
1702 if f in copy:
1703 a = copy[f]
1703 a = copy[f]
1704 else:
1704 else:
1705 a = copyto[f]
1705 a = copyto[f]
1706 omode = gitmode[man1.flags(a)]
1706 omode = gitmode[man1.flags(a)]
1707 _addmodehdr(header, omode, mode)
1707 _addmodehdr(header, omode, mode)
1708 if a in removed and a not in gone:
1708 if a in removed and a not in gone:
1709 op = 'rename'
1709 op = 'rename'
1710 gone.add(a)
1710 gone.add(a)
1711 else:
1711 else:
1712 op = 'copy'
1712 op = 'copy'
1713 header.append('%s from %s\n' % (op, join(a)))
1713 header.append('%s from %s\n' % (op, join(a)))
1714 header.append('%s to %s\n' % (op, join(f)))
1714 header.append('%s to %s\n' % (op, join(f)))
1715 to = getfilectx(a, ctx1).data()
1715 to = getfilectx(a, ctx1).data()
1716 else:
1716 else:
1717 losedatafn(f)
1717 losedatafn(f)
1718 else:
1718 else:
1719 if opts.git:
1719 if opts.git:
1720 header.append('new file mode %s\n' % mode)
1720 header.append('new file mode %s\n' % mode)
1721 elif ctx2.flags(f):
1721 elif ctx2.flags(f):
1722 losedatafn(f)
1722 losedatafn(f)
1723 # In theory, if tn was copied or renamed we should check
1723 # In theory, if tn was copied or renamed we should check
1724 # if the source is binary too but the copy record already
1724 # if the source is binary too but the copy record already
1725 # forces git mode.
1725 # forces git mode.
1726 if util.binary(tn):
1726 if util.binary(tn):
1727 if opts.git:
1727 if opts.git:
1728 dodiff = 'binary'
1728 dodiff = 'binary'
1729 else:
1729 else:
1730 losedatafn(f)
1730 losedatafn(f)
1731 if not opts.git and not tn:
1731 if not opts.git and not tn:
1732 # regular diffs cannot represent new empty file
1732 # regular diffs cannot represent new empty file
1733 losedatafn(f)
1733 losedatafn(f)
1734 elif f in removed:
1734 elif f in removed:
1735 if opts.git:
1735 if opts.git:
1736 # have we already reported a copy above?
1736 # have we already reported a copy above?
1737 if ((f in copy and copy[f] in added
1737 if ((f in copy and copy[f] in added
1738 and copyto[copy[f]] == f) or
1738 and copyto[copy[f]] == f) or
1739 (f in copyto and copyto[f] in added
1739 (f in copyto and copyto[f] in added
1740 and copy[copyto[f]] == f)):
1740 and copy[copyto[f]] == f)):
1741 dodiff = False
1741 dodiff = False
1742 else:
1742 else:
1743 header.append('deleted file mode %s\n' %
1743 header.append('deleted file mode %s\n' %
1744 gitmode[man1.flags(f)])
1744 gitmode[man1.flags(f)])
1745 elif not to or util.binary(to):
1745 elif not to or util.binary(to):
1746 # regular diffs cannot represent empty file deletion
1746 # regular diffs cannot represent empty file deletion
1747 losedatafn(f)
1747 losedatafn(f)
1748 else:
1748 else:
1749 oflag = man1.flags(f)
1749 oflag = man1.flags(f)
1750 nflag = ctx2.flags(f)
1750 nflag = ctx2.flags(f)
1751 binary = util.binary(to) or util.binary(tn)
1751 binary = util.binary(to) or util.binary(tn)
1752 if opts.git:
1752 if opts.git:
1753 _addmodehdr(header, gitmode[oflag], gitmode[nflag])
1753 _addmodehdr(header, gitmode[oflag], gitmode[nflag])
1754 if binary:
1754 if binary:
1755 dodiff = 'binary'
1755 dodiff = 'binary'
1756 elif binary or nflag != oflag:
1756 elif binary or nflag != oflag:
1757 losedatafn(f)
1757 losedatafn(f)
1758 if opts.git:
1758 if opts.git:
1759 header.insert(0, mdiff.diffline(revs, join(a), join(b), opts))
1759 header.insert(0, mdiff.diffline(revs, join(a), join(b), opts))
1760
1760
1761 if dodiff:
1761 if dodiff:
1762 if dodiff == 'binary':
1762 if dodiff == 'binary':
1763 text = b85diff(to, tn)
1763 text = b85diff(to, tn)
1764 else:
1764 else:
1765 text = mdiff.unidiff(to, date1,
1765 text = mdiff.unidiff(to, date1,
1766 # ctx2 date may be dynamic
1766 # ctx2 date may be dynamic
1767 tn, util.datestr(ctx2.date()),
1767 tn, util.datestr(ctx2.date()),
1768 join(a), join(b), revs, opts=opts)
1768 join(a), join(b), revs, opts=opts)
1769 if header and (text or len(header) > 1):
1769 if header and (text or len(header) > 1):
1770 yield ''.join(header)
1770 yield ''.join(header)
1771 if text:
1771 if text:
1772 yield text
1772 yield text
1773
1773
1774 def diffstatsum(stats):
1774 def diffstatsum(stats):
1775 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
1775 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
1776 for f, a, r, b in stats:
1776 for f, a, r, b in stats:
1777 maxfile = max(maxfile, encoding.colwidth(f))
1777 maxfile = max(maxfile, encoding.colwidth(f))
1778 maxtotal = max(maxtotal, a + r)
1778 maxtotal = max(maxtotal, a + r)
1779 addtotal += a
1779 addtotal += a
1780 removetotal += r
1780 removetotal += r
1781 binary = binary or b
1781 binary = binary or b
1782
1782
1783 return maxfile, maxtotal, addtotal, removetotal, binary
1783 return maxfile, maxtotal, addtotal, removetotal, binary
1784
1784
1785 def diffstatdata(lines):
1785 def diffstatdata(lines):
1786 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
1786 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
1787
1787
1788 results = []
1788 results = []
1789 filename, adds, removes, isbinary = None, 0, 0, False
1789 filename, adds, removes, isbinary = None, 0, 0, False
1790
1790
1791 def addresult():
1791 def addresult():
1792 if filename:
1792 if filename:
1793 results.append((filename, adds, removes, isbinary))
1793 results.append((filename, adds, removes, isbinary))
1794
1794
1795 for line in lines:
1795 for line in lines:
1796 if line.startswith('diff'):
1796 if line.startswith('diff'):
1797 addresult()
1797 addresult()
1798 # set numbers to 0 anyway when starting new file
1798 # set numbers to 0 anyway when starting new file
1799 adds, removes, isbinary = 0, 0, False
1799 adds, removes, isbinary = 0, 0, False
1800 if line.startswith('diff --git'):
1800 if line.startswith('diff --git'):
1801 filename = gitre.search(line).group(1)
1801 filename = gitre.search(line).group(1)
1802 elif line.startswith('diff -r'):
1802 elif line.startswith('diff -r'):
1803 # format: "diff -r ... -r ... filename"
1803 # format: "diff -r ... -r ... filename"
1804 filename = diffre.search(line).group(1)
1804 filename = diffre.search(line).group(1)
1805 elif line.startswith('+') and not line.startswith('+++'):
1805 elif line.startswith('+') and not line.startswith('+++'):
1806 adds += 1
1806 adds += 1
1807 elif line.startswith('-') and not line.startswith('---'):
1807 elif line.startswith('-') and not line.startswith('---'):
1808 removes += 1
1808 removes += 1
1809 elif (line.startswith('GIT binary patch') or
1809 elif (line.startswith('GIT binary patch') or
1810 line.startswith('Binary file')):
1810 line.startswith('Binary file')):
1811 isbinary = True
1811 isbinary = True
1812 addresult()
1812 addresult()
1813 return results
1813 return results
1814
1814
1815 def diffstat(lines, width=80, git=False):
1815 def diffstat(lines, width=80, git=False):
1816 output = []
1816 output = []
1817 stats = diffstatdata(lines)
1817 stats = diffstatdata(lines)
1818 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
1818 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
1819
1819
1820 countwidth = len(str(maxtotal))
1820 countwidth = len(str(maxtotal))
1821 if hasbinary and countwidth < 3:
1821 if hasbinary and countwidth < 3:
1822 countwidth = 3
1822 countwidth = 3
1823 graphwidth = width - countwidth - maxname - 6
1823 graphwidth = width - countwidth - maxname - 6
1824 if graphwidth < 10:
1824 if graphwidth < 10:
1825 graphwidth = 10
1825 graphwidth = 10
1826
1826
1827 def scale(i):
1827 def scale(i):
1828 if maxtotal <= graphwidth:
1828 if maxtotal <= graphwidth:
1829 return i
1829 return i
1830 # If diffstat runs out of room it doesn't print anything,
1830 # If diffstat runs out of room it doesn't print anything,
1831 # which isn't very useful, so always print at least one + or -
1831 # which isn't very useful, so always print at least one + or -
1832 # if there were at least some changes.
1832 # if there were at least some changes.
1833 return max(i * graphwidth // maxtotal, int(bool(i)))
1833 return max(i * graphwidth // maxtotal, int(bool(i)))
1834
1834
1835 for filename, adds, removes, isbinary in stats:
1835 for filename, adds, removes, isbinary in stats:
1836 if isbinary:
1836 if isbinary:
1837 count = 'Bin'
1837 count = 'Bin'
1838 else:
1838 else:
1839 count = adds + removes
1839 count = adds + removes
1840 pluses = '+' * scale(adds)
1840 pluses = '+' * scale(adds)
1841 minuses = '-' * scale(removes)
1841 minuses = '-' * scale(removes)
1842 output.append(' %s%s | %*s %s%s\n' %
1842 output.append(' %s%s | %*s %s%s\n' %
1843 (filename, ' ' * (maxname - encoding.colwidth(filename)),
1843 (filename, ' ' * (maxname - encoding.colwidth(filename)),
1844 countwidth, count, pluses, minuses))
1844 countwidth, count, pluses, minuses))
1845
1845
1846 if stats:
1846 if stats:
1847 output.append(_(' %d files changed, %d insertions(+), %d deletions(-)\n')
1847 output.append(_(' %d files changed, %d insertions(+), %d deletions(-)\n')
1848 % (len(stats), totaladds, totalremoves))
1848 % (len(stats), totaladds, totalremoves))
1849
1849
1850 return ''.join(output)
1850 return ''.join(output)
1851
1851
1852 def diffstatui(*args, **kw):
1852 def diffstatui(*args, **kw):
1853 '''like diffstat(), but yields 2-tuples of (output, label) for
1853 '''like diffstat(), but yields 2-tuples of (output, label) for
1854 ui.write()
1854 ui.write()
1855 '''
1855 '''
1856
1856
1857 for line in diffstat(*args, **kw).splitlines():
1857 for line in diffstat(*args, **kw).splitlines():
1858 if line and line[-1] in '+-':
1858 if line and line[-1] in '+-':
1859 name, graph = line.rsplit(' ', 1)
1859 name, graph = line.rsplit(' ', 1)
1860 yield (name + ' ', '')
1860 yield (name + ' ', '')
1861 m = re.search(r'\++', graph)
1861 m = re.search(r'\++', graph)
1862 if m:
1862 if m:
1863 yield (m.group(0), 'diffstat.inserted')
1863 yield (m.group(0), 'diffstat.inserted')
1864 m = re.search(r'-+', graph)
1864 m = re.search(r'-+', graph)
1865 if m:
1865 if m:
1866 yield (m.group(0), 'diffstat.deleted')
1866 yield (m.group(0), 'diffstat.deleted')
1867 else:
1867 else:
1868 yield (line, '')
1868 yield (line, '')
1869 yield ('\n', '')
1869 yield ('\n', '')
@@ -1,232 +1,233 b''
1 $ cat >> $HGRCPATH <<EOF
1 $ cat >> $HGRCPATH <<EOF
2 > [extensions]
2 > [extensions]
3 > largefiles =
3 > largefiles =
4 > share =
4 > share =
5 > graphlog =
5 > graphlog =
6 > [largefiles]
6 > [largefiles]
7 > minsize = 0.5
7 > minsize = 0.5
8 > patterns = **.dat
8 > patterns = **.other
9 > **.dat
9 > EOF
10 > EOF
10
11
11 "lfconvert" works
12 "lfconvert" works
12 $ hg init bigfile-repo
13 $ hg init bigfile-repo
13 $ cd bigfile-repo
14 $ cd bigfile-repo
14 $ cat >> .hg/hgrc <<EOF
15 $ cat >> .hg/hgrc <<EOF
15 > [extensions]
16 > [extensions]
16 > largefiles = !
17 > largefiles = !
17 > EOF
18 > EOF
18 $ mkdir sub
19 $ mkdir sub
19 $ dd if=/dev/zero bs=1k count=256 > large 2> /dev/null
20 $ dd if=/dev/zero bs=1k count=256 > large 2> /dev/null
20 $ echo normal > normal1
21 $ echo normal > normal1
21 $ echo alsonormal > sub/normal2
22 $ echo alsonormal > sub/normal2
22 $ dd if=/dev/zero bs=1k count=10 > sub/maybelarge.dat 2> /dev/null
23 $ dd if=/dev/zero bs=1k count=10 > sub/maybelarge.dat 2> /dev/null
23 $ hg addremove
24 $ hg addremove
24 adding large
25 adding large
25 adding normal1
26 adding normal1
26 adding sub/maybelarge.dat
27 adding sub/maybelarge.dat
27 adding sub/normal2
28 adding sub/normal2
28 $ hg commit -m"add large, normal1" large normal1
29 $ hg commit -m"add large, normal1" large normal1
29 $ hg commit -m"add sub/*" sub
30 $ hg commit -m"add sub/*" sub
30 $ [ -d .hg/largefiles ] && echo fail || echo pass
31 $ [ -d .hg/largefiles ] && echo fail || echo pass
31 pass
32 pass
32 $ cd ..
33 $ cd ..
33 $ hg lfconvert --size 0.2 bigfile-repo largefiles-repo
34 $ hg lfconvert --size 0.2 bigfile-repo largefiles-repo
34 initializing destination largefiles-repo
35 initializing destination largefiles-repo
35
36
36 "lfconvert" converts content correctly
37 "lfconvert" converts content correctly
37 $ cd largefiles-repo
38 $ cd largefiles-repo
38 $ hg up
39 $ hg up
39 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
40 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
40 getting changed largefiles
41 getting changed largefiles
41 2 largefiles updated, 0 removed
42 2 largefiles updated, 0 removed
42 $ hg locate
43 $ hg locate
43 .hglf/large
44 .hglf/large
44 .hglf/sub/maybelarge.dat
45 .hglf/sub/maybelarge.dat
45 normal1
46 normal1
46 sub/normal2
47 sub/normal2
47 $ cat normal1
48 $ cat normal1
48 normal
49 normal
49 $ cat sub/normal2
50 $ cat sub/normal2
50 alsonormal
51 alsonormal
51 $ "$TESTDIR/md5sum.py" large sub/maybelarge.dat
52 $ "$TESTDIR/md5sum.py" large sub/maybelarge.dat
52 ec87a838931d4d5d2e94a04644788a55 large
53 ec87a838931d4d5d2e94a04644788a55 large
53 1276481102f218c981e0324180bafd9f sub/maybelarge.dat
54 1276481102f218c981e0324180bafd9f sub/maybelarge.dat
54
55
55 "lfconvert" adds 'largefiles' to .hg/requires.
56 "lfconvert" adds 'largefiles' to .hg/requires.
56 $ cat .hg/requires
57 $ cat .hg/requires
57 largefiles
58 largefiles
58 revlogv1
59 revlogv1
59 fncache
60 fncache
60 store
61 store
61 dotencode
62 dotencode
62
63
63 "lfconvert" includes a newline at the end of the standin files.
64 "lfconvert" includes a newline at the end of the standin files.
64 $ cat .hglf/large .hglf/sub/maybelarge.dat
65 $ cat .hglf/large .hglf/sub/maybelarge.dat
65 2e000fa7e85759c7f4c254d4d9c33ef481e459a7
66 2e000fa7e85759c7f4c254d4d9c33ef481e459a7
66 34e163be8e43c5631d8b92e9c43ab0bf0fa62b9c
67 34e163be8e43c5631d8b92e9c43ab0bf0fa62b9c
67 $ cd ..
68 $ cd ..
68
69
69 add some changesets to rename/remove/merge
70 add some changesets to rename/remove/merge
70 $ cd bigfile-repo
71 $ cd bigfile-repo
71 $ hg mv -q sub stuff
72 $ hg mv -q sub stuff
72 $ hg commit -m"rename sub/ to stuff/"
73 $ hg commit -m"rename sub/ to stuff/"
73 $ hg update -q 1
74 $ hg update -q 1
74 $ echo blah >> normal3
75 $ echo blah >> normal3
75 $ echo blah >> sub/normal2
76 $ echo blah >> sub/normal2
76 $ echo blah >> sub/maybelarge.dat
77 $ echo blah >> sub/maybelarge.dat
77 $ "$TESTDIR/md5sum.py" sub/maybelarge.dat
78 $ "$TESTDIR/md5sum.py" sub/maybelarge.dat
78 1dd0b99ff80e19cff409702a1d3f5e15 sub/maybelarge.dat
79 1dd0b99ff80e19cff409702a1d3f5e15 sub/maybelarge.dat
79 $ hg commit -A -m"add normal3, modify sub/*"
80 $ hg commit -A -m"add normal3, modify sub/*"
80 adding normal3
81 adding normal3
81 created new head
82 created new head
82 $ hg rm large normal3
83 $ hg rm large normal3
83 $ hg commit -q -m"remove large, normal3"
84 $ hg commit -q -m"remove large, normal3"
84 $ hg merge
85 $ hg merge
85 merging sub/maybelarge.dat and stuff/maybelarge.dat to stuff/maybelarge.dat
86 merging sub/maybelarge.dat and stuff/maybelarge.dat to stuff/maybelarge.dat
86 warning: $TESTTMP/bigfile-repo/stuff/maybelarge.dat looks like a binary file. (glob)
87 warning: $TESTTMP/bigfile-repo/stuff/maybelarge.dat looks like a binary file. (glob)
87 merging stuff/maybelarge.dat incomplete! (edit conflicts, then use 'hg resolve --mark')
88 merging stuff/maybelarge.dat incomplete! (edit conflicts, then use 'hg resolve --mark')
88 merging sub/normal2 and stuff/normal2 to stuff/normal2
89 merging sub/normal2 and stuff/normal2 to stuff/normal2
89 0 files updated, 1 files merged, 0 files removed, 1 files unresolved
90 0 files updated, 1 files merged, 0 files removed, 1 files unresolved
90 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
91 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
91 [1]
92 [1]
92 $ hg cat -r . sub/maybelarge.dat > stuff/maybelarge.dat
93 $ hg cat -r . sub/maybelarge.dat > stuff/maybelarge.dat
93 $ hg resolve -m stuff/maybelarge.dat
94 $ hg resolve -m stuff/maybelarge.dat
94 $ hg commit -m"merge"
95 $ hg commit -m"merge"
95 $ hg glog --template "{rev}:{node|short} {desc|firstline}\n"
96 $ hg glog --template "{rev}:{node|short} {desc|firstline}\n"
96 @ 5:4884f215abda merge
97 @ 5:4884f215abda merge
97 |\
98 |\
98 | o 4:7285f817b77e remove large, normal3
99 | o 4:7285f817b77e remove large, normal3
99 | |
100 | |
100 | o 3:67e3892e3534 add normal3, modify sub/*
101 | o 3:67e3892e3534 add normal3, modify sub/*
101 | |
102 | |
102 o | 2:c96c8beb5d56 rename sub/ to stuff/
103 o | 2:c96c8beb5d56 rename sub/ to stuff/
103 |/
104 |/
104 o 1:020c65d24e11 add sub/*
105 o 1:020c65d24e11 add sub/*
105 |
106 |
106 o 0:117b8328f97a add large, normal1
107 o 0:117b8328f97a add large, normal1
107
108
108 $ cd ..
109 $ cd ..
109
110
110 lfconvert with rename, merge, and remove
111 lfconvert with rename, merge, and remove
111 $ rm -rf largefiles-repo
112 $ rm -rf largefiles-repo
112 $ hg lfconvert --size 0.2 bigfile-repo largefiles-repo
113 $ hg lfconvert --size 0.2 bigfile-repo largefiles-repo
113 initializing destination largefiles-repo
114 initializing destination largefiles-repo
114 $ cd largefiles-repo
115 $ cd largefiles-repo
115 $ hg glog --template "{rev}:{node|short} {desc|firstline}\n"
116 $ hg glog --template "{rev}:{node|short} {desc|firstline}\n"
116 o 5:8e05f5f2b77e merge
117 o 5:8e05f5f2b77e merge
117 |\
118 |\
118 | o 4:a5a02de7a8e4 remove large, normal3
119 | o 4:a5a02de7a8e4 remove large, normal3
119 | |
120 | |
120 | o 3:55759520c76f add normal3, modify sub/*
121 | o 3:55759520c76f add normal3, modify sub/*
121 | |
122 | |
122 o | 2:261ad3f3f037 rename sub/ to stuff/
123 o | 2:261ad3f3f037 rename sub/ to stuff/
123 |/
124 |/
124 o 1:334e5237836d add sub/*
125 o 1:334e5237836d add sub/*
125 |
126 |
126 o 0:d4892ec57ce2 add large, normal1
127 o 0:d4892ec57ce2 add large, normal1
127
128
128 $ hg locate -r 2
129 $ hg locate -r 2
129 .hglf/large
130 .hglf/large
130 .hglf/stuff/maybelarge.dat
131 .hglf/stuff/maybelarge.dat
131 normal1
132 normal1
132 stuff/normal2
133 stuff/normal2
133 $ hg locate -r 3
134 $ hg locate -r 3
134 .hglf/large
135 .hglf/large
135 .hglf/sub/maybelarge.dat
136 .hglf/sub/maybelarge.dat
136 normal1
137 normal1
137 normal3
138 normal3
138 sub/normal2
139 sub/normal2
139 $ hg locate -r 4
140 $ hg locate -r 4
140 .hglf/sub/maybelarge.dat
141 .hglf/sub/maybelarge.dat
141 normal1
142 normal1
142 sub/normal2
143 sub/normal2
143 $ hg locate -r 5
144 $ hg locate -r 5
144 .hglf/stuff/maybelarge.dat
145 .hglf/stuff/maybelarge.dat
145 normal1
146 normal1
146 stuff/normal2
147 stuff/normal2
147 $ hg update
148 $ hg update
148 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
149 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
149 getting changed largefiles
150 getting changed largefiles
150 1 largefiles updated, 0 removed
151 1 largefiles updated, 0 removed
151 $ cat stuff/normal2
152 $ cat stuff/normal2
152 alsonormal
153 alsonormal
153 blah
154 blah
154 $ "$TESTDIR/md5sum.py" stuff/maybelarge.dat
155 $ "$TESTDIR/md5sum.py" stuff/maybelarge.dat
155 1dd0b99ff80e19cff409702a1d3f5e15 stuff/maybelarge.dat
156 1dd0b99ff80e19cff409702a1d3f5e15 stuff/maybelarge.dat
156 $ cat .hglf/stuff/maybelarge.dat
157 $ cat .hglf/stuff/maybelarge.dat
157 76236b6a2c6102826c61af4297dd738fb3b1de38
158 76236b6a2c6102826c61af4297dd738fb3b1de38
158 $ cd ..
159 $ cd ..
159
160
160 "lfconvert" error cases
161 "lfconvert" error cases
161 $ hg lfconvert http://localhost/foo foo
162 $ hg lfconvert http://localhost/foo foo
162 abort: http://localhost/foo is not a local Mercurial repo
163 abort: http://localhost/foo is not a local Mercurial repo
163 [255]
164 [255]
164 $ hg lfconvert foo ssh://localhost/foo
165 $ hg lfconvert foo ssh://localhost/foo
165 abort: ssh://localhost/foo is not a local Mercurial repo
166 abort: ssh://localhost/foo is not a local Mercurial repo
166 [255]
167 [255]
167 $ hg lfconvert nosuchrepo foo
168 $ hg lfconvert nosuchrepo foo
168 abort: repository nosuchrepo not found!
169 abort: repository nosuchrepo not found!
169 [255]
170 [255]
170 $ hg share -q -U bigfile-repo shared
171 $ hg share -q -U bigfile-repo shared
171 $ printf 'bogus' > shared/.hg/sharedpath
172 $ printf 'bogus' > shared/.hg/sharedpath
172 $ hg lfconvert shared foo
173 $ hg lfconvert shared foo
173 abort: .hg/sharedpath points to nonexistent directory $TESTTMP/bogus! (glob)
174 abort: .hg/sharedpath points to nonexistent directory $TESTTMP/bogus! (glob)
174 [255]
175 [255]
175 $ hg lfconvert bigfile-repo largefiles-repo
176 $ hg lfconvert bigfile-repo largefiles-repo
176 initializing destination largefiles-repo
177 initializing destination largefiles-repo
177 abort: repository largefiles-repo already exists!
178 abort: repository largefiles-repo already exists!
178 [255]
179 [255]
179
180
180 add another largefile to the new largefiles repo
181 add another largefile to the new largefiles repo
181 $ cd largefiles-repo
182 $ cd largefiles-repo
182 $ dd if=/dev/zero bs=1k count=1k > anotherlarge 2> /dev/null
183 $ dd if=/dev/zero bs=1k count=1k > anotherlarge 2> /dev/null
183 $ hg add --lfsize=1 anotherlarge
184 $ hg add --lfsize=1 anotherlarge
184 $ hg commit -m "add anotherlarge (should be a largefile)"
185 $ hg commit -m "add anotherlarge (should be a largefile)"
185 $ cat .hglf/anotherlarge
186 $ cat .hglf/anotherlarge
186 3b71f43ff30f4b15b5cd85dd9e95ebc7e84eb5a3
187 3b71f43ff30f4b15b5cd85dd9e95ebc7e84eb5a3
187 $ cd ..
188 $ cd ..
188
189
189 round-trip: converting back to a normal (non-largefiles) repo with
190 round-trip: converting back to a normal (non-largefiles) repo with
190 "lfconvert --to-normal" should give the same as ../bigfile-repo
191 "lfconvert --to-normal" should give the same as ../bigfile-repo
191 $ cd largefiles-repo
192 $ cd largefiles-repo
192 $ hg lfconvert --to-normal . ../normal-repo
193 $ hg lfconvert --to-normal . ../normal-repo
193 initializing destination ../normal-repo
194 initializing destination ../normal-repo
194 $ cd ../normal-repo
195 $ cd ../normal-repo
195 $ cat >> .hg/hgrc <<EOF
196 $ cat >> .hg/hgrc <<EOF
196 > [extensions]
197 > [extensions]
197 > largefiles = !
198 > largefiles = !
198 > EOF
199 > EOF
199
200
200 # Hmmm: the changeset ID for rev 5 is different from the original
201 # Hmmm: the changeset ID for rev 5 is different from the original
201 # normal repo (../bigfile-repo), because the changelog filelist
202 # normal repo (../bigfile-repo), because the changelog filelist
202 # differs between the two incarnations of rev 5: this repo includes
203 # differs between the two incarnations of rev 5: this repo includes
203 # 'large' in the list, but ../bigfile-repo does not. Since rev 5
204 # 'large' in the list, but ../bigfile-repo does not. Since rev 5
204 # removes 'large' relative to the first parent in both repos, it seems
205 # removes 'large' relative to the first parent in both repos, it seems
205 # to me that lfconvert is doing a *better* job than
206 # to me that lfconvert is doing a *better* job than
206 # "hg remove" + "hg merge" + "hg commit".
207 # "hg remove" + "hg merge" + "hg commit".
207 # $ hg -R ../bigfile-repo debugdata -c 5
208 # $ hg -R ../bigfile-repo debugdata -c 5
208 # $ hg debugdata -c 5
209 # $ hg debugdata -c 5
209 $ hg glog --template "{rev}:{node|short} {desc|firstline}\n"
210 $ hg glog --template "{rev}:{node|short} {desc|firstline}\n"
210 o 6:1635824e6f59 add anotherlarge (should be a largefile)
211 o 6:1635824e6f59 add anotherlarge (should be a largefile)
211 |
212 |
212 o 5:7215f8deeaaf merge
213 o 5:7215f8deeaaf merge
213 |\
214 |\
214 | o 4:7285f817b77e remove large, normal3
215 | o 4:7285f817b77e remove large, normal3
215 | |
216 | |
216 | o 3:67e3892e3534 add normal3, modify sub/*
217 | o 3:67e3892e3534 add normal3, modify sub/*
217 | |
218 | |
218 o | 2:c96c8beb5d56 rename sub/ to stuff/
219 o | 2:c96c8beb5d56 rename sub/ to stuff/
219 |/
220 |/
220 o 1:020c65d24e11 add sub/*
221 o 1:020c65d24e11 add sub/*
221 |
222 |
222 o 0:117b8328f97a add large, normal1
223 o 0:117b8328f97a add large, normal1
223
224
224 $ hg update
225 $ hg update
225 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
226 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
226 $ hg locate
227 $ hg locate
227 anotherlarge
228 anotherlarge
228 normal1
229 normal1
229 stuff/maybelarge.dat
230 stuff/maybelarge.dat
230 stuff/normal2
231 stuff/normal2
231 $ [ -d .hg/largefiles ] && echo fail || echo pass
232 $ [ -d .hg/largefiles ] && echo fail || echo pass
232 pass
233 pass
General Comments 0
You need to be logged in to leave comments. Login now