Show More
@@ -1,488 +1,483 b'' | |||
|
1 | 1 | # Copyright 2009-2010 Gregory P. Ward |
|
2 | 2 | # Copyright 2009-2010 Intelerad Medical Systems Incorporated |
|
3 | 3 | # Copyright 2010-2011 Fog Creek Software |
|
4 | 4 | # Copyright 2010-2011 Unity Technologies |
|
5 | 5 | # |
|
6 | 6 | # This software may be used and distributed according to the terms of the |
|
7 | 7 | # GNU General Public License version 2 or any later version. |
|
8 | 8 | |
|
9 | 9 | '''High-level command functions: lfadd() et. al, plus the cmdtable.''' |
|
10 | 10 | |
|
11 | 11 | import os |
|
12 | 12 | import shutil |
|
13 | 13 | |
|
14 | 14 | from mercurial import util, match as match_, hg, node, context, error |
|
15 | 15 | from mercurial.i18n import _ |
|
16 | 16 | |
|
17 | 17 | import lfutil |
|
18 | 18 | import basestore |
|
19 | 19 | |
|
20 | 20 | # -- Commands ---------------------------------------------------------- |
|
21 | 21 | |
|
22 | 22 | def lfconvert(ui, src, dest, *pats, **opts): |
|
23 | 23 | '''Convert a normal repository to a largefiles repository |
|
24 | 24 | |
|
25 | 25 | Convert source repository creating an identical repository, except that all |
|
26 | 26 | files that match the patterns given, or are over the given size will be |
|
27 | 27 | added as largefiles. The size used to determine whether or not to track a |
|
28 | 28 | file as a largefile is the size of the first version of the file. After |
|
29 | 29 | running this command you will need to make sure that largefiles is enabled |
|
30 | 30 | anywhere you intend to push the new repository.''' |
|
31 | 31 | |
|
32 | 32 | if opts['tonormal']: |
|
33 | 33 | tolfile = False |
|
34 | 34 | else: |
|
35 | 35 | tolfile = True |
|
36 | 36 | size = opts['size'] |
|
37 | 37 | if not size: |
|
38 | 38 | size = ui.config(lfutil.longname, 'size', default=None) |
|
39 | 39 | try: |
|
40 | 40 | size = int(size) |
|
41 | 41 | except ValueError: |
|
42 | 42 | raise util.Abort(_('largefiles.size must be integer, was %s\n') |
|
43 | 43 | % size) |
|
44 | 44 | except TypeError: |
|
45 | 45 | raise util.Abort(_('size must be specified')) |
|
46 | 46 | |
|
47 | 47 | try: |
|
48 | 48 | rsrc = hg.repository(ui, src) |
|
49 | 49 | if not rsrc.local(): |
|
50 | 50 | raise util.Abort(_('%s is not a local Mercurial repo') % src) |
|
51 | 51 | except error.RepoError, err: |
|
52 | 52 | ui.traceback() |
|
53 | 53 | raise util.Abort(err.args[0]) |
|
54 | 54 | if os.path.exists(dest): |
|
55 | 55 | if not os.path.isdir(dest): |
|
56 | 56 | raise util.Abort(_('destination %s already exists') % dest) |
|
57 | 57 | elif os.listdir(dest): |
|
58 | 58 | raise util.Abort(_('destination %s is not empty') % dest) |
|
59 | 59 | try: |
|
60 | 60 | ui.status(_('initializing destination %s\n') % dest) |
|
61 | 61 | rdst = hg.repository(ui, dest, create=True) |
|
62 | 62 | if not rdst.local(): |
|
63 | 63 | raise util.Abort(_('%s is not a local Mercurial repo') % dest) |
|
64 | 64 | except error.RepoError: |
|
65 | 65 | ui.traceback() |
|
66 | 66 | raise util.Abort(_('%s is not a repo') % dest) |
|
67 | 67 | |
|
68 | 68 | success = False |
|
69 | 69 | try: |
|
70 | 70 | # Lock destination to prevent modification while it is converted to. |
|
71 | 71 | # Don't need to lock src because we are just reading from its history |
|
72 | 72 | # which can't change. |
|
73 | 73 | dst_lock = rdst.lock() |
|
74 | 74 | |
|
75 | 75 | # Get a list of all changesets in the source. The easy way to do this |
|
76 | 76 | # is to simply walk the changelog, using changelog.nodesbewteen(). |
|
77 | 77 | # Take a look at mercurial/revlog.py:639 for more details. |
|
78 | 78 | # Use a generator instead of a list to decrease memory usage |
|
79 | 79 | ctxs = (rsrc[ctx] for ctx in rsrc.changelog.nodesbetween(None, |
|
80 | 80 | rsrc.heads())[0]) |
|
81 | 81 | revmap = {node.nullid: node.nullid} |
|
82 | 82 | if tolfile: |
|
83 | 83 | lfiles = set() |
|
84 | 84 | normalfiles = set() |
|
85 | 85 | if not pats: |
|
86 | 86 | pats = ui.config(lfutil.longname, 'patterns', default=()) |
|
87 | 87 | if pats: |
|
88 | 88 | pats = pats.split(' ') |
|
89 | 89 | if pats: |
|
90 | 90 | matcher = match_.match(rsrc.root, '', list(pats)) |
|
91 | 91 | else: |
|
92 | 92 | matcher = None |
|
93 | 93 | |
|
94 | 94 | lfiletohash = {} |
|
95 | 95 | for ctx in ctxs: |
|
96 | 96 | ui.progress(_('converting revisions'), ctx.rev(), |
|
97 | 97 | unit=_('revision'), total=rsrc['tip'].rev()) |
|
98 | 98 | _lfconvert_addchangeset(rsrc, rdst, ctx, revmap, |
|
99 | 99 | lfiles, normalfiles, matcher, size, lfiletohash) |
|
100 | 100 | ui.progress(_('converting revisions'), None) |
|
101 | 101 | |
|
102 | 102 | if os.path.exists(rdst.wjoin(lfutil.shortname)): |
|
103 | 103 | shutil.rmtree(rdst.wjoin(lfutil.shortname)) |
|
104 | 104 | |
|
105 | 105 | for f in lfiletohash.keys(): |
|
106 | 106 | if os.path.isfile(rdst.wjoin(f)): |
|
107 | 107 | os.unlink(rdst.wjoin(f)) |
|
108 | 108 | try: |
|
109 | 109 | os.removedirs(os.path.dirname(rdst.wjoin(f))) |
|
110 | 110 | except OSError: |
|
111 | 111 | pass |
|
112 | 112 | |
|
113 | 113 | else: |
|
114 | 114 | for ctx in ctxs: |
|
115 | 115 | ui.progress(_('converting revisions'), ctx.rev(), |
|
116 | 116 | unit=_('revision'), total=rsrc['tip'].rev()) |
|
117 | 117 | _addchangeset(ui, rsrc, rdst, ctx, revmap) |
|
118 | 118 | |
|
119 | 119 | ui.progress(_('converting revisions'), None) |
|
120 | 120 | success = True |
|
121 | 121 | finally: |
|
122 | 122 | if not success: |
|
123 | 123 | # we failed, remove the new directory |
|
124 | 124 | shutil.rmtree(rdst.root) |
|
125 | 125 | dst_lock.release() |
|
126 | 126 | |
|
127 | 127 | def _addchangeset(ui, rsrc, rdst, ctx, revmap): |
|
128 | 128 | # Convert src parents to dst parents |
|
129 | 129 | parents = [] |
|
130 | 130 | for p in ctx.parents(): |
|
131 | 131 | parents.append(revmap[p.node()]) |
|
132 | 132 | while len(parents) < 2: |
|
133 | 133 | parents.append(node.nullid) |
|
134 | 134 | |
|
135 | 135 | # Generate list of changed files |
|
136 | 136 | files = set(ctx.files()) |
|
137 | 137 | if node.nullid not in parents: |
|
138 | 138 | mc = ctx.manifest() |
|
139 | 139 | mp1 = ctx.parents()[0].manifest() |
|
140 | 140 | mp2 = ctx.parents()[1].manifest() |
|
141 | 141 | files |= (set(mp1) | set(mp2)) - set(mc) |
|
142 | 142 | for f in mc: |
|
143 | 143 | if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None): |
|
144 | 144 | files.add(f) |
|
145 | 145 | |
|
146 | 146 | def getfilectx(repo, memctx, f): |
|
147 | 147 | if lfutil.standin(f) in files: |
|
148 | 148 | # if the file isn't in the manifest then it was removed |
|
149 | 149 | # or renamed, raise IOError to indicate this |
|
150 | 150 | try: |
|
151 | 151 | fctx = ctx.filectx(lfutil.standin(f)) |
|
152 | 152 | except error.LookupError: |
|
153 | 153 | raise IOError() |
|
154 | 154 | renamed = fctx.renamed() |
|
155 | 155 | if renamed: |
|
156 | 156 | renamed = lfutil.splitstandin(renamed[0]) |
|
157 | 157 | |
|
158 | 158 | hash = fctx.data().strip() |
|
159 | 159 | path = lfutil.findfile(rsrc, hash) |
|
160 | 160 | ### TODO: What if the file is not cached? |
|
161 | 161 | data = '' |
|
162 | 162 | fd = None |
|
163 | 163 | try: |
|
164 | 164 | fd = open(path, 'rb') |
|
165 | 165 | data = fd.read() |
|
166 | 166 | finally: |
|
167 | 167 | if fd: |
|
168 | 168 | fd.close() |
|
169 | 169 | return context.memfilectx(f, data, 'l' in fctx.flags(), |
|
170 | 170 | 'x' in fctx.flags(), renamed) |
|
171 | 171 | else: |
|
172 | 172 | try: |
|
173 | 173 | fctx = ctx.filectx(f) |
|
174 | 174 | except error.LookupError: |
|
175 | 175 | raise IOError() |
|
176 | 176 | renamed = fctx.renamed() |
|
177 | 177 | if renamed: |
|
178 | 178 | renamed = renamed[0] |
|
179 | 179 | data = fctx.data() |
|
180 | 180 | if f == '.hgtags': |
|
181 | 181 | newdata = [] |
|
182 | 182 | for line in data.splitlines(): |
|
183 | 183 | id, name = line.split(' ', 1) |
|
184 | 184 | newdata.append('%s %s\n' % (node.hex(revmap[node.bin(id)]), |
|
185 | 185 | name)) |
|
186 | 186 | data = ''.join(newdata) |
|
187 | 187 | return context.memfilectx(f, data, 'l' in fctx.flags(), |
|
188 | 188 | 'x' in fctx.flags(), renamed) |
|
189 | 189 | |
|
190 | 190 | dstfiles = [] |
|
191 | 191 | for file in files: |
|
192 | 192 | if lfutil.isstandin(file): |
|
193 | 193 | dstfiles.append(lfutil.splitstandin(file)) |
|
194 | 194 | else: |
|
195 | 195 | dstfiles.append(file) |
|
196 | 196 | # Commit |
|
197 | 197 | mctx = context.memctx(rdst, parents, ctx.description(), dstfiles, |
|
198 | 198 | getfilectx, ctx.user(), ctx.date(), ctx.extra()) |
|
199 | 199 | ret = rdst.commitctx(mctx) |
|
200 | 200 | rdst.dirstate.setparents(ret) |
|
201 | 201 | revmap[ctx.node()] = rdst.changelog.tip() |
|
202 | 202 | |
|
203 | 203 | def _lfconvert_addchangeset(rsrc, rdst, ctx, revmap, lfiles, normalfiles, |
|
204 | 204 | matcher, size, lfiletohash): |
|
205 | 205 | # Convert src parents to dst parents |
|
206 | 206 | parents = [] |
|
207 | 207 | for p in ctx.parents(): |
|
208 | 208 | parents.append(revmap[p.node()]) |
|
209 | 209 | while len(parents) < 2: |
|
210 | 210 | parents.append(node.nullid) |
|
211 | 211 | |
|
212 | 212 | # Generate list of changed files |
|
213 | 213 | files = set(ctx.files()) |
|
214 | 214 | if node.nullid not in parents: |
|
215 | 215 | mc = ctx.manifest() |
|
216 | 216 | mp1 = ctx.parents()[0].manifest() |
|
217 | 217 | mp2 = ctx.parents()[1].manifest() |
|
218 | 218 | files |= (set(mp1) | set(mp2)) - set(mc) |
|
219 | 219 | for f in mc: |
|
220 | 220 | if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None): |
|
221 | 221 | files.add(f) |
|
222 | 222 | |
|
223 | 223 | dstfiles = [] |
|
224 | 224 | for f in files: |
|
225 | 225 | if f not in lfiles and f not in normalfiles: |
|
226 | 226 | islfile = _islfile(f, ctx, matcher, size) |
|
227 | 227 | # If this file was renamed or copied then copy |
|
228 | 228 | # the lfileness of its predecessor |
|
229 | 229 | if f in ctx.manifest(): |
|
230 | 230 | fctx = ctx.filectx(f) |
|
231 | 231 | renamed = fctx.renamed() |
|
232 | 232 | renamedlfile = renamed and renamed[0] in lfiles |
|
233 | 233 | islfile |= renamedlfile |
|
234 | 234 | if 'l' in fctx.flags(): |
|
235 | 235 | if renamedlfile: |
|
236 | 236 | raise util.Abort( |
|
237 | 237 | _('Renamed/copied largefile %s becomes symlink') |
|
238 | 238 | % f) |
|
239 | 239 | islfile = False |
|
240 | 240 | if islfile: |
|
241 | 241 | lfiles.add(f) |
|
242 | 242 | else: |
|
243 | 243 | normalfiles.add(f) |
|
244 | 244 | |
|
245 | 245 | if f in lfiles: |
|
246 | 246 | dstfiles.append(lfutil.standin(f)) |
|
247 | 247 | # lfile in manifest if it has not been removed/renamed |
|
248 | 248 | if f in ctx.manifest(): |
|
249 | 249 | if 'l' in ctx.filectx(f).flags(): |
|
250 | 250 | if renamed and renamed[0] in lfiles: |
|
251 | 251 | raise util.Abort(_('largefile %s becomes symlink') % f) |
|
252 | 252 | |
|
253 | 253 | # lfile was modified, update standins |
|
254 | 254 | fullpath = rdst.wjoin(f) |
|
255 | 255 | lfutil.createdir(os.path.dirname(fullpath)) |
|
256 | 256 | m = util.sha1('') |
|
257 | 257 | m.update(ctx[f].data()) |
|
258 | 258 | hash = m.hexdigest() |
|
259 | 259 | if f not in lfiletohash or lfiletohash[f] != hash: |
|
260 | 260 | try: |
|
261 | 261 | fd = open(fullpath, 'wb') |
|
262 | 262 | fd.write(ctx[f].data()) |
|
263 | 263 | finally: |
|
264 | 264 | if fd: |
|
265 | 265 | fd.close() |
|
266 | 266 | executable = 'x' in ctx[f].flags() |
|
267 | 267 | os.chmod(fullpath, lfutil.getmode(executable)) |
|
268 | 268 | lfutil.writestandin(rdst, lfutil.standin(f), hash, |
|
269 | 269 | executable) |
|
270 | 270 | lfiletohash[f] = hash |
|
271 | 271 | else: |
|
272 | 272 | # normal file |
|
273 | 273 | dstfiles.append(f) |
|
274 | 274 | |
|
275 | 275 | def getfilectx(repo, memctx, f): |
|
276 | 276 | if lfutil.isstandin(f): |
|
277 | 277 | # if the file isn't in the manifest then it was removed |
|
278 | 278 | # or renamed, raise IOError to indicate this |
|
279 | 279 | srcfname = lfutil.splitstandin(f) |
|
280 | 280 | try: |
|
281 | 281 | fctx = ctx.filectx(srcfname) |
|
282 | 282 | except error.LookupError: |
|
283 | 283 | raise IOError() |
|
284 | 284 | renamed = fctx.renamed() |
|
285 | 285 | if renamed: |
|
286 | 286 | # standin is always a lfile because lfileness |
|
287 | 287 | # doesn't change after rename or copy |
|
288 | 288 | renamed = lfutil.standin(renamed[0]) |
|
289 | 289 | |
|
290 | 290 | return context.memfilectx(f, lfiletohash[srcfname], 'l' in |
|
291 | 291 | fctx.flags(), 'x' in fctx.flags(), renamed) |
|
292 | 292 | else: |
|
293 | 293 | try: |
|
294 | 294 | fctx = ctx.filectx(f) |
|
295 | 295 | except error.LookupError: |
|
296 | 296 | raise IOError() |
|
297 | 297 | renamed = fctx.renamed() |
|
298 | 298 | if renamed: |
|
299 | 299 | renamed = renamed[0] |
|
300 | 300 | |
|
301 | 301 | data = fctx.data() |
|
302 | 302 | if f == '.hgtags': |
|
303 | 303 | newdata = [] |
|
304 | 304 | for line in data.splitlines(): |
|
305 | 305 | id, name = line.split(' ', 1) |
|
306 | 306 | newdata.append('%s %s\n' % (node.hex(revmap[node.bin(id)]), |
|
307 | 307 | name)) |
|
308 | 308 | data = ''.join(newdata) |
|
309 | 309 | return context.memfilectx(f, data, 'l' in fctx.flags(), |
|
310 | 310 | 'x' in fctx.flags(), renamed) |
|
311 | 311 | |
|
312 | 312 | # Commit |
|
313 | 313 | mctx = context.memctx(rdst, parents, ctx.description(), dstfiles, |
|
314 | 314 | getfilectx, ctx.user(), ctx.date(), ctx.extra()) |
|
315 | 315 | ret = rdst.commitctx(mctx) |
|
316 | 316 | rdst.dirstate.setparents(ret) |
|
317 | 317 | revmap[ctx.node()] = rdst.changelog.tip() |
|
318 | 318 | |
|
319 | 319 | def _islfile(file, ctx, matcher, size): |
|
320 | 320 | ''' |
|
321 | 321 | A file is a lfile if it matches a pattern or is over |
|
322 | 322 | the given size. |
|
323 | 323 | ''' |
|
324 | 324 | # Never store hgtags or hgignore as lfiles |
|
325 | 325 | if file == '.hgtags' or file == '.hgignore' or file == '.hgsigs': |
|
326 | 326 | return False |
|
327 | 327 | if matcher and matcher(file): |
|
328 | 328 | return True |
|
329 | 329 | try: |
|
330 | 330 | return ctx.filectx(file).size() >= size * 1024 * 1024 |
|
331 | 331 | except error.LookupError: |
|
332 | 332 | return False |
|
333 | 333 | |
|
334 | 334 | def uploadlfiles(ui, rsrc, rdst, files): |
|
335 | 335 | '''upload largefiles to the central store''' |
|
336 | 336 | |
|
337 | 337 | # Don't upload locally. All largefiles are in the system wide cache |
|
338 | 338 | # so the other repo can just get them from there. |
|
339 | 339 | if not files or rdst.local(): |
|
340 | 340 | return |
|
341 | 341 | |
|
342 | 342 | store = basestore._openstore(rsrc, rdst, put=True) |
|
343 | 343 | |
|
344 | 344 | at = 0 |
|
345 | 345 | files = filter(lambda h: not store.exists(h), files) |
|
346 | 346 | for hash in files: |
|
347 | 347 | ui.progress(_('uploading largefiles'), at, unit='largefile', |
|
348 | 348 | total=len(files)) |
|
349 | 349 | source = lfutil.findfile(rsrc, hash) |
|
350 | 350 | if not source: |
|
351 | 351 | raise util.Abort(_('Missing largefile %s needs to be uploaded') |
|
352 | 352 | % hash) |
|
353 | 353 | # XXX check for errors here |
|
354 | 354 | store.put(source, hash) |
|
355 | 355 | at += 1 |
|
356 | 356 | ui.progress(_('uploading largefiles'), None) |
|
357 | 357 | |
|
358 | 358 | def verifylfiles(ui, repo, all=False, contents=False): |
|
359 | 359 | '''Verify that every big file revision in the current changeset |
|
360 | 360 | exists in the central store. With --contents, also verify that |
|
361 | 361 | the contents of each big file revision are correct (SHA-1 hash |
|
362 | 362 | matches the revision ID). With --all, check every changeset in |
|
363 | 363 | this repository.''' |
|
364 | 364 | if all: |
|
365 | 365 | # Pass a list to the function rather than an iterator because we know a |
|
366 | 366 | # list will work. |
|
367 | 367 | revs = range(len(repo)) |
|
368 | 368 | else: |
|
369 | 369 | revs = ['.'] |
|
370 | 370 | |
|
371 | 371 | store = basestore._openstore(repo) |
|
372 | 372 | return store.verify(revs, contents=contents) |
|
373 | 373 | |
|
374 | 374 | def cachelfiles(ui, repo, node): |
|
375 | 375 | '''cachelfiles ensures that all largefiles needed by the specified revision |
|
376 | 376 | are present in the repository's largefile cache. |
|
377 | 377 | |
|
378 | 378 | returns a tuple (cached, missing). cached is the list of files downloaded |
|
379 | 379 | by this operation; missing is the list of files that were needed but could |
|
380 | 380 | not be found.''' |
|
381 | 381 | lfiles = lfutil.listlfiles(repo, node) |
|
382 | 382 | toget = [] |
|
383 | 383 | |
|
384 | 384 | for lfile in lfiles: |
|
385 | 385 | expectedhash = repo[node][lfutil.standin(lfile)].data().strip() |
|
386 | 386 | # if it exists and its hash matches, it might have been locally |
|
387 | 387 | # modified before updating and the user chose 'local'. in this case, |
|
388 | 388 | # it will not be in any store, so don't look for it. |
|
389 | 389 | if (not os.path.exists(repo.wjoin(lfile)) \ |
|
390 | 390 | or expectedhash != lfutil.hashfile(repo.wjoin(lfile))) and \ |
|
391 | 391 | not lfutil.findfile(repo, expectedhash): |
|
392 | 392 | toget.append((lfile, expectedhash)) |
|
393 | 393 | |
|
394 | 394 | if toget: |
|
395 | 395 | store = basestore._openstore(repo) |
|
396 | 396 | ret = store.get(toget) |
|
397 | 397 | return ret |
|
398 | 398 | |
|
399 | 399 | return ([], []) |
|
400 | 400 | |
|
401 | 401 | def updatelfiles(ui, repo, filelist=None, printmessage=True): |
|
402 | 402 | wlock = repo.wlock() |
|
403 | 403 | try: |
|
404 | 404 | lfdirstate = lfutil.openlfdirstate(ui, repo) |
|
405 | 405 | lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate) |
|
406 | 406 | |
|
407 | 407 | if filelist is not None: |
|
408 | 408 | lfiles = [f for f in lfiles if f in filelist] |
|
409 | 409 | |
|
410 | 410 | printed = False |
|
411 | 411 | if printmessage and lfiles: |
|
412 | 412 | ui.status(_('getting changed largefiles\n')) |
|
413 | 413 | printed = True |
|
414 | 414 | cachelfiles(ui, repo, '.') |
|
415 | 415 | |
|
416 | 416 | updated, removed = 0, 0 |
|
417 | 417 | for i in map(lambda f: _updatelfile(repo, lfdirstate, f), lfiles): |
|
418 | 418 | # increment the appropriate counter according to _updatelfile's |
|
419 | 419 | # return value |
|
420 | 420 | updated += i > 0 and i or 0 |
|
421 | 421 | removed -= i < 0 and i or 0 |
|
422 | 422 | if printmessage and (removed or updated) and not printed: |
|
423 | 423 | ui.status(_('getting changed largefiles\n')) |
|
424 | 424 | printed = True |
|
425 | 425 | |
|
426 | 426 | lfdirstate.write() |
|
427 | 427 | if printed and printmessage: |
|
428 | 428 | ui.status(_('%d largefiles updated, %d removed\n') % (updated, |
|
429 | 429 | removed)) |
|
430 | 430 | finally: |
|
431 | 431 | wlock.release() |
|
432 | 432 | |
|
433 | 433 | def _updatelfile(repo, lfdirstate, lfile): |
|
434 | 434 | '''updates a single largefile and copies the state of its standin from |
|
435 | 435 | the repository's dirstate to its state in the lfdirstate. |
|
436 | 436 | |
|
437 | 437 | returns 1 if the file was modified, -1 if the file was removed, 0 if the |
|
438 | 438 | file was unchanged, and None if the needed largefile was missing from the |
|
439 | 439 | cache.''' |
|
440 | 440 | ret = 0 |
|
441 | 441 | abslfile = repo.wjoin(lfile) |
|
442 | 442 | absstandin = repo.wjoin(lfutil.standin(lfile)) |
|
443 | 443 | if os.path.exists(absstandin): |
|
444 | 444 | if os.path.exists(absstandin+'.orig'): |
|
445 | 445 | shutil.copyfile(abslfile, abslfile+'.orig') |
|
446 | 446 | expecthash = lfutil.readstandin(repo, lfile) |
|
447 | 447 | if expecthash != '' and \ |
|
448 | 448 | (not os.path.exists(abslfile) or \ |
|
449 | 449 | expecthash != lfutil.hashfile(abslfile)): |
|
450 | 450 | if not lfutil.copyfromcache(repo, expecthash, lfile): |
|
451 | 451 | return None # don't try to set the mode or update the dirstate |
|
452 | 452 | ret = 1 |
|
453 | 453 | mode = os.stat(absstandin).st_mode |
|
454 | 454 | if mode != os.stat(abslfile).st_mode: |
|
455 | 455 | os.chmod(abslfile, mode) |
|
456 | 456 | ret = 1 |
|
457 | 457 | else: |
|
458 | 458 | if os.path.exists(abslfile): |
|
459 | 459 | os.unlink(abslfile) |
|
460 | 460 | ret = -1 |
|
461 | 461 | state = repo.dirstate[lfutil.standin(lfile)] |
|
462 | 462 | if state == 'n': |
|
463 | 463 | lfdirstate.normal(lfile) |
|
464 | 464 | elif state == 'r': |
|
465 | 465 | lfdirstate.remove(lfile) |
|
466 | 466 | elif state == 'a': |
|
467 | 467 | lfdirstate.add(lfile) |
|
468 | 468 | elif state == '?': |
|
469 | try: | |
|
470 | # Mercurial >= 1.9 | |
|
471 | 469 |
|
|
472 | except AttributeError: | |
|
473 | # Mercurial <= 1.8 | |
|
474 | lfdirstate.forget(lfile) | |
|
475 | 470 | return ret |
|
476 | 471 | |
|
477 | 472 | # -- hg commands declarations ------------------------------------------------ |
|
478 | 473 | |
|
479 | 474 | |
|
480 | 475 | cmdtable = { |
|
481 | 476 | 'lfconvert': (lfconvert, |
|
482 | 477 | [('s', 'size', 0, 'All files over this size (in megabytes) ' |
|
483 | 478 | 'will be considered largefiles. This can also be specified ' |
|
484 | 479 | 'in your hgrc as [largefiles].size.'), |
|
485 | 480 | ('','tonormal',False, |
|
486 | 481 | 'Convert from a largefiles repo to a normal repo')], |
|
487 | 482 | _('hg lfconvert SOURCE DEST [FILE ...]')), |
|
488 | 483 | } |
@@ -1,493 +1,436 b'' | |||
|
1 | 1 | # Copyright 2009-2010 Gregory P. Ward |
|
2 | 2 | # Copyright 2009-2010 Intelerad Medical Systems Incorporated |
|
3 | 3 | # Copyright 2010-2011 Fog Creek Software |
|
4 | 4 | # Copyright 2010-2011 Unity Technologies |
|
5 | 5 | # |
|
6 | 6 | # This software may be used and distributed according to the terms of the |
|
7 | 7 | # GNU General Public License version 2 or any later version. |
|
8 | 8 | |
|
9 | 9 | '''largefiles utility code: must not import other modules in this package.''' |
|
10 | 10 | |
|
11 | 11 | import os |
|
12 | 12 | import errno |
|
13 | import inspect | |
|
14 | 13 | import shutil |
|
15 | 14 | import stat |
|
16 | 15 | import hashlib |
|
17 | 16 | |
|
18 |
from mercurial import |
|
|
19 | url as url_, util | |
|
17 | from mercurial import dirstate, httpconnection, match as match_, util | |
|
20 | 18 | from mercurial.i18n import _ |
|
21 | 19 | |
|
22 | 20 | try: |
|
23 | 21 | from mercurial import scmutil |
|
24 | 22 | except ImportError: |
|
25 | 23 | pass |
|
26 | 24 | |
|
27 | 25 | shortname = '.hglf' |
|
28 | 26 | longname = 'largefiles' |
|
29 | 27 | |
|
30 | 28 | |
|
31 | 29 | # -- Portability wrappers ---------------------------------------------- |
|
32 | 30 | |
|
33 | if 'subrepos' in inspect.getargspec(dirstate.dirstate.status)[0]: | |
|
34 | # for Mercurial >= 1.5 | |
|
35 | 31 |
|
|
36 | 32 |
|
|
37 | else: | |
|
38 | # for Mercurial <= 1.4 | |
|
39 | def dirstate_walk(dirstate, matcher, unknown=False, ignored=False): | |
|
40 | return dirstate.walk(matcher, unknown, ignored) | |
|
41 | 33 | |
|
42 | 34 | def repo_add(repo, list): |
|
43 | try: | |
|
44 | # Mercurial <= 1.5 | |
|
45 | add = repo.add | |
|
46 | except AttributeError: | |
|
47 | # Mercurial >= 1.6 | |
|
48 | 35 |
|
|
49 | 36 | return add(list) |
|
50 | 37 | |
|
51 | 38 | def repo_remove(repo, list, unlink=False): |
|
52 | try: | |
|
53 | # Mercurial <= 1.5 | |
|
54 | remove = repo.remove | |
|
55 | except AttributeError: | |
|
56 | # Mercurial >= 1.6 | |
|
57 | try: | |
|
58 | # Mercurial <= 1.8 | |
|
59 | remove = repo[None].remove | |
|
60 | except AttributeError: | |
|
61 | # Mercurial >= 1.9 | |
|
62 | 39 |
|
|
63 | 40 |
|
|
64 | 41 |
|
|
65 | 42 |
|
|
66 | 43 |
|
|
67 | 44 |
|
|
68 | 45 |
|
|
69 | 46 |
|
|
70 | 47 |
|
|
71 | 48 |
|
|
72 | 49 |
|
|
73 | 50 |
|
|
74 | 51 |
|
|
75 | ||
|
76 | 52 | return remove(list, unlink=unlink) |
|
77 | 53 | |
|
78 | 54 | def repo_forget(repo, list): |
|
79 | try: | |
|
80 | # Mercurial <= 1.5 | |
|
81 | forget = repo.forget | |
|
82 | except AttributeError: | |
|
83 | # Mercurial >= 1.6 | |
|
84 | 55 |
|
|
85 | 56 | return forget(list) |
|
86 | 57 | |
|
87 | 58 | def findoutgoing(repo, remote, force): |
|
88 | # First attempt is for Mercurial <= 1.5 second is for >= 1.6 | |
|
89 | try: | |
|
90 | return repo.findoutgoing(remote) | |
|
91 | except AttributeError: | |
|
92 | 59 |
|
|
93 | try: | |
|
94 | # Mercurial <= 1.8 | |
|
95 | return discovery.findoutgoing(repo, remote, force=force) | |
|
96 | except AttributeError: | |
|
97 | # Mercurial >= 1.9 | |
|
98 | 60 |
|
|
99 | 61 |
|
|
100 | 62 |
|
|
101 | 63 | |
|
102 | 64 | # -- Private worker functions ------------------------------------------ |
|
103 | 65 | |
|
104 | 66 | def link(src, dest): |
|
105 | 67 | try: |
|
106 | 68 | util.oslink(src, dest) |
|
107 | 69 | except OSError: |
|
108 | 70 | # If hardlinks fail fall back on copy |
|
109 | 71 | shutil.copyfile(src, dest) |
|
110 | 72 | os.chmod(dest, os.stat(src).st_mode) |
|
111 | 73 | |
|
112 | 74 | def systemcachepath(ui, hash): |
|
113 | 75 | path = ui.config(longname, 'systemcache', None) |
|
114 | 76 | if path: |
|
115 | 77 | path = os.path.join(path, hash) |
|
116 | 78 | else: |
|
117 | 79 | if os.name == 'nt': |
|
118 | 80 | path = os.path.join(os.getenv('LOCALAPPDATA') or \ |
|
119 | 81 | os.getenv('APPDATA'), longname, hash) |
|
120 | 82 | elif os.name == 'posix': |
|
121 | 83 | path = os.path.join(os.getenv('HOME'), '.' + longname, hash) |
|
122 | 84 | else: |
|
123 | 85 | raise util.Abort(_('Unknown operating system: %s\n') % os.name) |
|
124 | 86 | return path |
|
125 | 87 | |
|
126 | 88 | def insystemcache(ui, hash): |
|
127 | 89 | return os.path.exists(systemcachepath(ui, hash)) |
|
128 | 90 | |
|
129 | 91 | def findfile(repo, hash): |
|
130 | 92 | if incache(repo, hash): |
|
131 | 93 | repo.ui.note(_('Found %s in cache\n') % hash) |
|
132 | 94 | return cachepath(repo, hash) |
|
133 | 95 | if insystemcache(repo.ui, hash): |
|
134 | 96 | repo.ui.note(_('Found %s in system cache\n') % hash) |
|
135 | 97 | return systemcachepath(repo.ui, hash) |
|
136 | 98 | return None |
|
137 | 99 | |
|
138 | 100 | class largefiles_dirstate(dirstate.dirstate): |
|
139 | 101 | def __getitem__(self, key): |
|
140 | 102 | return super(largefiles_dirstate, self).__getitem__(unixpath(key)) |
|
141 | 103 | def normal(self, f): |
|
142 | 104 | return super(largefiles_dirstate, self).normal(unixpath(f)) |
|
143 | 105 | def remove(self, f): |
|
144 | 106 | return super(largefiles_dirstate, self).remove(unixpath(f)) |
|
145 | 107 | def add(self, f): |
|
146 | 108 | return super(largefiles_dirstate, self).add(unixpath(f)) |
|
147 | 109 | def drop(self, f): |
|
148 | 110 | return super(largefiles_dirstate, self).drop(unixpath(f)) |
|
149 | 111 | def forget(self, f): |
|
150 | 112 | return super(largefiles_dirstate, self).forget(unixpath(f)) |
|
151 | 113 | |
|
152 | 114 | def openlfdirstate(ui, repo): |
|
153 | 115 | ''' |
|
154 | 116 | Return a dirstate object that tracks big files: i.e. its root is the |
|
155 | 117 | repo root, but it is saved in .hg/largefiles/dirstate. |
|
156 | 118 | ''' |
|
157 | 119 | admin = repo.join(longname) |
|
158 | try: | |
|
159 | # Mercurial >= 1.9 | |
|
160 | 120 |
|
|
161 | except ImportError: | |
|
162 | # Mercurial <= 1.8 | |
|
163 | opener = util.opener(admin) | |
|
164 | 121 | if util.safehasattr(repo.dirstate, '_validate'): |
|
165 | 122 | lfdirstate = largefiles_dirstate(opener, ui, repo.root, |
|
166 | 123 | repo.dirstate._validate) |
|
167 | 124 | else: |
|
168 | 125 | lfdirstate = largefiles_dirstate(opener, ui, repo.root) |
|
169 | 126 | |
|
170 | 127 | # If the largefiles dirstate does not exist, populate and create it. This |
|
171 | 128 | # ensures that we create it on the first meaningful largefiles operation in |
|
172 | 129 | # a new clone. It also gives us an easy way to forcibly rebuild largefiles |
|
173 | 130 | # state: |
|
174 | 131 | # rm .hg/largefiles/dirstate && hg status |
|
175 | 132 | # Or even, if things are really messed up: |
|
176 | 133 | # rm -rf .hg/largefiles && hg status |
|
177 | 134 | if not os.path.exists(os.path.join(admin, 'dirstate')): |
|
178 | 135 | util.makedirs(admin) |
|
179 | 136 | matcher = getstandinmatcher(repo) |
|
180 | 137 | for standin in dirstate_walk(repo.dirstate, matcher): |
|
181 | 138 | lfile = splitstandin(standin) |
|
182 | 139 | hash = readstandin(repo, lfile) |
|
183 | 140 | lfdirstate.normallookup(lfile) |
|
184 | 141 | try: |
|
185 | 142 | if hash == hashfile(lfile): |
|
186 | 143 | lfdirstate.normal(lfile) |
|
187 | 144 | except IOError, err: |
|
188 | 145 | if err.errno != errno.ENOENT: |
|
189 | 146 | raise |
|
190 | 147 | |
|
191 | 148 | lfdirstate.write() |
|
192 | 149 | |
|
193 | 150 | return lfdirstate |
|
194 | 151 | |
|
195 | 152 | def lfdirstate_status(lfdirstate, repo, rev): |
|
196 | 153 | wlock = repo.wlock() |
|
197 | 154 | try: |
|
198 | 155 | match = match_.always(repo.root, repo.getcwd()) |
|
199 | 156 | s = lfdirstate.status(match, [], False, False, False) |
|
200 | 157 | unsure, modified, added, removed, missing, unknown, ignored, clean = s |
|
201 | 158 | for lfile in unsure: |
|
202 | 159 | if repo[rev][standin(lfile)].data().strip() != \ |
|
203 | 160 | hashfile(repo.wjoin(lfile)): |
|
204 | 161 | modified.append(lfile) |
|
205 | 162 | else: |
|
206 | 163 | clean.append(lfile) |
|
207 | 164 | lfdirstate.normal(lfile) |
|
208 | 165 | lfdirstate.write() |
|
209 | 166 | finally: |
|
210 | 167 | wlock.release() |
|
211 | 168 | return (modified, added, removed, missing, unknown, ignored, clean) |
|
212 | 169 | |
|
213 | 170 | def listlfiles(repo, rev=None, matcher=None): |
|
214 | 171 | '''list largefiles in the working copy or specified changeset''' |
|
215 | 172 | |
|
216 | 173 | if matcher is None: |
|
217 | 174 | matcher = getstandinmatcher(repo) |
|
218 | 175 | |
|
219 | 176 | # ignore unknown files in working directory |
|
220 | 177 | return [splitstandin(f) for f in repo[rev].walk(matcher) \ |
|
221 | 178 | if rev is not None or repo.dirstate[f] != '?'] |
|
222 | 179 | |
|
223 | 180 | def incache(repo, hash): |
|
224 | 181 | return os.path.exists(cachepath(repo, hash)) |
|
225 | 182 | |
|
226 | 183 | def createdir(dir): |
|
227 | 184 | if not os.path.exists(dir): |
|
228 | 185 | os.makedirs(dir) |
|
229 | 186 | |
|
230 | 187 | def cachepath(repo, hash): |
|
231 | 188 | return repo.join(os.path.join(longname, hash)) |
|
232 | 189 | |
|
233 | 190 | def copyfromcache(repo, hash, filename): |
|
234 | 191 | '''copyfromcache copies the specified largefile from the repo or system |
|
235 | 192 | cache to the specified location in the repository. It will not throw an |
|
236 | 193 | exception on failure, as it is meant to be called only after ensuring that |
|
237 | 194 | the needed largefile exists in the cache.''' |
|
238 | 195 | path = findfile(repo, hash) |
|
239 | 196 | if path is None: |
|
240 | 197 | return False |
|
241 | 198 | util.makedirs(os.path.dirname(repo.wjoin(filename))) |
|
242 | 199 | shutil.copy(path, repo.wjoin(filename)) |
|
243 | 200 | return True |
|
244 | 201 | |
|
245 | 202 | def copytocache(repo, rev, file, uploaded=False): |
|
246 | 203 | hash = readstandin(repo, file) |
|
247 | 204 | if incache(repo, hash): |
|
248 | 205 | return |
|
249 | 206 | copytocacheabsolute(repo, repo.wjoin(file), hash) |
|
250 | 207 | |
|
251 | 208 | def copytocacheabsolute(repo, file, hash): |
|
252 | 209 | createdir(os.path.dirname(cachepath(repo, hash))) |
|
253 | 210 | if insystemcache(repo.ui, hash): |
|
254 | 211 | link(systemcachepath(repo.ui, hash), cachepath(repo, hash)) |
|
255 | 212 | else: |
|
256 | 213 | shutil.copyfile(file, cachepath(repo, hash)) |
|
257 | 214 | os.chmod(cachepath(repo, hash), os.stat(file).st_mode) |
|
258 | 215 | linktosystemcache(repo, hash) |
|
259 | 216 | |
|
260 | 217 | def linktosystemcache(repo, hash): |
|
261 | 218 | createdir(os.path.dirname(systemcachepath(repo.ui, hash))) |
|
262 | 219 | link(cachepath(repo, hash), systemcachepath(repo.ui, hash)) |
|
263 | 220 | |
|
def getstandinmatcher(repo, pats=None, opts=None):
    '''Return a match object that applies pats to the standin directory.

    Defaults were the mutable literals [] and {}, which Python shares
    across calls; use None sentinels instead (behavior is unchanged for
    all callers).'''
    if pats is None:
        pats = []
    if opts is None:
        opts = {}
    standindir = repo.pathto(shortname)
    if pats:
        # patterns supplied: search standin directory relative to current dir
        cwd = repo.getcwd()
        if os.path.isabs(cwd):
            # cwd is an absolute path for hg -R <reponame>
            # work relative to the repository root in this case
            cwd = ''
        pats = [os.path.join(standindir, cwd, pat) for pat in pats]
    elif os.path.isdir(standindir):
        # no patterns: relative to repo root
        pats = [standindir]
    else:
        # no patterns and no standin dir: return matcher that matches nothing
        match = match_.match(repo.root, None, [], exact=True)
        match.matchfn = lambda f: False
        return match
    return getmatcher(repo, pats, opts, showbad=False)
|
284 | 241 | |
|
def getmatcher(repo, pats=None, opts=None, showbad=True):
    '''Wrapper around scmutil.match() that adds showbad: if false, neuter
    the match object\'s bad() method so it does not print any warnings
    about missing files or directories.

    NOTE(review): the Mercurial >= 1.9 call below was reconstructed from
    the removed version-compat shim; confirm scmutil.match's signature
    against the targeted Mercurial release.'''
    if pats is None:
        pats = []
    if opts is None:
        opts = {}
    match = scmutil.match(repo[None], pats, opts)
    if not showbad:
        match.bad = lambda f, msg: None
    return match
|
299 | 251 | |
|
def composestandinmatcher(repo, rmatcher):
    '''Return a matcher that accepts standins corresponding to the files
    accepted by rmatcher. Pass the list of files in the matcher as the
    paths specified by the user.'''
    smatcher = getstandinmatcher(repo, rmatcher.files())
    isstandin = smatcher.matchfn

    def bothmatch(f):
        # a path matches only if it is a standin AND its largefile
        # counterpart is accepted by rmatcher
        return isstandin(f) and rmatcher.matchfn(splitstandin(f))

    smatcher.matchfn = bothmatch
    return smatcher
|
311 | 263 | |
|
def standin(filename):
    '''Return the repo-relative path to the standin for the specified big
    file.'''
    # Notes:
    # 1) Most callers want an absolute path, but _create_standin() needs
    #    it repo-relative so lfadd() can pass it to repo_add().  So leave
    #    it up to the caller to use repo.wjoin() to get an absolute path.
    # 2) Join with '/' because that's what dirstate always uses, even on
    #    Windows.  Change existing separator to '/' first in case we are
    #    passed filenames from an external source (like the command line).
    return '%s/%s' % (shortname, filename.replace(os.sep, '/'))
|
323 | 275 | |
|
def isstandin(filename):
    '''Return true if filename is a big file standin. filename must
    be in Mercurial\'s internal form (slash-separated).'''
    prefix = shortname + '/'
    return filename.startswith(prefix)
|
328 | 280 | |
|
def splitstandin(filename):
    '''Return the largefile path for a standin path, or None if filename
    is not a standin.'''
    # Split on '/' because that's what dirstate always uses, even on
    # Windows; normalize any local separators first in case the name came
    # from an external source (like the command line).
    parts = filename.replace(os.sep, '/').split('/', 1)
    if len(parts) == 2 and parts[0] == shortname:
        return parts[1]
    return None
|
338 | 290 | |
|
def updatestandin(repo, standin):
    '''Rewrite `standin` from the current contents of its largefile, when
    that largefile exists in the working directory.'''
    lfile = repo.wjoin(splitstandin(standin))
    if os.path.exists(lfile):
        writestandin(repo, standin, hashfile(lfile), getexecutable(lfile))
|
345 | 297 | |
|
def readstandin(repo, filename, node=None):
    '''read hex hash from standin for filename at given node, or working
    directory if no node is given'''
    standinfile = standin(filename)
    return repo[node][standinfile].data().strip()
|
350 | 302 | |
|
def writestandin(repo, standin, hash, executable):
    '''Write hash into the standin file at <repo.root>/<standin>.'''
    writehash(hash, repo.wjoin(standin), executable)
|
354 | 306 | |
|
def copyandhash(instream, outfile):
    '''Read bytes from instream (iterable) and write them to outfile,
    computing the SHA-1 hash of the data along the way. Close outfile
    when done and return the binary hash.'''
    hasher = util.sha1('')
    for chunk in instream:
        hasher.update(chunk)
        outfile.write(chunk)

    # Blecch: closing a file that somebody else opened is rude and
    # wrong. But it's so darn convenient and practical! After all,
    # outfile was opened just to copy and hash.
    outfile.close()

    return hasher.digest()
|
370 | 322 | |
|
def hashrepofile(repo, file):
    '''Hex SHA-1 of the working-copy file, '' if it does not exist.'''
    return hashfile(repo.wjoin(file))
|
373 | 325 | |
|
def hashfile(file):
    '''Return the hex SHA-1 digest of file's contents, or '' when the
    file does not exist.'''
    if not os.path.exists(file):
        return ''
    hasher = util.sha1('')
    fd = open(file, 'rb')
    for chunk in blockstream(fd):
        hasher.update(chunk)
    # blockstream() already closed fd; this close is a harmless no-op
    # kept from the original behavior.
    fd.close()
    return hasher.hexdigest()
|
383 | 335 | |
|
class limitreader(object):
    '''File-like wrapper exposing at most `limit` bytes of `f`.'''

    def __init__(self, f, limit):
        self.f = f          # underlying file-like object
        self.limit = limit  # bytes still allowed to be read

    def read(self, length):
        '''Read up to length bytes, never exceeding the remaining limit.'''
        if self.limit == 0:
            return ''
        # min() replaces the old 'length > limit and limit or length'
        # idiom, which silently picks the wrong operand whenever the
        # selected value is falsy (e.g. a zero limit).
        length = min(length, self.limit)
        self.limit -= length
        return self.f.read(length)

    def close(self):
        '''Do not close the underlying file; the caller owns it.'''
        pass
|
398 | 350 | |
|
def blockstream(infile, blocksize=128 * 1024):
    """Yield successive blocks of data read from infile, then close it."""
    chunk = infile.read(blocksize)
    while chunk:
        yield chunk
        chunk = infile.read(blocksize)
    # Same blecch as in copyandhash: infile was opened just for us.
    infile.close()
|
408 | 360 | |
|
def readhash(filename):
    '''Return the 40-byte hex hash stored in filename.

    Raises util.Abort when the file holds fewer than 40 bytes.  The
    read is wrapped in try/finally so the descriptor cannot leak if
    read() raises (the original leaked it on that path).'''
    rfile = open(filename, 'rb')
    try:
        hash = rfile.read(40)
    finally:
        rfile.close()
    if len(hash) < 40:
        raise util.Abort(_('bad hash in \'%s\' (only %d bytes long)')
                         % (filename, len(hash)))
    return hash
|
417 | 369 | |
|
def writehash(hash, filename, executable):
    '''Write hash plus a trailing newline to filename, creating parent
    directories as needed, then set the file mode from `executable`.'''
    util.makedirs(os.path.dirname(filename))
    if os.path.exists(filename):
        os.unlink(filename)
    fp = open(filename, 'wb')
    try:
        fp.write(hash)
        fp.write('\n')
    finally:
        fp.close()
    if os.path.exists(filename):
        os.chmod(filename, getmode(executable))
|
431 | 383 | |
|
def getexecutable(filename):
    '''Truthy when filename carries the execute bit for user, group, AND
    other.'''
    mode = os.stat(filename).st_mode
    return ((mode & stat.S_IXUSR) and
            (mode & stat.S_IXGRP) and
            (mode & stat.S_IXOTH))
|
436 | 388 | |
|
def getmode(executable):
    '''File mode for a largefile: rwxr-xr-x when executable, rw-r--r--
    otherwise.

    Uses the 0o octal notation (Python 2.6+) instead of the legacy 0755
    form, which is a syntax error on Python 3.'''
    if executable:
        return 0o755
    else:
        return 0o644
|
442 | 394 | |
|
def urljoin(first, second, *arg):
    '''Join two or more URL path components, collapsing the slashes at
    each boundary to exactly one.'''
    url = first
    for piece in (second,) + arg:
        if not url.endswith('/'):
            url += '/'
        if piece.startswith('/'):
            piece = piece[1:]
        url += piece
    return url
|
455 | 407 | |
|
def hexsha1(data):
    """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
    object data"""
    digest = hashlib.sha1()
    for chunk in util.filechunkiter(data):
        digest.update(chunk)
    return digest.hexdigest()
|
463 | 415 | |
|
def httpsendfile(ui, filename):
    '''Return an httpsendfile wrapper for uploading filename in binary mode.

    NOTE(review): the body was reconstructed from the removed pre-1.9
    compatibility shim; the >= 1.9 API lives in mercurial.httpconnection.
    Confirm the corresponding import at the top of this module.'''
    return httpconnection.httpsendfile(ui, filename, 'rb')
|
475 | 418 | |
|
476 | 419 | # Convert a path to a unix style path. This is used to give a |
|
477 | 420 | # canonical path to the lfdirstate. |
|
def unixpath(path):
    '''Normalize path and convert OS separators to forward slashes, the
    canonical form used by the lfdirstate.'''
    normalized = os.path.normpath(path)
    return normalized.replace(os.sep, '/')
|
480 | 423 | |
|
def islfilesrepo(repo):
    '''True when repo requires largefiles and actually stores at least
    one standin.'''
    if 'largefiles' not in repo.requirements:
        return False
    return any_(shortname + '/' in f[0] for f in repo.store.datafiles())
|
484 | 427 | |
|
def any_(gen):
    '''Return True if any element of gen is truthy, else False.

    Local stand-in for the builtin any(), which older Python versions
    supported by this code base lack.'''
    for item in gen:
        if item:
            return True
    return False
|
490 | 433 | |
|
class storeprotonotcapable(BaseException):
    '''Raised when no store supports any of the requested protocols.

    storetypes holds the protocol names that were attempted.'''
    def __init__(self, storetypes):
        self.storetypes = storetypes
@@ -1,904 +1,830 b'' | |||
|
1 | 1 | # Copyright 2009-2010 Gregory P. Ward |
|
2 | 2 | # Copyright 2009-2010 Intelerad Medical Systems Incorporated |
|
3 | 3 | # Copyright 2010-2011 Fog Creek Software |
|
4 | 4 | # Copyright 2010-2011 Unity Technologies |
|
5 | 5 | # |
|
6 | 6 | # This software may be used and distributed according to the terms of the |
|
7 | 7 | # GNU General Public License version 2 or any later version. |
|
8 | 8 | |
|
9 | 9 | '''Overridden Mercurial commands and functions for the largefiles extension''' |
|
10 | 10 | |
|
11 | 11 | import os |
|
12 | 12 | import copy |
|
13 | 13 | |
|
14 | 14 | from mercurial import hg, commands, util, cmdutil, match as match_, node, \ |
|
15 | 15 | archival, error, merge |
|
16 | 16 | from mercurial.i18n import _ |
|
17 | 17 | from mercurial.node import hex |
|
18 | 18 | from hgext import rebase |
|
19 | 19 | |
|
20 | 20 | try: |
|
21 | 21 | from mercurial import scmutil |
|
22 | 22 | except ImportError: |
|
23 | 23 | pass |
|
24 | 24 | |
|
25 | 25 | import lfutil |
|
26 | 26 | import lfcommands |
|
27 | 27 | |
|
def installnormalfilesmatchfn(manifest):
    '''overrides scmutil.match so that the matcher it returns will ignore all
    largefiles'''
    oldmatch = None # for the closure
    def override_match(repo, pats=[], opts={}, globbed=False,
            default='relpath'):
        # delegate to the previously installed matcher, then shallow-copy
        # it so we can edit its file list without touching the original
        match = oldmatch(repo, pats, opts, globbed, default)
        m = copy.copy(match)
        # a path is "not a largefile" when it is neither a standin itself
        # nor a file whose standin is tracked in the given manifest
        notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
                manifest)
        m._files = filter(notlfile, m._files)
        m._fmap = set(m._files)
        orig_matchfn = m.matchfn
        # 'expr and x or None': normal files keep their original match
        # result; largefiles and standins map to None (i.e. no match)
        m.matchfn = lambda f: notlfile(f) and orig_matchfn(f) or None
        return m
    oldmatch = installmatchfn(override_match)
|
44 | 44 | |
|
def installmatchfn(f):
    '''Monkeypatch scmutil.match with f, remembering the previous matcher
    on f.oldmatch and returning it so it can be restored later.

    NOTE(review): reconstructed from the removed pre-1.9 compat shim
    (the <= 1.8 branch patched cmdutil.match); confirm against the
    targeted Mercurial release.'''
    oldmatch = scmutil.match
    setattr(f, 'oldmatch', oldmatch)
    scmutil.match = f
    return oldmatch
|
60 | 50 | |
|
def restorematchfn():
    '''restores scmutil.match to what it was before installnormalfilesmatchfn
    was called. no-op if scmutil.match is its original function.

    Note that n calls to installnormalfilesmatchfn will require n calls to
    restore matchfn to reverse

    NOTE(review): reconstructed from the removed pre-1.9 compat shim
    (the <= 1.8 branch restored cmdutil.match).'''
    scmutil.match = getattr(scmutil.match, 'oldmatch', scmutil.match)
73 | 58 | |
|
74 | 59 | # -- Wrappers: modify existing commands -------------------------------- |
|
75 | 60 | |
|
76 | 61 | # Add works by going through the files that the user wanted to add |
|
77 | 62 | # and checking if they should be added as lfiles. Then making a new |
|
78 | 63 | # matcher which matches only the normal files and running the original |
|
79 | 64 | # version of add. |
|
def override_add(orig, ui, repo, *pats, **opts):
    '''Wrapper for 'hg add': pick out the files that should become
    largefiles (by --large, configured size threshold, or configured
    patterns), create and add their standins, then run the original add
    on the remaining normal files.

    NOTE(review): the scmutil.match call was reconstructed from the
    removed pre-1.9 compat shim; confirm its signature for the targeted
    Mercurial release.'''
    large = opts.pop('large', None)

    lfsize = opts.pop('lfsize', None)
    if not lfsize and lfutil.islfilesrepo(repo):
        lfsize = ui.config(lfutil.longname, 'size', default='10')
    if lfsize:
        try:
            lfsize = int(lfsize)
        except ValueError:
            raise util.Abort(_('largefiles: size must be an integer, was %s\n')
                             % lfsize)

    lfmatcher = None
    if os.path.exists(repo.wjoin(lfutil.shortname)):
        lfpats = ui.config(lfutil.longname, 'patterns', default=())
        if lfpats:
            lfpats = lfpats.split(' ')
            lfmatcher = match_.match(repo.root, '', list(lfpats))

    lfnames = []
    m = scmutil.match(repo[None], pats, opts)
    m.bad = lambda x, y: None
    wctx = repo[None]
    for f in repo.walk(m):
        exact = m.exact(f)
        lfile = lfutil.standin(f) in wctx
        nfile = f in wctx
        exists = lfile or nfile

        # Don't warn the user when they attempt to add a normal tracked file.
        # The normal add code will do that for us.
        if exact and exists:
            if lfile:
                ui.warn(_('%s already a largefile\n') % f)
            continue

        if exact or not exists:
            if large or (lfsize and os.path.getsize(repo.wjoin(f)) >=
                    lfsize * 1024 * 1024) or (lfmatcher and lfmatcher(f)):
                lfnames.append(f)
                if ui.verbose or not exact:
                    ui.status(_('adding %s as a largefile\n') % m.rel(f))

    bad = []
    standins = []

    # Need to lock otherwise there could be a race condition inbetween when
    # standins are created and added to the repo
    wlock = repo.wlock()
    try:
        if not opts.get('dry_run'):
            lfdirstate = lfutil.openlfdirstate(ui, repo)
            for f in lfnames:
                standinname = lfutil.standin(f)
                lfutil.writestandin(repo, standinname, hash='',
                    executable=lfutil.getexecutable(repo.wjoin(f)))
                standins.append(standinname)
                # a previously-removed largefile being re-added must go
                # back to 'normallookup', not be added twice
                if lfdirstate[f] == 'r':
                    lfdirstate.normallookup(f)
                else:
                    lfdirstate.add(f)
            lfdirstate.write()
            bad += [lfutil.splitstandin(f) for f in lfutil.repo_add(repo,
                standins) if f in m.files()]
    finally:
        wlock.release()

    installnormalfilesmatchfn(repo[None].manifest())
    result = orig(ui, repo, *pats, **opts)
    restorematchfn()

    return (result == 1 or bad) and 1 or 0
|
158 | 138 | |
|
def override_remove(orig, ui, repo, *pats, **opts):
    '''Wrapper for 'hg remove': run the original remove on normal files,
    then remove/forget the largefiles matched by pats, updating both the
    lfdirstate and the standins.

    NOTE(review): the scmutil.match call was reconstructed from the
    removed pre-1.9 compat shim; confirm its signature for the targeted
    Mercurial release.  Also renamed the comprehension variable that
    shadowed the builtin `list`.'''
    manifest = repo[None].manifest()
    installnormalfilesmatchfn(manifest)
    orig(ui, repo, *pats, **opts)
    restorematchfn()

    after, force = opts.get('after'), opts.get('force')
    if not pats and not after:
        raise util.Abort(_('no files specified'))
    m = scmutil.match(repo[None], pats, opts)
    try:
        repo.lfstatus = True
        s = repo.status(match=m, clean=True)
    finally:
        repo.lfstatus = False
    # keep only the status entries that correspond to largefiles
    modified, added, deleted, clean = [[f for f in files
            if lfutil.standin(f) in manifest]
        for files in [s[0], s[1], s[3], s[6]]]

    def warn(files, reason):
        for f in files:
            ui.warn(_('not removing %s: file %s (use -f to force removal)\n')
                    % (m.rel(f), reason))

    if force:
        remove, forget = modified + deleted + clean, added
    elif after:
        remove, forget = deleted, []
        warn(modified + added + clean, _('still exists'))
    else:
        remove, forget = deleted + clean, []
        warn(modified, _('is modified'))
        warn(added, _('has been marked for add'))

    for f in sorted(remove + forget):
        if ui.verbose or not m.exact(f):
            ui.status(_('removing %s\n') % m.rel(f))

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race inbetween.
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in remove:
            if not after:
                os.unlink(repo.wjoin(f))
                currentdir = os.path.split(f)[0]
                # prune now-empty parent directories
                while currentdir and not os.listdir(repo.wjoin(currentdir)):
                    os.rmdir(repo.wjoin(currentdir))
                    currentdir = os.path.split(currentdir)[0]
            lfdirstate.remove(f)
        lfdirstate.write()

        forget = [lfutil.standin(f) for f in forget]
        remove = [lfutil.standin(f) for f in remove]
        lfutil.repo_forget(repo, forget)
        lfutil.repo_remove(repo, remove, unlink=True)
    finally:
        wlock.release()
|
222 | 197 | |
|
def override_status(orig, ui, repo, *pats, **opts):
    '''Run status with repo.lfstatus temporarily enabled so largefiles
    are reported; always restore the flag afterwards.'''
    try:
        repo.lfstatus = True
        return orig(ui, repo, *pats, **opts)
    finally:
        repo.lfstatus = False
|
229 | 204 | |
|
def override_log(orig, ui, repo, *pats, **opts):
    '''Run log with repo.lfstatus temporarily enabled; the original's
    return value is deliberately discarded, matching historical behavior.'''
    try:
        repo.lfstatus = True
        orig(ui, repo, *pats, **opts)
    finally:
        repo.lfstatus = False
|
236 | 211 | |
|
def override_verify(orig, ui, repo, *pats, **opts):
    '''Run verify, then (with --large) additionally verify largefiles.'''
    large = opts.pop('large', False)
    verifyall = opts.pop('lfa', False)
    verifycontents = opts.pop('lfc', False)

    result = orig(ui, repo, *pats, **opts)
    if large:
        result = result or lfcommands.verifylfiles(ui, repo, verifyall,
                                                   verifycontents)
    return result
|
246 | 221 | |
|
247 | 222 | # Override needs to refresh standins so that update's normal merge |
|
248 | 223 | # will go through properly. Then the other update hook (overriding repo.update) |
|
249 | 224 | # will get the new files. Filemerge is also overriden so that the merge |
|
250 | 225 | # will merge standins correctly. |
|
def override_update(orig, ui, repo, *pats, **opts):
    '''Wrapper for 'hg update': refresh standins from dirty largefiles
    (unless -C/--clean) so the normal merge machinery sees current
    hashes, honoring -c/--check first; then run the original update.'''
    lfdirstate = lfutil.openlfdirstate(ui, repo)
    s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
        False, False)
    (unsure, modified, added, removed, missing, unknown, ignored, clean) = s

    # Need to lock between the standins getting updated and their lfiles
    # getting updated
    wlock = repo.wlock()
    try:
        if opts['check']:
            mod = len(modified) > 0
            # 'unsure' entries need a real content hash to decide whether
            # they are actually modified
            for lfile in unsure:
                standin = lfutil.standin(lfile)
                if repo['.'][standin].data().strip() != \
                        lfutil.hashfile(repo.wjoin(lfile)):
                    mod = True
                else:
                    lfdirstate.normal(lfile)
            lfdirstate.write()
            if mod:
                raise util.Abort(_('uncommitted local changes'))
        # XXX handle removed differently
        if not opts['clean']:
            for lfile in unsure + modified + added:
                lfutil.updatestandin(repo, lfutil.standin(lfile))
    finally:
        wlock.release()
    return orig(ui, repo, *pats, **opts)
|
280 | 255 | |
|
281 | 256 | # Override filemerge to prompt the user about how they wish to merge lfiles. |
|
282 | 257 | # This will handle identical edits, and copy/rename + edit without prompting |
|
283 | 258 | # the user. |
|
def override_filemerge(origfn, repo, mynode, orig, fcd, fco, fca):
    '''Merge a standin: resolve the trivial cases (identical files, one
    side unchanged from the ancestor) automatically, otherwise prompt the
    user to keep the local largefile or take the other.  Non-standins are
    delegated to the original filemerge.'''
    # Use better variable names here. Because this is a wrapper we cannot
    # change the variable names in the function declaration.
    fcdest, fcother, fcancestor = fcd, fco, fca
    if not lfutil.isstandin(orig):
        return origfn(repo, mynode, orig, fcdest, fcother, fcancestor)
    else:
        if not fcother.cmp(fcdest): # files identical?
            return None

        # backwards, use working dir parent as ancestor
        if fcancestor == fcother:
            fcancestor = fcdest.parents()[0]

        if orig != fcother.path():
            repo.ui.status(_('merging %s and %s to %s\n')
                           % (lfutil.splitstandin(orig),
                              lfutil.splitstandin(fcother.path()),
                              lfutil.splitstandin(fcdest.path())))
        else:
            repo.ui.status(_('merging %s\n')
                           % lfutil.splitstandin(fcdest.path()))

        # other side unchanged since the ancestor: the local change wins
        if fcancestor.path() != fcother.path() and fcother.data() == \
                fcancestor.data():
            return 0
        # local side unchanged since the ancestor: take the other side
        if fcancestor.path() != fcdest.path() and fcdest.data() == \
                fcancestor.data():
            repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
            return 0

        # real conflict: ask the user which largefile to keep
        if repo.ui.promptchoice(_('largefile %s has a merge conflict\n'
                                 'keep (l)ocal or take (o)ther?') %
                                lfutil.splitstandin(orig),
                                (_('&Local'), _('&Other')), 0) == 0:
            return 0
        else:
            repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
            return 0
|
323 | 298 | |
|
324 | 299 | # Copy first changes the matchers to match standins instead of lfiles. |
|
325 | 300 | # Then it overrides util.copyfile in that function it checks if the destination |
|
326 | 301 | # lfile already exists. It also keeps a list of copied files so that the lfiles |
|
327 | 302 | # can be copied and the dirstate updated. |
|
328 | 303 | def override_copy(orig, ui, repo, pats, opts, rename=False): |
|
329 | 304 | # doesn't remove lfile on rename |
|
330 | 305 | if len(pats) < 2: |
|
331 | 306 | # this isn't legal, let the original function deal with it |
|
332 | 307 | return orig(ui, repo, pats, opts, rename) |
|
333 | 308 | |
|
334 | 309 | def makestandin(relpath): |
|
335 | try: | |
|
336 | # Mercurial >= 1.9 | |
|
337 | 310 |
|
|
338 | except ImportError: | |
|
339 | # Mercurial <= 1.8 | |
|
340 | path = util.canonpath(repo.root, repo.getcwd(), relpath) | |
|
341 | 311 | return os.path.join(os.path.relpath('.', repo.getcwd()), |
|
342 | 312 | lfutil.standin(path)) |
|
343 | 313 | |
|
344 | try: | |
|
345 | # Mercurial >= 1.9 | |
|
346 | 314 |
|
|
347 | except ImportError: | |
|
348 | # Mercurial <= 1.8 | |
|
349 | fullpats = cmdutil.expandpats(pats) | |
|
350 | 315 | dest = fullpats[-1] |
|
351 | 316 | |
|
352 | 317 | if os.path.isdir(dest): |
|
353 | 318 | if not os.path.isdir(makestandin(dest)): |
|
354 | 319 | os.makedirs(makestandin(dest)) |
|
355 | 320 | # This could copy both lfiles and normal files in one command, but we don't |
|
356 | 321 | # want to do that first replace their matcher to only match normal files |
|
357 | 322 | # and run it then replace it to just match lfiles and run it again |
|
358 | 323 | nonormalfiles = False |
|
359 | 324 | nolfiles = False |
|
360 | 325 | try: |
|
361 | 326 | installnormalfilesmatchfn(repo[None].manifest()) |
|
362 | 327 | result = orig(ui, repo, pats, opts, rename) |
|
363 | 328 | except util.Abort, e: |
|
364 | 329 | if str(e) != 'no files to copy': |
|
365 | 330 | raise e |
|
366 | 331 | else: |
|
367 | 332 | nonormalfiles = True |
|
368 | 333 | result = 0 |
|
369 | 334 | finally: |
|
370 | 335 | restorematchfn() |
|
371 | 336 | |
|
372 | 337 | # The first rename can cause our current working directory to be removed. |
|
373 | 338 | # In that case there is nothing left to copy/rename so just quit. |
|
374 | 339 | try: |
|
375 | 340 | repo.getcwd() |
|
376 | 341 | except OSError: |
|
377 | 342 | return result |
|
378 | 343 | |
|
379 | 344 | try: |
|
380 | 345 | # When we call orig below it creates the standins but we don't add them |
|
381 | 346 | # to the dir state until later so lock during that time. |
|
382 | 347 | wlock = repo.wlock() |
|
383 | 348 | |
|
384 | 349 | manifest = repo[None].manifest() |
|
385 | 350 | oldmatch = None # for the closure |
|
386 | 351 | def override_match(repo, pats=[], opts={}, globbed=False, |
|
387 | 352 | default='relpath'): |
|
388 | 353 | newpats = [] |
|
389 | 354 | # The patterns were previously mangled to add the standin |
|
390 | 355 | # directory; we need to remove that now |
|
391 | 356 | for pat in pats: |
|
392 | 357 | if match_.patkind(pat) is None and lfutil.shortname in pat: |
|
393 | 358 | newpats.append(pat.replace(lfutil.shortname, '')) |
|
394 | 359 | else: |
|
395 | 360 | newpats.append(pat) |
|
396 | 361 | match = oldmatch(repo, newpats, opts, globbed, default) |
|
397 | 362 | m = copy.copy(match) |
|
398 | 363 | lfile = lambda f: lfutil.standin(f) in manifest |
|
399 | 364 | m._files = [lfutil.standin(f) for f in m._files if lfile(f)] |
|
400 | 365 | m._fmap = set(m._files) |
|
401 | 366 | orig_matchfn = m.matchfn |
|
402 | 367 | m.matchfn = lambda f: lfutil.isstandin(f) and \ |
|
403 | 368 | lfile(lfutil.splitstandin(f)) and \ |
|
404 | 369 | orig_matchfn(lfutil.splitstandin(f)) or None |
|
405 | 370 | return m |
|
406 | 371 | oldmatch = installmatchfn(override_match) |
|
407 | 372 | listpats = [] |
|
408 | 373 | for pat in pats: |
|
409 | 374 | if match_.patkind(pat) is not None: |
|
410 | 375 | listpats.append(pat) |
|
411 | 376 | else: |
|
412 | 377 | listpats.append(makestandin(pat)) |
|
413 | 378 | |
|
414 | 379 | try: |
|
415 | 380 | origcopyfile = util.copyfile |
|
416 | 381 | copiedfiles = [] |
|
417 | 382 | def override_copyfile(src, dest): |
|
418 | 383 | if lfutil.shortname in src and lfutil.shortname in dest: |
|
419 | 384 | destlfile = dest.replace(lfutil.shortname, '') |
|
420 | 385 | if not opts['force'] and os.path.exists(destlfile): |
|
421 | 386 | raise IOError('', |
|
422 | 387 | _('destination largefile already exists')) |
|
423 | 388 | copiedfiles.append((src, dest)) |
|
424 | 389 | origcopyfile(src, dest) |
|
425 | 390 | |
|
426 | 391 | util.copyfile = override_copyfile |
|
427 | 392 | result += orig(ui, repo, listpats, opts, rename) |
|
428 | 393 | finally: |
|
429 | 394 | util.copyfile = origcopyfile |
|
430 | 395 | |
|
431 | 396 | lfdirstate = lfutil.openlfdirstate(ui, repo) |
|
432 | 397 | for (src, dest) in copiedfiles: |
|
433 | 398 | if lfutil.shortname in src and lfutil.shortname in dest: |
|
434 | 399 | srclfile = src.replace(lfutil.shortname, '') |
|
435 | 400 | destlfile = dest.replace(lfutil.shortname, '') |
|
436 | 401 | destlfiledir = os.path.dirname(destlfile) or '.' |
|
437 | 402 | if not os.path.isdir(destlfiledir): |
|
438 | 403 | os.makedirs(destlfiledir) |
|
439 | 404 | if rename: |
|
440 | 405 | os.rename(srclfile, destlfile) |
|
441 | 406 | lfdirstate.remove(os.path.relpath(srclfile, |
|
442 | 407 | repo.root)) |
|
443 | 408 | else: |
|
444 | 409 | util.copyfile(srclfile, destlfile) |
|
445 | 410 | lfdirstate.add(os.path.relpath(destlfile, |
|
446 | 411 | repo.root)) |
|
447 | 412 | lfdirstate.write() |
|
448 | 413 | except util.Abort, e: |
|
449 | 414 | if str(e) != 'no files to copy': |
|
450 | 415 | raise e |
|
451 | 416 | else: |
|
452 | 417 | nolfiles = True |
|
453 | 418 | finally: |
|
454 | 419 | restorematchfn() |
|
455 | 420 | wlock.release() |
|
456 | 421 | |
|
457 | 422 | if nolfiles and nonormalfiles: |
|
458 | 423 | raise util.Abort(_('no files to copy')) |
|
459 | 424 | |
|
460 | 425 | return result |
|
461 | 426 | |
|
462 | 427 | # When the user calls revert, we have to be careful to not revert any changes |
|
463 | 428 | # to other lfiles accidentally. This means we have to keep track of the lfiles |
|
464 | 429 | # that are being reverted so we only pull down the necessary lfiles. |
|
465 | 430 | # |
|
466 | 431 | # Standins are only updated (to match the hash of lfiles) before commits. |
|
467 | 432 | # Update the standins then run the original revert (changing the matcher to hit |
|
468 | 433 | # standins instead of lfiles). Based on the resulting standins update the |
|
469 | 434 | # lfiles. Then return the standins to their proper state |
|
470 | 435 | def override_revert(orig, ui, repo, *pats, **opts): |
|
471 | 436 | # Because we put the standins in a bad state (by updating them) and then |
|
472 | 437 | # return them to a correct state we need to lock to prevent others from |
|
473 | 438 | # changing them in their incorrect state. |
|
474 | 439 | wlock = repo.wlock() |
|
475 | 440 | try: |
|
476 | 441 | lfdirstate = lfutil.openlfdirstate(ui, repo) |
|
477 | 442 | (modified, added, removed, missing, unknown, ignored, clean) = \ |
|
478 | 443 | lfutil.lfdirstate_status(lfdirstate, repo, repo['.'].rev()) |
|
479 | 444 | for lfile in modified: |
|
480 | 445 | lfutil.updatestandin(repo, lfutil.standin(lfile)) |
|
481 | 446 | |
|
482 | 447 | try: |
|
483 | 448 | ctx = repo[opts.get('rev')] |
|
484 | 449 | oldmatch = None # for the closure |
|
485 | 450 | def override_match(ctxorrepo, pats=[], opts={}, globbed=False, |
|
486 | 451 | default='relpath'): |
|
487 | 452 | if util.safehasattr(ctxorrepo, 'match'): |
|
488 | 453 | ctx0 = ctxorrepo |
|
489 | 454 | else: |
|
490 | 455 | ctx0 = ctxorrepo[None] |
|
491 | 456 | match = oldmatch(ctxorrepo, pats, opts, globbed, default) |
|
492 | 457 | m = copy.copy(match) |
|
493 | 458 | def tostandin(f): |
|
494 | 459 | if lfutil.standin(f) in ctx0 or lfutil.standin(f) in ctx: |
|
495 | 460 | return lfutil.standin(f) |
|
496 | 461 | elif lfutil.standin(f) in repo[None]: |
|
497 | 462 | return None |
|
498 | 463 | return f |
|
499 | 464 | m._files = [tostandin(f) for f in m._files] |
|
500 | 465 | m._files = [f for f in m._files if f is not None] |
|
501 | 466 | m._fmap = set(m._files) |
|
502 | 467 | orig_matchfn = m.matchfn |
|
503 | 468 | def matchfn(f): |
|
504 | 469 | if lfutil.isstandin(f): |
|
505 | 470 | # We need to keep track of what lfiles are being |
|
506 | 471 | # matched so we know which ones to update later |
|
507 | 472 | # (otherwise we revert changes to other lfiles |
|
508 | 473 | # accidentally). This is repo specific, so duckpunch |
|
509 | 474 | # the repo object to keep the list of lfiles for us |
|
510 | 475 | # later. |
|
511 | 476 | if orig_matchfn(lfutil.splitstandin(f)) and \ |
|
512 | 477 | (f in repo[None] or f in ctx): |
|
513 | 478 | lfileslist = getattr(repo, '_lfilestoupdate', []) |
|
514 | 479 | lfileslist.append(lfutil.splitstandin(f)) |
|
515 | 480 | repo._lfilestoupdate = lfileslist |
|
516 | 481 | return True |
|
517 | 482 | else: |
|
518 | 483 | return False |
|
519 | 484 | return orig_matchfn(f) |
|
520 | 485 | m.matchfn = matchfn |
|
521 | 486 | return m |
|
522 | 487 | oldmatch = installmatchfn(override_match) |
|
523 | try: | |
|
524 | # Mercurial >= 1.9 | |
|
525 | 488 |
|
|
526 | 489 |
|
|
527 | except ImportError: | |
|
528 | # Mercurial <= 1.8 | |
|
529 | matches = override_match(repo, pats, opts) | |
|
530 | 490 | orig(ui, repo, *pats, **opts) |
|
531 | 491 | finally: |
|
532 | 492 | restorematchfn() |
|
533 | 493 | lfileslist = getattr(repo, '_lfilestoupdate', []) |
|
534 | 494 | lfcommands.updatelfiles(ui, repo, filelist=lfileslist, |
|
535 | 495 | printmessage=False) |
|
536 | 496 | # Empty out the lfiles list so we start fresh next time |
|
537 | 497 | repo._lfilestoupdate = [] |
|
538 | 498 | for lfile in modified: |
|
539 | 499 | if lfile in lfileslist: |
|
540 | 500 | if os.path.exists(repo.wjoin(lfutil.standin(lfile))) and lfile\ |
|
541 | 501 | in repo['.']: |
|
542 | 502 | lfutil.writestandin(repo, lfutil.standin(lfile), |
|
543 | 503 | repo['.'][lfile].data().strip(), |
|
544 | 504 | 'x' in repo['.'][lfile].flags()) |
|
545 | 505 | lfdirstate = lfutil.openlfdirstate(ui, repo) |
|
546 | 506 | for lfile in added: |
|
547 | 507 | standin = lfutil.standin(lfile) |
|
548 | 508 | if standin not in ctx and (standin in matches or opts.get('all')): |
|
549 | 509 | if lfile in lfdirstate: |
|
550 | try: | |
|
551 | # Mercurial >= 1.9 | |
|
552 | 510 |
|
|
553 | except AttributeError: | |
|
554 | # Mercurial <= 1.8 | |
|
555 | lfdirstate.forget(lfile) | |
|
556 | 511 | util.unlinkpath(repo.wjoin(standin)) |
|
557 | 512 | lfdirstate.write() |
|
558 | 513 | finally: |
|
559 | 514 | wlock.release() |
|
560 | 515 | |
|
561 | 516 | def hg_update(orig, repo, node): |
|
562 | 517 | result = orig(repo, node) |
|
563 | 518 | # XXX check if it worked first |
|
564 | 519 | lfcommands.updatelfiles(repo.ui, repo) |
|
565 | 520 | return result |
|
566 | 521 | |
|
567 | 522 | def hg_clean(orig, repo, node, show_stats=True): |
|
568 | 523 | result = orig(repo, node, show_stats) |
|
569 | 524 | lfcommands.updatelfiles(repo.ui, repo) |
|
570 | 525 | return result |
|
571 | 526 | |
|
572 | 527 | def hg_merge(orig, repo, node, force=None, remind=True): |
|
573 | 528 | result = orig(repo, node, force, remind) |
|
574 | 529 | lfcommands.updatelfiles(repo.ui, repo) |
|
575 | 530 | return result |
|
576 | 531 | |
|
577 | 532 | # When we rebase a repository with remotely changed lfiles, we need |
|
578 | 533 | # to take some extra care so that the lfiles are correctly updated |
|
579 | 534 | # in the working copy |
|
580 | 535 | def override_pull(orig, ui, repo, source=None, **opts): |
|
581 | 536 | if opts.get('rebase', False): |
|
582 | 537 | repo._isrebasing = True |
|
583 | 538 | try: |
|
584 | 539 | if opts.get('update'): |
|
585 | 540 | del opts['update'] |
|
586 | 541 | ui.debug('--update and --rebase are not compatible, ignoring ' |
|
587 | 542 | 'the update flag\n') |
|
588 | 543 | del opts['rebase'] |
|
589 | try: | |
|
590 | # Mercurial >= 1.9 | |
|
591 | 544 |
|
|
592 | except AttributeError: | |
|
593 | # Mercurial <= 1.8 | |
|
594 | cmdutil.bail_if_changed(repo) | |
|
595 | 545 | revsprepull = len(repo) |
|
596 | 546 | origpostincoming = commands.postincoming |
|
597 | 547 | def _dummy(*args, **kwargs): |
|
598 | 548 | pass |
|
599 | 549 | commands.postincoming = _dummy |
|
600 | 550 | repo.lfpullsource = source |
|
601 | 551 | if not source: |
|
602 | 552 | source = 'default' |
|
603 | 553 | try: |
|
604 | 554 | result = commands.pull(ui, repo, source, **opts) |
|
605 | 555 | finally: |
|
606 | 556 | commands.postincoming = origpostincoming |
|
607 | 557 | revspostpull = len(repo) |
|
608 | 558 | if revspostpull > revsprepull: |
|
609 | 559 | result = result or rebase.rebase(ui, repo) |
|
610 | 560 | finally: |
|
611 | 561 | repo._isrebasing = False |
|
612 | 562 | else: |
|
613 | 563 | repo.lfpullsource = source |
|
614 | 564 | if not source: |
|
615 | 565 | source = 'default' |
|
616 | 566 | result = orig(ui, repo, source, **opts) |
|
617 | 567 | return result |
|
618 | 568 | |
|
619 | 569 | def override_rebase(orig, ui, repo, **opts): |
|
620 | 570 | repo._isrebasing = True |
|
621 | 571 | try: |
|
622 | 572 | orig(ui, repo, **opts) |
|
623 | 573 | finally: |
|
624 | 574 | repo._isrebasing = False |
|
625 | 575 | |
|
626 | 576 | def override_archive(orig, repo, dest, node, kind, decode=True, matchfn=None, |
|
627 | 577 | prefix=None, mtime=None, subrepos=None): |
|
628 | 578 | # No need to lock because we are only reading history and lfile caches |
|
629 | 579 | # neither of which are modified |
|
630 | 580 | |
|
631 | 581 | lfcommands.cachelfiles(repo.ui, repo, node) |
|
632 | 582 | |
|
633 | 583 | if kind not in archival.archivers: |
|
634 | 584 | raise util.Abort(_("unknown archive type '%s'") % kind) |
|
635 | 585 | |
|
636 | 586 | ctx = repo[node] |
|
637 | 587 | |
|
638 | # In Mercurial <= 1.5 the prefix is passed to the archiver so try that | |
|
639 | # if that doesn't work we are probably in Mercurial >= 1.6 where the | |
|
640 | # prefix is not handled by the archiver | |
|
641 | try: | |
|
642 | archiver = archival.archivers[kind](dest, prefix, mtime or \ | |
|
643 | ctx.date()[0]) | |
|
644 | ||
|
645 | def write(name, mode, islink, getdata): | |
|
646 | if matchfn and not matchfn(name): | |
|
647 | return | |
|
648 | data = getdata() | |
|
649 | if decode: | |
|
650 | data = repo.wwritedata(name, data) | |
|
651 | archiver.addfile(name, mode, islink, data) | |
|
652 | except TypeError: | |
|
653 | 588 |
|
|
654 | 589 |
|
|
655 | 590 |
|
|
656 | 591 |
|
|
657 | 592 |
|
|
658 | 593 |
|
|
659 | 594 | |
|
660 | 595 |
|
|
661 | 596 |
|
|
662 | 597 |
|
|
663 | 598 |
|
|
664 | 599 |
|
|
665 | 600 |
|
|
666 | 601 |
|
|
667 | 602 | |
|
668 | 603 |
|
|
669 | 604 | |
|
670 | 605 | if repo.ui.configbool("ui", "archivemeta", True): |
|
671 | 606 | def metadata(): |
|
672 | 607 | base = 'repo: %s\nnode: %s\nbranch: %s\n' % ( |
|
673 | 608 | hex(repo.changelog.node(0)), hex(node), ctx.branch()) |
|
674 | 609 | |
|
675 | 610 | tags = ''.join('tag: %s\n' % t for t in ctx.tags() |
|
676 | 611 | if repo.tagtype(t) == 'global') |
|
677 | 612 | if not tags: |
|
678 | 613 | repo.ui.pushbuffer() |
|
679 | 614 | opts = {'template': '{latesttag}\n{latesttagdistance}', |
|
680 | 615 | 'style': '', 'patch': None, 'git': None} |
|
681 | 616 | cmdutil.show_changeset(repo.ui, repo, opts).show(ctx) |
|
682 | 617 | ltags, dist = repo.ui.popbuffer().split('\n') |
|
683 | 618 | tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':')) |
|
684 | 619 | tags += 'latesttagdistance: %s\n' % dist |
|
685 | 620 | |
|
686 | 621 | return base + tags |
|
687 | 622 | |
|
688 | 623 | write('.hg_archival.txt', 0644, False, metadata) |
|
689 | 624 | |
|
690 | 625 | for f in ctx: |
|
691 | 626 | ff = ctx.flags(f) |
|
692 | 627 | getdata = ctx[f].data |
|
693 | 628 | if lfutil.isstandin(f): |
|
694 | 629 | path = lfutil.findfile(repo, getdata().strip()) |
|
695 | 630 | f = lfutil.splitstandin(f) |
|
696 | 631 | |
|
697 | 632 | def getdatafn(): |
|
698 | 633 | try: |
|
699 | 634 | fd = open(path, 'rb') |
|
700 | 635 | return fd.read() |
|
701 | 636 | finally: |
|
702 | 637 | fd.close() |
|
703 | 638 | |
|
704 | 639 | getdata = getdatafn |
|
705 | 640 | write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata) |
|
706 | 641 | |
|
707 | 642 | if subrepos: |
|
708 | 643 | for subpath in ctx.substate: |
|
709 | 644 | sub = ctx.sub(subpath) |
|
710 | 645 | try: |
|
711 | 646 | sub.archive(repo.ui, archiver, prefix) |
|
712 | 647 | except TypeError: |
|
713 | 648 | sub.archive(archiver, prefix) |
|
714 | 649 | |
|
715 | 650 | archiver.done() |
|
716 | 651 | |
|
717 | 652 | # If a lfile is modified the change is not reflected in its standin until a |
|
718 | 653 | # commit. cmdutil.bailifchanged raises an exception if the repo has |
|
719 | 654 | # uncommitted changes. Wrap it to also check if lfiles were changed. This is |
|
720 | 655 | # used by bisect and backout. |
|
721 | 656 | def override_bailifchanged(orig, repo): |
|
722 | 657 | orig(repo) |
|
723 | 658 | repo.lfstatus = True |
|
724 | 659 | modified, added, removed, deleted = repo.status()[:4] |
|
725 | 660 | repo.lfstatus = False |
|
726 | 661 | if modified or added or removed or deleted: |
|
727 | 662 | raise util.Abort(_('outstanding uncommitted changes')) |
|
728 | 663 | |
|
729 | 664 | # Fetch doesn't use cmdutil.bail_if_changed so override it to add the check |
|
730 | 665 | def override_fetch(orig, ui, repo, *pats, **opts): |
|
731 | 666 | repo.lfstatus = True |
|
732 | 667 | modified, added, removed, deleted = repo.status()[:4] |
|
733 | 668 | repo.lfstatus = False |
|
734 | 669 | if modified or added or removed or deleted: |
|
735 | 670 | raise util.Abort(_('outstanding uncommitted changes')) |
|
736 | 671 | return orig(ui, repo, *pats, **opts) |
|
737 | 672 | |
|
738 | 673 | def override_forget(orig, ui, repo, *pats, **opts): |
|
739 | 674 | installnormalfilesmatchfn(repo[None].manifest()) |
|
740 | 675 | orig(ui, repo, *pats, **opts) |
|
741 | 676 | restorematchfn() |
|
742 | try: | |
|
743 | # Mercurial >= 1.9 | |
|
744 | 677 |
|
|
745 | except ImportError: | |
|
746 | # Mercurial <= 1.8 | |
|
747 | m = cmdutil.match(repo, pats, opts) | |
|
748 | 678 | |
|
749 | 679 | try: |
|
750 | 680 | repo.lfstatus = True |
|
751 | 681 | s = repo.status(match=m, clean=True) |
|
752 | 682 | finally: |
|
753 | 683 | repo.lfstatus = False |
|
754 | 684 | forget = sorted(s[0] + s[1] + s[3] + s[6]) |
|
755 | 685 | forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()] |
|
756 | 686 | |
|
757 | 687 | for f in forget: |
|
758 | 688 | if lfutil.standin(f) not in repo.dirstate and not \ |
|
759 | 689 | os.path.isdir(m.rel(lfutil.standin(f))): |
|
760 | 690 | ui.warn(_('not removing %s: file is already untracked\n') |
|
761 | 691 | % m.rel(f)) |
|
762 | 692 | |
|
763 | 693 | for f in forget: |
|
764 | 694 | if ui.verbose or not m.exact(f): |
|
765 | 695 | ui.status(_('removing %s\n') % m.rel(f)) |
|
766 | 696 | |
|
767 | 697 | # Need to lock because standin files are deleted then removed from the |
|
768 | 698 | # repository and we could race inbetween. |
|
769 | 699 | wlock = repo.wlock() |
|
770 | 700 | try: |
|
771 | 701 | lfdirstate = lfutil.openlfdirstate(ui, repo) |
|
772 | 702 | for f in forget: |
|
773 | 703 | if lfdirstate[f] == 'a': |
|
774 | 704 | lfdirstate.drop(f) |
|
775 | 705 | else: |
|
776 | 706 | lfdirstate.remove(f) |
|
777 | 707 | lfdirstate.write() |
|
778 | 708 | lfutil.repo_remove(repo, [lfutil.standin(f) for f in forget], |
|
779 | 709 | unlink=True) |
|
780 | 710 | finally: |
|
781 | 711 | wlock.release() |
|
782 | 712 | |
|
783 | 713 | def getoutgoinglfiles(ui, repo, dest=None, **opts): |
|
784 | 714 | dest = ui.expandpath(dest or 'default-push', dest or 'default') |
|
785 | 715 | dest, branches = hg.parseurl(dest, opts.get('branch')) |
|
786 | 716 | revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev')) |
|
787 | 717 | if revs: |
|
788 | 718 | revs = [repo.lookup(rev) for rev in revs] |
|
789 | 719 | |
|
790 | # Mercurial <= 1.5 had remoteui in cmdutil, then it moved to hg | |
|
791 | try: | |
|
792 | remoteui = cmdutil.remoteui | |
|
793 | except AttributeError: | |
|
794 | 720 |
|
|
795 | 721 | |
|
796 | 722 | try: |
|
797 | 723 | remote = hg.repository(remoteui(repo, opts), dest) |
|
798 | 724 | except error.RepoError: |
|
799 | 725 | return None |
|
800 | 726 | o = lfutil.findoutgoing(repo, remote, False) |
|
801 | 727 | if not o: |
|
802 | 728 | return None |
|
803 | 729 | o = repo.changelog.nodesbetween(o, revs)[0] |
|
804 | 730 | if opts.get('newest_first'): |
|
805 | 731 | o.reverse() |
|
806 | 732 | |
|
807 | 733 | toupload = set() |
|
808 | 734 | for n in o: |
|
809 | 735 | parents = [p for p in repo.changelog.parents(n) if p != node.nullid] |
|
810 | 736 | ctx = repo[n] |
|
811 | 737 | files = set(ctx.files()) |
|
812 | 738 | if len(parents) == 2: |
|
813 | 739 | mc = ctx.manifest() |
|
814 | 740 | mp1 = ctx.parents()[0].manifest() |
|
815 | 741 | mp2 = ctx.parents()[1].manifest() |
|
816 | 742 | for f in mp1: |
|
817 | 743 | if f not in mc: |
|
818 | 744 | files.add(f) |
|
819 | 745 | for f in mp2: |
|
820 | 746 | if f not in mc: |
|
821 | 747 | files.add(f) |
|
822 | 748 | for f in mc: |
|
823 | 749 | if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None): |
|
824 | 750 | files.add(f) |
|
825 | 751 | toupload = toupload.union(set([f for f in files if lfutil.isstandin(f)\ |
|
826 | 752 | and f in ctx])) |
|
827 | 753 | return toupload |
|
828 | 754 | |
|
829 | 755 | def override_outgoing(orig, ui, repo, dest=None, **opts): |
|
830 | 756 | orig(ui, repo, dest, **opts) |
|
831 | 757 | |
|
832 | 758 | if opts.pop('large', None): |
|
833 | 759 | toupload = getoutgoinglfiles(ui, repo, dest, **opts) |
|
834 | 760 | if toupload is None: |
|
835 | 761 | ui.status(_('largefiles: No remote repo\n')) |
|
836 | 762 | else: |
|
837 | 763 | ui.status(_('largefiles to upload:\n')) |
|
838 | 764 | for file in toupload: |
|
839 | 765 | ui.status(lfutil.splitstandin(file) + '\n') |
|
840 | 766 | ui.status('\n') |
|
841 | 767 | |
|
842 | 768 | def override_summary(orig, ui, repo, *pats, **opts): |
|
843 | 769 | orig(ui, repo, *pats, **opts) |
|
844 | 770 | |
|
845 | 771 | if opts.pop('large', None): |
|
846 | 772 | toupload = getoutgoinglfiles(ui, repo, None, **opts) |
|
847 | 773 | if toupload is None: |
|
848 | 774 | ui.status(_('largefiles: No remote repo\n')) |
|
849 | 775 | else: |
|
850 | 776 | ui.status(_('largefiles: %d to upload\n') % len(toupload)) |
|
851 | 777 | |
|
852 | 778 | def override_addremove(orig, ui, repo, *pats, **opts): |
|
853 | 779 | # Check if the parent or child has lfiles if they do don't allow it. If |
|
854 | 780 | # there is a symlink in the manifest then getting the manifest throws an |
|
855 | 781 | # exception catch it and let addremove deal with it. This happens in |
|
856 | 782 | # Mercurial's test test-addremove-symlink |
|
857 | 783 | try: |
|
858 | 784 | manifesttip = set(repo['tip'].manifest()) |
|
859 | 785 | except util.Abort: |
|
860 | 786 | manifesttip = set() |
|
861 | 787 | try: |
|
862 | 788 | manifestworking = set(repo[None].manifest()) |
|
863 | 789 | except util.Abort: |
|
864 | 790 | manifestworking = set() |
|
865 | 791 | |
|
866 | 792 | # Manifests are only iterable so turn them into sets then union |
|
867 | 793 | for file in manifesttip.union(manifestworking): |
|
868 | 794 | if file.startswith(lfutil.shortname): |
|
869 | 795 | raise util.Abort( |
|
870 | 796 | _('addremove cannot be run on a repo with largefiles')) |
|
871 | 797 | |
|
872 | 798 | return orig(ui, repo, *pats, **opts) |
|
873 | 799 | |
|
874 | 800 | # Calling purge with --all will cause the lfiles to be deleted. |
|
875 | 801 | # Override repo.status to prevent this from happening. |
|
876 | 802 | def override_purge(orig, ui, repo, *dirs, **opts): |
|
877 | 803 | oldstatus = repo.status |
|
878 | 804 | def override_status(node1='.', node2=None, match=None, ignored=False, |
|
879 | 805 | clean=False, unknown=False, listsubrepos=False): |
|
880 | 806 | r = oldstatus(node1, node2, match, ignored, clean, unknown, |
|
881 | 807 | listsubrepos) |
|
882 | 808 | lfdirstate = lfutil.openlfdirstate(ui, repo) |
|
883 | 809 | modified, added, removed, deleted, unknown, ignored, clean = r |
|
884 | 810 | unknown = [f for f in unknown if lfdirstate[f] == '?'] |
|
885 | 811 | ignored = [f for f in ignored if lfdirstate[f] == '?'] |
|
886 | 812 | return modified, added, removed, deleted, unknown, ignored, clean |
|
887 | 813 | repo.status = override_status |
|
888 | 814 | orig(ui, repo, *dirs, **opts) |
|
889 | 815 | repo.status = oldstatus |
|
890 | 816 | |
|
891 | 817 | def override_rollback(orig, ui, repo, **opts): |
|
892 | 818 | result = orig(ui, repo, **opts) |
|
893 | 819 | merge.update(repo, node=None, branchmerge=False, force=True, |
|
894 | 820 | partial=lfutil.isstandin) |
|
895 | 821 | lfdirstate = lfutil.openlfdirstate(ui, repo) |
|
896 | 822 | lfiles = lfutil.listlfiles(repo) |
|
897 | 823 | oldlfiles = lfutil.listlfiles(repo, repo[None].parents()[0].rev()) |
|
898 | 824 | for file in lfiles: |
|
899 | 825 | if file in oldlfiles: |
|
900 | 826 | lfdirstate.normallookup(file) |
|
901 | 827 | else: |
|
902 | 828 | lfdirstate.add(file) |
|
903 | 829 | lfdirstate.write() |
|
904 | 830 | return result |
@@ -1,162 +1,158 b'' | |||
|
1 | 1 | # Copyright 2011 Fog Creek Software |
|
2 | 2 | # |
|
3 | 3 | # This software may be used and distributed according to the terms of the |
|
4 | 4 | # GNU General Public License version 2 or any later version. |
|
5 | 5 | |
|
6 | 6 | import os |
|
7 | 7 | import tempfile |
|
8 | 8 | import urllib2 |
|
9 | 9 | |
|
10 | 10 | from mercurial import error, httprepo, util, wireproto |
|
11 | 11 | from mercurial.i18n import _ |
|
12 | 12 | |
|
13 | 13 | import lfutil |
|
14 | 14 | |
|
15 | 15 | LARGEFILES_REQUIRED_MSG = '\nThis repository uses the largefiles extension.' \ |
|
16 | 16 | '\n\nPlease enable it in your Mercurial config ' \ |
|
17 | 17 | 'file.\n' |
|
18 | 18 | |
|
19 | 19 | def putlfile(repo, proto, sha): |
|
20 | 20 | """putlfile puts a largefile into a repository's local cache and into the |
|
21 | 21 | system cache.""" |
|
22 | 22 | f = None |
|
23 | 23 | proto.redirect() |
|
24 | 24 | try: |
|
25 | 25 | try: |
|
26 | 26 | f = tempfile.NamedTemporaryFile(mode='wb+', prefix='hg-putlfile-') |
|
27 | 27 | proto.getfile(f) |
|
28 | 28 | f.seek(0) |
|
29 | 29 | if sha != lfutil.hexsha1(f): |
|
30 | 30 | return wireproto.pushres(1) |
|
31 | 31 | lfutil.copytocacheabsolute(repo, f.name, sha) |
|
32 | 32 | except IOError: |
|
33 | 33 | repo.ui.warn( |
|
34 | 34 | _('error: could not put received data into largefile store')) |
|
35 | 35 | return wireproto.pushres(1) |
|
36 | 36 | finally: |
|
37 | 37 | if f: |
|
38 | 38 | f.close() |
|
39 | 39 | |
|
40 | 40 | return wireproto.pushres(0) |
|
41 | 41 | |
|
42 | 42 | def getlfile(repo, proto, sha): |
|
43 | 43 | """getlfile retrieves a largefile from the repository-local cache or system |
|
44 | 44 | cache.""" |
|
45 | 45 | filename = lfutil.findfile(repo, sha) |
|
46 | 46 | if not filename: |
|
47 | 47 | raise util.Abort(_('requested largefile %s not present in cache') % sha) |
|
48 | 48 | f = open(filename, 'rb') |
|
49 | 49 | length = os.fstat(f.fileno())[6] |
|
50 | 50 | # since we can't set an HTTP content-length header here, and mercurial core |
|
51 | 51 | # provides no way to give the length of a streamres (and reading the entire |
|
52 | 52 | # file into RAM would be ill-advised), we just send the length on the first |
|
53 | 53 | # line of the response, like the ssh proto does for string responses. |
|
54 | 54 | def generator(): |
|
55 | 55 | yield '%d\n' % length |
|
56 | 56 | for chunk in f: |
|
57 | 57 | yield chunk |
|
58 | 58 | return wireproto.streamres(generator()) |
|
59 | 59 | |
|
60 | 60 | def statlfile(repo, proto, sha): |
|
61 | 61 | """statlfile sends '2\n' if the largefile is missing, '1\n' if it has a |
|
62 | 62 | mismatched checksum, or '0\n' if it is in good condition""" |
|
63 | 63 | filename = lfutil.findfile(repo, sha) |
|
64 | 64 | if not filename: |
|
65 | 65 | return '2\n' |
|
66 | 66 | fd = None |
|
67 | 67 | try: |
|
68 | 68 | fd = open(filename, 'rb') |
|
69 | 69 | return lfutil.hexsha1(fd) == sha and '0\n' or '1\n' |
|
70 | 70 | finally: |
|
71 | 71 | if fd: |
|
72 | 72 | fd.close() |
|
73 | 73 | |
|
74 | 74 | def wirereposetup(ui, repo): |
|
75 | 75 | class lfileswirerepository(repo.__class__): |
|
76 | 76 | def putlfile(self, sha, fd): |
|
77 | 77 | # unfortunately, httprepository._callpush tries to convert its |
|
78 | 78 | # input file-like into a bundle before sending it, so we can't use |
|
79 | 79 | # it ... |
|
80 | 80 | if issubclass(self.__class__, httprepo.httprepository): |
|
81 | 81 | try: |
|
82 | 82 | return int(self._call('putlfile', data=fd, sha=sha, |
|
83 | 83 | headers={'content-type':'application/mercurial-0.1'})) |
|
84 | 84 | except (ValueError, urllib2.HTTPError): |
|
85 | 85 | return 1 |
|
86 | 86 | # ... but we can't use sshrepository._call because the data= |
|
87 | 87 | # argument won't get sent, and _callpush does exactly what we want |
|
88 | 88 | # in this case: send the data straight through |
|
89 | 89 | else: |
|
90 | 90 | try: |
|
91 | 91 | ret, output = self._callpush("putlfile", fd, sha=sha) |
|
92 | 92 | if ret == "": |
|
93 | 93 | raise error.ResponseError(_('putlfile failed:'), |
|
94 | 94 | output) |
|
95 | 95 | return int(ret) |
|
96 | 96 | except IOError: |
|
97 | 97 | return 1 |
|
98 | 98 | except ValueError: |
|
99 | 99 | raise error.ResponseError( |
|
100 | 100 | _('putlfile failed (unexpected response):'), ret) |
|
101 | 101 | |
|
102 | 102 | def getlfile(self, sha): |
|
103 | 103 | stream = self._callstream("getlfile", sha=sha) |
|
104 | 104 | length = stream.readline() |
|
105 | 105 | try: |
|
106 | 106 | length = int(length) |
|
107 | 107 | except ValueError: |
|
108 | 108 | self._abort(error.ResponseError(_("unexpected response:"), |
|
109 | 109 | length)) |
|
110 | 110 | return (length, stream) |
|
111 | 111 | |
|
112 | 112 | def statlfile(self, sha): |
|
113 | 113 | try: |
|
114 | 114 | return int(self._call("statlfile", sha=sha)) |
|
115 | 115 | except (ValueError, urllib2.HTTPError): |
|
116 | 116 | # if the server returns anything but an integer followed by a |
|
117 | 117 | # newline, newline, it's not speaking our language; if we get |
|
118 | 118 | # an HTTP error, we can't be sure the largefile is present; |
|
119 | 119 | # either way, consider it missing |
|
120 | 120 | return 2 |
|
121 | 121 | |
|
122 | 122 | repo.__class__ = lfileswirerepository |
|
123 | 123 | |
|
124 | 124 | # advertise the largefiles=serve capability |
|
125 | 125 | def capabilities(repo, proto): |
|
126 | 126 | return capabilities_orig(repo, proto) + ' largefiles=serve' |
|
127 | 127 | |
|
128 | 128 | # duplicate what Mercurial's new out-of-band errors mechanism does, because |
|
129 | 129 | # clients old and new alike both handle it well |
|
130 | 130 | def webproto_refuseclient(self, message): |
|
131 | 131 | self.req.header([('Content-Type', 'application/hg-error')]) |
|
132 | 132 | return message |
|
133 | 133 | |
|
134 | 134 | def sshproto_refuseclient(self, message): |
|
135 | 135 | self.ui.write_err('%s\n-\n' % message) |
|
136 | 136 | self.fout.write('\n') |
|
137 | 137 | self.fout.flush() |
|
138 | 138 | |
|
139 | 139 | return '' |
|
140 | 140 | |
|
141 | 141 | def heads(repo, proto): |
|
142 | 142 | if lfutil.islfilesrepo(repo): |
|
143 | try: | |
|
144 | # Mercurial >= f4522df38c65 | |
|
145 | 143 |
|
|
146 | except AttributeError: | |
|
147 | return proto.refuseclient(LARGEFILES_REQUIRED_MSG) | |
|
148 | 144 | return wireproto.heads(repo, proto) |
|
149 | 145 | |
|
150 | 146 | def sshrepo_callstream(self, cmd, **args): |
|
151 | 147 | if cmd == 'heads' and self.capable('largefiles'): |
|
152 | 148 | cmd = 'lheads' |
|
153 | 149 | if cmd == 'batch' and self.capable('largefiles'): |
|
154 | 150 | args['cmds'] = args['cmds'].replace('heads ', 'lheads ') |
|
155 | 151 | return ssh_oldcallstream(self, cmd, **args) |
|
156 | 152 | |
|
157 | 153 | def httprepo_callstream(self, cmd, **args): |
|
158 | 154 | if cmd == 'heads' and self.capable('largefiles'): |
|
159 | 155 | cmd = 'lheads' |
|
160 | 156 | if cmd == 'batch' and self.capable('largefiles'): |
|
161 | 157 | args['cmds'] = args['cmds'].replace('heads ', 'lheads ') |
|
162 | 158 | return http_oldcallstream(self, cmd, **args) |
@@ -1,411 +1,397 b'' | |||
|
1 | 1 | # Copyright 2009-2010 Gregory P. Ward |
|
2 | 2 | # Copyright 2009-2010 Intelerad Medical Systems Incorporated |
|
3 | 3 | # Copyright 2010-2011 Fog Creek Software |
|
4 | 4 | # Copyright 2010-2011 Unity Technologies |
|
5 | 5 | # |
|
6 | 6 | # This software may be used and distributed according to the terms of the |
|
7 | 7 | # GNU General Public License version 2 or any later version. |
|
8 | 8 | |
|
9 | 9 | '''setup for largefiles repositories: reposetup''' |
|
10 | 10 | import copy |
|
11 | 11 | import types |
|
12 | 12 | import os |
|
13 | 13 | import re |
|
14 | 14 | |
|
15 | 15 | from mercurial import context, error, manifest, match as match_, \ |
|
16 | 16 | node, util |
|
17 | 17 | from mercurial.i18n import _ |
|
18 | 18 | |
|
19 | 19 | import lfcommands |
|
20 | 20 | import proto |
|
21 | 21 | import lfutil |
|
22 | 22 | |
|
23 | 23 | def reposetup(ui, repo): |
|
24 | 24 | # wire repositories should be given new wireproto functions but not the |
|
25 | 25 | # other largefiles modifications |
|
26 | 26 | if not repo.local(): |
|
27 | 27 | return proto.wirereposetup(ui, repo) |
|
28 | 28 | |
|
29 | 29 | for name in ('status', 'commitctx', 'commit', 'push'): |
|
30 | 30 | method = getattr(repo, name) |
|
31 | 31 | #if not (isinstance(method, types.MethodType) and |
|
32 | 32 | # method.im_func is repo.__class__.commitctx.im_func): |
|
33 | 33 | if isinstance(method, types.FunctionType) and method.func_name == \ |
|
34 | 34 | 'wrap': |
|
35 | 35 | ui.warn(_('largefiles: repo method %r appears to have already been' |
|
36 | 36 | ' wrapped by another extension: ' |
|
37 | 37 | 'largefiles may behave incorrectly\n') |
|
38 | 38 | % name) |
|
39 | 39 | |
|
40 | 40 | class lfiles_repo(repo.__class__): |
|
41 | 41 | lfstatus = False |
|
42 | 42 | def status_nolfiles(self, *args, **kwargs): |
|
43 | 43 | return super(lfiles_repo, self).status(*args, **kwargs) |
|
44 | 44 | |
|
45 | 45 | # When lfstatus is set, return a context that gives the names of lfiles |
|
46 | 46 | # instead of their corresponding standins and identifies the lfiles as |
|
47 | 47 | # always binary, regardless of their actual contents. |
|
48 | 48 | def __getitem__(self, changeid): |
|
49 | 49 | ctx = super(lfiles_repo, self).__getitem__(changeid) |
|
50 | 50 | if self.lfstatus: |
|
51 | 51 | class lfiles_manifestdict(manifest.manifestdict): |
|
52 | 52 | def __contains__(self, filename): |
|
53 | 53 | if super(lfiles_manifestdict, |
|
54 | 54 | self).__contains__(filename): |
|
55 | 55 | return True |
|
56 | 56 | return super(lfiles_manifestdict, |
|
57 | 57 | self).__contains__(lfutil.shortname+'/' + filename) |
|
58 | 58 | class lfiles_ctx(ctx.__class__): |
|
59 | 59 | def files(self): |
|
60 | 60 | filenames = super(lfiles_ctx, self).files() |
|
61 | 61 | return [re.sub('^\\'+lfutil.shortname+'/', '', |
|
62 | 62 | filename) for filename in filenames] |
|
63 | 63 | def manifest(self): |
|
64 | 64 | man1 = super(lfiles_ctx, self).manifest() |
|
65 | 65 | man1.__class__ = lfiles_manifestdict |
|
66 | 66 | return man1 |
|
67 | 67 | def filectx(self, path, fileid=None, filelog=None): |
|
68 | 68 | try: |
|
69 | 69 | result = super(lfiles_ctx, self).filectx(path, |
|
70 | 70 | fileid, filelog) |
|
71 | 71 | except error.LookupError: |
|
72 | 72 | # Adding a null character will cause Mercurial to |
|
73 | 73 | # identify this as a binary file. |
|
74 | 74 | result = super(lfiles_ctx, self).filectx( |
|
75 | 75 | lfutil.shortname + '/' + path, fileid, |
|
76 | 76 | filelog) |
|
77 | 77 | olddata = result.data |
|
78 | 78 | result.data = lambda: olddata() + '\0' |
|
79 | 79 | return result |
|
80 | 80 | ctx.__class__ = lfiles_ctx |
|
81 | 81 | return ctx |
|
82 | 82 | |
|
83 | 83 | # Figure out the status of big files and insert them into the |
|
84 | 84 | # appropriate list in the result. Also removes standin files from |
|
85 | 85 | # the listing. This function reverts to the original status if |
|
86 | 86 | # self.lfstatus is False |
|
87 | 87 | def status(self, node1='.', node2=None, match=None, ignored=False, |
|
88 | 88 | clean=False, unknown=False, listsubrepos=False): |
|
89 | 89 | listignored, listclean, listunknown = ignored, clean, unknown |
|
90 | 90 | if not self.lfstatus: |
|
91 | 91 | try: |
|
92 | 92 | return super(lfiles_repo, self).status(node1, node2, match, |
|
93 | 93 | listignored, listclean, listunknown, listsubrepos) |
|
94 | 94 | except TypeError: |
|
95 | 95 | return super(lfiles_repo, self).status(node1, node2, match, |
|
96 | 96 | listignored, listclean, listunknown) |
|
97 | 97 | else: |
|
98 | 98 | # some calls in this function rely on the old version of status |
|
99 | 99 | self.lfstatus = False |
|
100 | 100 | if isinstance(node1, context.changectx): |
|
101 | 101 | ctx1 = node1 |
|
102 | 102 | else: |
|
103 | 103 | ctx1 = repo[node1] |
|
104 | 104 | if isinstance(node2, context.changectx): |
|
105 | 105 | ctx2 = node2 |
|
106 | 106 | else: |
|
107 | 107 | ctx2 = repo[node2] |
|
108 | 108 | working = ctx2.rev() is None |
|
109 | 109 | parentworking = working and ctx1 == self['.'] |
|
110 | 110 | |
|
111 | 111 | def inctx(file, ctx): |
|
112 | 112 | try: |
|
113 | 113 | if ctx.rev() is None: |
|
114 | 114 | return file in ctx.manifest() |
|
115 | 115 | ctx[file] |
|
116 | 116 | return True |
|
117 | 117 | except KeyError: |
|
118 | 118 | return False |
|
119 | 119 | |
|
120 | 120 | # create a copy of match that matches standins instead of |
|
121 | 121 | # lfiles if matcher not set then it is the always matcher so |
|
122 | 122 | # overwrite that |
|
123 | 123 | if match is None: |
|
124 | 124 | match = match_.always(self.root, self.getcwd()) |
|
125 | 125 | |
|
126 | 126 | def tostandin(file): |
|
127 | 127 | if inctx(lfutil.standin(file), ctx2): |
|
128 | 128 | return lfutil.standin(file) |
|
129 | 129 | return file |
|
130 | 130 | |
|
131 | 131 | m = copy.copy(match) |
|
132 | 132 | m._files = [tostandin(f) for f in m._files] |
|
133 | 133 | |
|
134 | 134 | # get ignored clean and unknown but remove them later if they |
|
135 | 135 | # were not asked for |
|
136 | 136 | try: |
|
137 | 137 | result = super(lfiles_repo, self).status(node1, node2, m, |
|
138 | 138 | True, True, True, listsubrepos) |
|
139 | 139 | except TypeError: |
|
140 | 140 | result = super(lfiles_repo, self).status(node1, node2, m, |
|
141 | 141 | True, True, True) |
|
142 | 142 | if working: |
|
143 | 143 | # Hold the wlock while we read lfiles and update the |
|
144 | 144 | # lfdirstate |
|
145 | 145 | wlock = repo.wlock() |
|
146 | 146 | try: |
|
147 | 147 | # Any non lfiles that were explicitly listed must be |
|
148 | 148 | # taken out or lfdirstate.status will report an error. |
|
149 | 149 | # The status of these files was already computed using |
|
150 | 150 | # super's status. |
|
151 | 151 | lfdirstate = lfutil.openlfdirstate(ui, self) |
|
152 | 152 | match._files = [f for f in match._files if f in |
|
153 | 153 | lfdirstate] |
|
154 | 154 | s = lfdirstate.status(match, [], listignored, |
|
155 | 155 | listclean, listunknown) |
|
156 | 156 | (unsure, modified, added, removed, missing, unknown, |
|
157 | 157 | ignored, clean) = s |
|
158 | 158 | if parentworking: |
|
159 | 159 | for lfile in unsure: |
|
160 | 160 | if ctx1[lfutil.standin(lfile)].data().strip() \ |
|
161 | 161 | != lfutil.hashfile(self.wjoin(lfile)): |
|
162 | 162 | modified.append(lfile) |
|
163 | 163 | else: |
|
164 | 164 | clean.append(lfile) |
|
165 | 165 | lfdirstate.normal(lfile) |
|
166 | 166 | lfdirstate.write() |
|
167 | 167 | else: |
|
168 | 168 | tocheck = unsure + modified + added + clean |
|
169 | 169 | modified, added, clean = [], [], [] |
|
170 | 170 | |
|
171 | 171 | for lfile in tocheck: |
|
172 | 172 | standin = lfutil.standin(lfile) |
|
173 | 173 | if inctx(standin, ctx1): |
|
174 | 174 | if ctx1[standin].data().strip() != \ |
|
175 | 175 | lfutil.hashfile(self.wjoin(lfile)): |
|
176 | 176 | modified.append(lfile) |
|
177 | 177 | else: |
|
178 | 178 | clean.append(lfile) |
|
179 | 179 | else: |
|
180 | 180 | added.append(lfile) |
|
181 | 181 | finally: |
|
182 | 182 | wlock.release() |
|
183 | 183 | |
|
184 | 184 | for standin in ctx1.manifest(): |
|
185 | 185 | if not lfutil.isstandin(standin): |
|
186 | 186 | continue |
|
187 | 187 | lfile = lfutil.splitstandin(standin) |
|
188 | 188 | if not match(lfile): |
|
189 | 189 | continue |
|
190 | 190 | if lfile not in lfdirstate: |
|
191 | 191 | removed.append(lfile) |
|
192 | 192 | # Handle unknown and ignored differently |
|
193 | 193 | lfiles = (modified, added, removed, missing, [], [], clean) |
|
194 | 194 | result = list(result) |
|
195 | 195 | # Unknown files |
|
196 | 196 | result[4] = [f for f in unknown if repo.dirstate[f] == '?'\ |
|
197 | 197 | and not lfutil.isstandin(f)] |
|
198 | 198 | # Ignored files must be ignored by both the dirstate and |
|
199 | 199 | # lfdirstate |
|
200 | 200 | result[5] = set(ignored).intersection(set(result[5])) |
|
201 | 201 | # combine normal files and lfiles |
|
202 | 202 | normals = [[fn for fn in filelist if not \ |
|
203 | 203 | lfutil.isstandin(fn)] for filelist in result] |
|
204 | 204 | result = [sorted(list1 + list2) for (list1, list2) in \ |
|
205 | 205 | zip(normals, lfiles)] |
|
206 | 206 | else: |
|
207 | 207 | def toname(f): |
|
208 | 208 | if lfutil.isstandin(f): |
|
209 | 209 | return lfutil.splitstandin(f) |
|
210 | 210 | return f |
|
211 | 211 | result = [[toname(f) for f in items] for items in result] |
|
212 | 212 | |
|
213 | 213 | if not listunknown: |
|
214 | 214 | result[4] = [] |
|
215 | 215 | if not listignored: |
|
216 | 216 | result[5] = [] |
|
217 | 217 | if not listclean: |
|
218 | 218 | result[6] = [] |
|
219 | 219 | self.lfstatus = True |
|
220 | 220 | return result |
|
221 | 221 | |
|
222 | 222 | # This call happens after a commit has occurred. Copy all of the lfiles |
|
223 | 223 | # into the cache |
|
224 | 224 | def commitctx(self, *args, **kwargs): |
|
225 | 225 | node = super(lfiles_repo, self).commitctx(*args, **kwargs) |
|
226 | 226 | ctx = self[node] |
|
227 | 227 | for filename in ctx.files(): |
|
228 | 228 | if lfutil.isstandin(filename) and filename in ctx.manifest(): |
|
229 | 229 | realfile = lfutil.splitstandin(filename) |
|
230 | 230 | lfutil.copytocache(self, ctx.node(), realfile) |
|
231 | 231 | |
|
232 | 232 | return node |
|
233 | 233 | |
|
234 | 234 | # This call happens before a commit has occurred. The lfile standins |
|
235 | 235 | # have not had their contents updated (to reflect the hash of their |
|
236 | 236 | # lfile). Do that here. |
|
237 | 237 | def commit(self, text="", user=None, date=None, match=None, |
|
238 | 238 | force=False, editor=False, extra={}): |
|
239 | 239 | orig = super(lfiles_repo, self).commit |
|
240 | 240 | |
|
241 | 241 | wlock = repo.wlock() |
|
242 | 242 | try: |
|
243 | 243 | if getattr(repo, "_isrebasing", False): |
|
244 | 244 | # We have to take the time to pull down the new lfiles now. |
|
245 | 245 | # Otherwise if we are rebasing, any lfiles that were |
|
246 | 246 | # modified in the changesets we are rebasing on top of get |
|
247 | 247 | # overwritten either by the rebase or in the first commit |
|
248 | 248 | # after the rebase. |
|
249 | 249 | lfcommands.updatelfiles(repo.ui, repo) |
|
250 | 250 | # Case 1: user calls commit with no specific files or |
|
251 | 251 | # include/exclude patterns: refresh and commit everything. |
|
252 | 252 | if (match is None) or (not match.anypats() and not \ |
|
253 | 253 | match.files()): |
|
254 | 254 | lfiles = lfutil.listlfiles(self) |
|
255 | 255 | lfdirstate = lfutil.openlfdirstate(ui, self) |
|
256 | 256 | # this only loops through lfiles that exist (not |
|
257 | 257 | # removed/renamed) |
|
258 | 258 | for lfile in lfiles: |
|
259 | 259 | if os.path.exists(self.wjoin(lfutil.standin(lfile))): |
|
260 | 260 | # this handles the case where a rebase is being |
|
261 | 261 | # performed and the working copy is not updated |
|
262 | 262 | # yet. |
|
263 | 263 | if os.path.exists(self.wjoin(lfile)): |
|
264 | 264 | lfutil.updatestandin(self, |
|
265 | 265 | lfutil.standin(lfile)) |
|
266 | 266 | lfdirstate.normal(lfile) |
|
267 | 267 | for lfile in lfdirstate: |
|
268 | 268 | if not os.path.exists( |
|
269 | 269 | repo.wjoin(lfutil.standin(lfile))): |
|
270 | try: | |
|
271 | # Mercurial >= 1.9 | |
|
272 | 270 |
|
|
273 | except AttributeError: | |
|
274 | # Mercurial <= 1.8 | |
|
275 | lfdirstate.forget(lfile) | |
|
276 | 271 | lfdirstate.write() |
|
277 | 272 | |
|
278 | 273 | return orig(text=text, user=user, date=date, match=match, |
|
279 | 274 | force=force, editor=editor, extra=extra) |
|
280 | 275 | |
|
281 | 276 | for file in match.files(): |
|
282 | 277 | if lfutil.isstandin(file): |
|
283 | 278 | raise util.Abort( |
|
284 | 279 | "Don't commit largefile standin. Commit largefile.") |
|
285 | 280 | |
|
286 | 281 | # Case 2: user calls commit with specified patterns: refresh |
|
287 | 282 | # any matching big files. |
|
288 | 283 | smatcher = lfutil.composestandinmatcher(self, match) |
|
289 | 284 | standins = lfutil.dirstate_walk(self.dirstate, smatcher) |
|
290 | 285 | |
|
291 | 286 | # No matching big files: get out of the way and pass control to |
|
292 | 287 | # the usual commit() method. |
|
293 | 288 | if not standins: |
|
294 | 289 | return orig(text=text, user=user, date=date, match=match, |
|
295 | 290 | force=force, editor=editor, extra=extra) |
|
296 | 291 | |
|
297 | 292 | # Refresh all matching big files. It's possible that the |
|
298 | 293 | # commit will end up failing, in which case the big files will |
|
299 | 294 | # stay refreshed. No harm done: the user modified them and |
|
300 | 295 | # asked to commit them, so sooner or later we're going to |
|
301 | 296 | # refresh the standins. Might as well leave them refreshed. |
|
302 | 297 | lfdirstate = lfutil.openlfdirstate(ui, self) |
|
303 | 298 | for standin in standins: |
|
304 | 299 | lfile = lfutil.splitstandin(standin) |
|
305 | 300 | if lfdirstate[lfile] <> 'r': |
|
306 | 301 | lfutil.updatestandin(self, standin) |
|
307 | 302 | lfdirstate.normal(lfile) |
|
308 | 303 | else: |
|
309 | try: | |
|
310 | # Mercurial >= 1.9 | |
|
311 | 304 |
|
|
312 | except AttributeError: | |
|
313 | # Mercurial <= 1.8 | |
|
314 | lfdirstate.forget(lfile) | |
|
315 | 305 | lfdirstate.write() |
|
316 | 306 | |
|
317 | 307 | # Cook up a new matcher that only matches regular files or |
|
318 | 308 | # standins corresponding to the big files requested by the |
|
319 | 309 | # user. Have to modify _files to prevent commit() from |
|
320 | 310 | # complaining "not tracked" for big files. |
|
321 | 311 | lfiles = lfutil.listlfiles(repo) |
|
322 | 312 | match = copy.copy(match) |
|
323 | 313 | orig_matchfn = match.matchfn |
|
324 | 314 | |
|
325 | 315 | # Check both the list of lfiles and the list of standins |
|
326 | 316 | # because if a lfile was removed, it won't be in the list of |
|
327 | 317 | # lfiles at this point |
|
328 | 318 | match._files += sorted(standins) |
|
329 | 319 | |
|
330 | 320 | actualfiles = [] |
|
331 | 321 | for f in match._files: |
|
332 | 322 | fstandin = lfutil.standin(f) |
|
333 | 323 | |
|
334 | 324 | # Ignore known lfiles and standins |
|
335 | 325 | if f in lfiles or fstandin in standins: |
|
336 | 326 | continue |
|
337 | 327 | |
|
338 | 328 | # Append directory separator to avoid collisions |
|
339 | 329 | if not fstandin.endswith(os.sep): |
|
340 | 330 | fstandin += os.sep |
|
341 | 331 | |
|
342 | 332 | # Prevalidate matching standin directories |
|
343 | 333 | if lfutil.any_(st for st in match._files if \ |
|
344 | 334 | st.startswith(fstandin)): |
|
345 | 335 | continue |
|
346 | 336 | actualfiles.append(f) |
|
347 | 337 | match._files = actualfiles |
|
348 | 338 | |
|
349 | 339 | def matchfn(f): |
|
350 | 340 | if orig_matchfn(f): |
|
351 | 341 | return f not in lfiles |
|
352 | 342 | else: |
|
353 | 343 | return f in standins |
|
354 | 344 | |
|
355 | 345 | match.matchfn = matchfn |
|
356 | 346 | return orig(text=text, user=user, date=date, match=match, |
|
357 | 347 | force=force, editor=editor, extra=extra) |
|
358 | 348 | finally: |
|
359 | 349 | wlock.release() |
|
360 | 350 | |
|
361 | 351 | def push(self, remote, force=False, revs=None, newbranch=False): |
|
362 | 352 | o = lfutil.findoutgoing(repo, remote, force) |
|
363 | 353 | if o: |
|
364 | 354 | toupload = set() |
|
365 | 355 | o = repo.changelog.nodesbetween(o, revs)[0] |
|
366 | 356 | for n in o: |
|
367 | 357 | parents = [p for p in repo.changelog.parents(n) if p != \ |
|
368 | 358 | node.nullid] |
|
369 | 359 | ctx = repo[n] |
|
370 | 360 | files = set(ctx.files()) |
|
371 | 361 | if len(parents) == 2: |
|
372 | 362 | mc = ctx.manifest() |
|
373 | 363 | mp1 = ctx.parents()[0].manifest() |
|
374 | 364 | mp2 = ctx.parents()[1].manifest() |
|
375 | 365 | for f in mp1: |
|
376 | 366 | if f not in mc: |
|
377 | 367 | files.add(f) |
|
378 | 368 | for f in mp2: |
|
379 | 369 | if f not in mc: |
|
380 | 370 | files.add(f) |
|
381 | 371 | for f in mc: |
|
382 | 372 | if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, |
|
383 | 373 | None): |
|
384 | 374 | files.add(f) |
|
385 | 375 | |
|
386 | 376 | toupload = toupload.union(set([ctx[f].data().strip() for f\ |
|
387 | 377 | in files if lfutil.isstandin(f) and f in ctx])) |
|
388 | 378 | lfcommands.uploadlfiles(ui, self, remote, toupload) |
|
389 | # Mercurial >= 1.6 takes the newbranch argument, try that first. | |
|
390 | try: | |
|
391 | 379 |
|
|
392 | 380 |
|
|
393 | except TypeError: | |
|
394 | return super(lfiles_repo, self).push(remote, force, revs) | |
|
395 | 381 | |
|
396 | 382 | repo.__class__ = lfiles_repo |
|
397 | 383 | |
|
398 | 384 | def checkrequireslfiles(ui, repo, **kwargs): |
|
399 | 385 | if 'largefiles' not in repo.requirements and lfutil.any_( |
|
400 | 386 | lfutil.shortname+'/' in f[0] for f in repo.store.datafiles()): |
|
401 | 387 | # work around bug in mercurial 1.9 whereby requirements is a list |
|
402 | 388 | # on newly-cloned repos |
|
403 | 389 | repo.requirements = set(repo.requirements) |
|
404 | 390 | |
|
405 | 391 | repo.requirements |= set(['largefiles']) |
|
406 | 392 | repo._writerequirements() |
|
407 | 393 | |
|
408 | 394 | checkrequireslfiles(ui, repo) |
|
409 | 395 | |
|
410 | 396 | ui.setconfig('hooks', 'changegroup.lfiles', checkrequireslfiles) |
|
411 | 397 | ui.setconfig('hooks', 'commit.lfiles', checkrequireslfiles) |
General Comments 0
You need to be logged in to leave comments.
Login now