##// END OF EJS Templates
largefiles: factor out procedures to update lfdirstate for post-committing...
FUJIWARA Katsunori -
r23184:3100d1cb default
parent child Browse files
Show More
@@ -1,417 +1,429 b''
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''largefiles utility code: must not import other modules in this package.'''
10 10
11 11 import os
12 12 import platform
13 13 import shutil
14 14 import stat
15 15
16 16 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
17 17 from mercurial.i18n import _
18 18 from mercurial import node
19 19
20 20 shortname = '.hglf'
21 21 shortnameslash = shortname + '/'
22 22 longname = 'largefiles'
23 23
24 24
25 25 # -- Private worker functions ------------------------------------------
26 26
def getminsize(ui, assumelfiles, opt, default=10):
    '''Return the minimum size (in MB) above which files are treated as
    largefiles, from the command line option, the configuration, or the
    default. Abort when no usable size can be determined or the
    configured value is not numeric.'''
    lfsize = opt
    if not lfsize and assumelfiles:
        lfsize = ui.config(longname, 'minsize', default=default)
    if lfsize is None:
        raise util.Abort(_('minimum size for largefiles must be specified'))
    if lfsize:
        try:
            return float(lfsize)
        except ValueError:
            raise util.Abort(_('largefiles: size must be number (not %s)\n')
                             % lfsize)
    return lfsize
40 40
def link(src, dest):
    '''Hardlink src to dest, creating dest's parent directories as
    needed. When hardlinking fails (e.g. cross-device or unsupported
    filesystem), fall back to an atomic copy that also replicates
    src's permission bits.'''
    util.makedirs(os.path.dirname(dest))
    try:
        util.oslink(src, dest)
    except OSError:
        # if hardlinks fail, fallback on atomic copy
        fsrc = open(src, 'rb')
        try:
            dst = util.atomictempfile(dest)
            for chunk in util.filechunkiter(fsrc):
                dst.write(chunk)
            dst.close()
        finally:
            # close the source explicitly: relying on garbage collection
            # leaks the descriptor on non-refcounting interpreters
            fsrc.close()
        os.chmod(dest, os.stat(src).st_mode)
52 52
def usercachepath(ui, hash):
    '''Return the path of the given hash inside the user-level largefile
    cache, or a falsy value (None or '') when no cache location can be
    determined.

    The [largefiles] usercache setting takes precedence; otherwise the
    conventional per-platform cache location is used: LOCALAPPDATA (or
    APPDATA) on Windows, ~/Library/Caches on Mac OS X, and
    XDG_CACHE_HOME or ~/.cache on other POSIX systems.
    '''
    path = ui.configpath(longname, 'usercache', None)
    if path:
        path = os.path.join(path, hash)
    else:
        if os.name == 'nt':
            # prefer the local (non-roaming) application data directory
            appdata = os.getenv('LOCALAPPDATA', os.getenv('APPDATA'))
            if appdata:
                path = os.path.join(appdata, longname, hash)
        elif platform.system() == 'Darwin':
            # checked before the generic posix branch: os.name is also
            # 'posix' on Mac OS X
            home = os.getenv('HOME')
            if home:
                path = os.path.join(home, 'Library', 'Caches',
                                    longname, hash)
        elif os.name == 'posix':
            path = os.getenv('XDG_CACHE_HOME')
            if path:
                path = os.path.join(path, longname, hash)
            else:
                home = os.getenv('HOME')
                if home:
                    path = os.path.join(home, '.cache', longname, hash)
        else:
            raise util.Abort(_('unknown operating system: %s\n') % os.name)
    return path
78 78
def inusercache(ui, hash):
    '''Report whether the largefile with the given hash is present in
    the user-level cache.'''
    path = usercachepath(ui, hash)
    if not path:
        # no cache location available; propagate the falsy value
        return path
    return os.path.exists(path)
82 82
def findfile(repo, hash):
    '''Locate the largefile with the given hash, preferring the local
    store over the user cache; a cache hit is hardlinked into the
    store. Return the store path, or None when unavailable.'''
    if instore(repo, hash):
        repo.ui.note(_('found %s in store\n') % hash)
        return storepath(repo, hash)
    if inusercache(repo.ui, hash):
        repo.ui.note(_('found %s in system cache\n') % hash)
        path = storepath(repo, hash)
        link(usercachepath(repo.ui, hash), path)
        return path
    return None
93 93
class largefilesdirstate(dirstate.dirstate):
    '''dirstate subclass that normalizes every path to the
    slash-separated form dirstate uses internally before delegating,
    so callers may pass OS-native paths.'''
    def __getitem__(self, key):
        return super(largefilesdirstate, self).__getitem__(unixpath(key))
    def normal(self, f):
        return super(largefilesdirstate, self).normal(unixpath(f))
    def remove(self, f):
        return super(largefilesdirstate, self).remove(unixpath(f))
    def add(self, f):
        return super(largefilesdirstate, self).add(unixpath(f))
    def drop(self, f):
        return super(largefilesdirstate, self).drop(unixpath(f))
    def forget(self, f):
        return super(largefilesdirstate, self).forget(unixpath(f))
    def normallookup(self, f):
        return super(largefilesdirstate, self).normallookup(unixpath(f))
    def _ignore(self, f):
        # largefiles are never subject to ignore rules
        return False
111 111
def openlfdirstate(ui, repo, create=True):
    '''
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.
    '''
    lfstoredir = repo.join(longname)
    opener = scmutil.opener(lfstoredir)
    lfdirstate = largefilesdirstate(opener, ui, repo.root,
                                    repo.dirstate._validate)

    # If the largefiles dirstate does not exist, populate and create
    # it. This ensures that we create it on the first meaningful
    # largefiles operation in a new clone.
    if create and not os.path.exists(os.path.join(lfstoredir, 'dirstate')):
        matcher = getstandinmatcher(repo)
        standins = repo.dirstate.walk(matcher, [], False, False)

        # only create the store directory when there is something to track
        if len(standins) > 0:
            util.makedirs(lfstoredir)

        for standin in standins:
            lfile = splitstandin(standin)
            # normallookup forces a content check on the next status call,
            # since the working-copy largefile may not match the standin yet
            lfdirstate.normallookup(lfile)
    return lfdirstate
136 136
def lfdirstatestatus(lfdirstate, repo):
    '''Return the largefile status against the working directory parent,
    resolving "unsure" entries by rehashing the working copy.

    Files verified clean are recorded as normal in lfdirstate so the
    next status call can skip rehashing them (the dirstate is mutated
    but not written here).'''
    wctx = repo['.']
    match = match_.always(repo.root, repo.getcwd())
    unsure, s = lfdirstate.status(match, [], False, False, False)
    # mutate s.modified / s.clean in place: the returned status object
    # reflects the resolved classification
    modified, clean = s.modified, s.clean
    for lfile in unsure:
        try:
            fctx = wctx[standin(lfile)]
        except LookupError:
            fctx = None
        # missing standin, or hash mismatch with the working copy file,
        # means the largefile is modified
        if not fctx or fctx.data().strip() != hashfile(repo.wjoin(lfile)):
            modified.append(lfile)
        else:
            clean.append(lfile)
            lfdirstate.normal(lfile)
    return s
153 153
def listlfiles(repo, rev=None, matcher=None):
    '''return a list of largefiles in the working copy or the
    specified changeset'''
    if matcher is None:
        matcher = getstandinmatcher(repo)

    lfiles = []
    for f in repo[rev].walk(matcher):
        # ignore unknown files in working directory
        if rev is None and repo.dirstate[f] == '?':
            continue
        lfiles.append(splitstandin(f))
    return lfiles
165 165
def instore(repo, hash):
    '''Report whether the largefile with the given hash exists in the
    local store.'''
    path = storepath(repo, hash)
    return os.path.exists(path)
168 168
def storepath(repo, hash):
    '''Return the absolute path of the given hash inside the repo-local
    largefile store.'''
    relpath = os.path.join(longname, hash)
    return repo.join(relpath)
171 171
def copyfromcache(repo, hash, filename):
    '''Copy the specified largefile from the repo or system cache to
    filename in the repository. Return true on success or false if the
    file was not found in either cache (which should not happen:
    this is meant to be called only after ensuring that the needed
    largefile exists in the cache).'''
    path = findfile(repo, hash)
    if path is None:
        return False
    absdst = repo.wjoin(filename)
    util.makedirs(os.path.dirname(absdst))
    # The write may fail before the file is fully written, but we
    # don't use atomic writes in the working copy.
    shutil.copy(path, absdst)
    return True
186 186
def copytostore(repo, rev, file, uploaded=False):
    '''Copy the largefile standing behind 'file' at 'rev' into the
    local store, unless it is already present.
    (The 'uploaded' argument is unused but kept for interface
    compatibility with existing callers.)'''
    hash = readstandin(repo, file, rev)
    if not instore(repo, hash):
        copytostoreabsolute(repo, repo.wjoin(file), hash)
192 192
def copyalltostore(repo, node):
    '''Copy all largefiles in a given revision to the store'''
    ctx = repo[node]
    for filename in ctx.files():
        # only standins that survive in the revision's manifest matter
        if not isstandin(filename):
            continue
        if filename not in ctx.manifest():
            continue
        copytostore(repo, ctx.node(), splitstandin(filename))
201 201
202 202
def copytostoreabsolute(repo, file, hash):
    '''Copy the largefile at the absolute path 'file' into the local
    store under 'hash' (atomically), then link it into the user cache.
    A user-cache hit is hardlinked instead of re-copied; during a
    conversion (repo._isconverting) only the cache path is used.'''
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    elif not getattr(repo, "_isconverting", False):
        util.makedirs(os.path.dirname(storepath(repo, hash)))
        dst = util.atomictempfile(storepath(repo, hash),
                                  createmode=repo.store.createmode)
        # open/close the source explicitly instead of leaking the
        # descriptor to garbage collection
        fsrc = open(file, 'rb')
        try:
            for chunk in util.filechunkiter(fsrc):
                dst.write(chunk)
        finally:
            fsrc.close()
        dst.close()
        linktousercache(repo, hash)
214 214
def linktousercache(repo, hash):
    '''Hardlink the stored largefile into the user cache, when a cache
    location is available.'''
    usercache = usercachepath(repo.ui, hash)
    if usercache:
        link(storepath(repo, hash), usercache)
219 219
def getstandinmatcher(repo, pats=None, opts=None):
    '''Return a match object that applies pats to the standin directory'''
    # avoid mutable default arguments ([], {}): a single shared object
    # would let one caller's mutation leak into every later call
    if opts is None:
        opts = {}
    standindir = repo.wjoin(shortname)
    if pats:
        pats = [os.path.join(standindir, pat) for pat in pats]
    else:
        # no patterns: relative to repo root
        pats = [standindir]
    # no warnings about missing files or directories
    match = scmutil.match(repo[None], pats, opts)
    match.bad = lambda f, msg: None
    return match
232 232
def composestandinmatcher(repo, rmatcher):
    '''Return a matcher that accepts standins corresponding to the
    files accepted by rmatcher. Pass the list of files in the matcher
    as the paths specified by the user.'''
    smatcher = getstandinmatcher(repo, rmatcher.files())
    # keep the original standin predicate, then require that the
    # corresponding largefile also matches rmatcher
    standinmatchfn = smatcher.matchfn
    smatcher.matchfn = (lambda f: standinmatchfn(f)
                        and rmatcher.matchfn(splitstandin(f)))
    return smatcher
244 244
def standin(filename):
    '''Return the repo-relative path to the standin for the specified big
    file.'''
    # Note 1: callers that need an absolute path must repo.wjoin() the
    # result themselves; addlargefiles, for one, needs it repo-relative
    # so it can be passed to repo[None].add().
    # Note 2: dirstate always uses '/' as the separator, even on
    # Windows, so convert any externally supplied separators first.
    return shortnameslash + util.pconvert(filename)
256 256
def isstandin(filename):
    '''Return true if filename is a big file standin. filename must be
    in Mercurial's internal form (slash-separated).'''
    return filename.startswith(shortnameslash)
261 261
def splitstandin(filename):
    '''Return the largefile path for the given standin path, or None
    when filename is not a standin. Split on '/' because that is what
    dirstate always uses, even on Windows; local separators are
    converted first in case the name comes from an external source
    (like the command line).'''
    parts = util.pconvert(filename).split('/', 1)
    if len(parts) == 2 and parts[0] == shortname:
        return parts[1]
    return None
271 271
def updatestandin(repo, standin):
    '''Re-hash the largefile behind the given standin and rewrite the
    standin's content accordingly. A missing largefile is left alone.'''
    lfile = repo.wjoin(splitstandin(standin))
    if os.path.exists(lfile):
        writestandin(repo, standin, hashfile(lfile), getexecutable(lfile))
278 278
def readstandin(repo, filename, node=None):
    '''read hex hash from standin for filename at given node, or working
    directory if no node is given'''
    ctx = repo[node]
    return ctx[standin(filename)].data().strip()
283 283
def writestandin(repo, standin, hash, executable):
    '''write hash to <repo.root>/<standin>'''
    flags = executable and 'x' or ''
    repo.wwrite(standin, hash + '\n', flags)
287 287
def copyandhash(instream, outfile):
    '''Read bytes from instream (iterable) and write them to outfile,
    computing the SHA-1 hash of the data along the way. Return the hash.'''
    sha = util.sha1('')
    for chunk in instream:
        outfile.write(chunk)
        sha.update(chunk)
    return sha.hexdigest()
296 296
def hashrepofile(repo, file):
    '''Return the hex SHA-1 of the given repo-relative file, or ''
    when the file does not exist in the working copy.'''
    return hashfile(repo.wjoin(file))
299 299
def hashfile(file):
    '''Return the hex SHA-1 of the given file's contents, or the empty
    string if the file does not exist.'''
    if not os.path.exists(file):
        return ''
    hasher = util.sha1('')
    fd = open(file, 'rb')
    try:
        # read in 128k chunks to bound memory use on huge largefiles
        for data in util.filechunkiter(fd, 128 * 1024):
            hasher.update(data)
    finally:
        # release the descriptor even when reading fails mid-file
        fd.close()
    return hasher.hexdigest()
309 309
def getexecutable(filename):
    '''Report whether filename is executable; truthy only when the
    owner, group and other executable bits are all set.'''
    mode = os.stat(filename).st_mode
    owner_x = mode & stat.S_IXUSR
    group_x = mode & stat.S_IXGRP
    other_x = mode & stat.S_IXOTH
    return owner_x and group_x and other_x
315 315
def urljoin(first, second, *arg):
    '''Join two or more URL components, ensuring exactly one slash at
    each boundary.'''
    def glue(left, right):
        left = left if left.endswith('/') else left + '/'
        return left + (right[1:] if right.startswith('/') else right)

    url = glue(first, second)
    for piece in arg:
        url = glue(url, piece)
    return url
328 328
def hexsha1(data):
    """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
    object data"""
    hasher = util.sha1()
    for chunk in util.filechunkiter(data):
        hasher.update(chunk)
    return hasher.hexdigest()
336 336
def httpsendfile(ui, filename):
    '''Return an httpsendfile wrapper for filename, opened for binary
    reading.'''
    return httpconnection.httpsendfile(ui, filename, 'rb')
339 339
def unixpath(path):
    '''Return a version of path normalized for use with the lfdirstate.'''
    normalized = os.path.normpath(path)
    return util.pconvert(normalized)
343 343
def islfilesrepo(repo):
    '''Report whether the repository uses largefiles: either the
    requirement is set and standins exist in the store, or the
    largefile dirstate is non-empty.'''
    if 'largefiles' in repo.requirements:
        if util.any(shortnameslash in f[0] for f in repo.store.datafiles()):
            return True
    return util.any(openlfdirstate(repo.ui, repo, False))
350 350
class storeprotonotcapable(Exception):
    '''Raised when no remote store supports any of the required
    largefile store protocols.'''
    def __init__(self, storetypes):
        # remember the attempted store types for the caller's message
        self.storetypes = storetypes
354 354
def getstandinsstate(repo):
    '''Return a list of (lfile, hash) pairs for every tracked standin;
    hash is None when the standin cannot be read.'''
    result = []
    matcher = getstandinmatcher(repo)
    for standinfile in repo.dirstate.walk(matcher, [], False, False):
        lfile = splitstandin(standinfile)
        try:
            hash = readstandin(repo, lfile)
        except IOError:
            hash = None
        result.append((lfile, hash))
    return result
366 366
def synclfdirstate(repo, lfdirstate, lfile, normallookup):
    '''Synchronize the lfdirstate entry for lfile with the state of its
    standin in the repo dirstate.

    When normallookup is true, a clean ('n') standin is still recorded
    as "needs lookup" rather than clean.'''
    lfstandin = standin(lfile)
    if lfstandin in repo.dirstate:
        # dirstate map tuple: (state, mode, size, mtime)
        stat = repo.dirstate._map[lfstandin]
        state, mtime = stat[0], stat[3]
    else:
        # standin is untracked
        state, mtime = '?', -1
    if state == 'n':
        if normallookup or mtime < 0:
            # state 'n' doesn't ensure 'clean' in this case
            lfdirstate.normallookup(lfile)
        else:
            lfdirstate.normal(lfile)
    elif state == 'm':
        lfdirstate.normallookup(lfile)
    elif state == 'r':
        lfdirstate.remove(lfile)
    elif state == 'a':
        lfdirstate.add(lfile)
    elif state == '?':
        lfdirstate.drop(lfile)
388 388
def markcommitted(orig, ctx, node):
    '''Run the wrapped markcommitted (orig) for ctx, then bring the
    largefile dirstate in sync with the committed standins.'''
    repo = ctx._repo

    orig(node)

    lfdirstate = openlfdirstate(repo.ui, repo)
    for f in ctx.files():
        lfile = splitstandin(f)
        if lfile is not None:
            synclfdirstate(repo, lfdirstate, lfile, False)
    lfdirstate.write()
400
def getlfilestoupdate(oldstandins, newstandins):
    '''Return the names of largefiles whose (lfile, hash) standin
    entries differ between the two lists, without duplicates.'''
    changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
    filelist = []
    # track membership in a set: testing `f[0] not in filelist` made
    # this quadratic for revisions touching many largefiles
    seen = set()
    for lfile, hash in changedstandins:
        if lfile not in seen:
            seen.add(lfile)
            filelist.append(lfile)
    return filelist
396 408
def getlfilestoupload(repo, missing, addfunc):
    '''Call addfunc(standin, hash) for every standin present in any of
    the given missing changesets.'''
    for n in missing:
        parents = [p for p in repo.changelog.parents(n) if p != node.nullid]
        ctx = repo[n]
        files = set(ctx.files())
        if len(parents) == 2:
            # for merges, ctx.files() may not list files that differ
            # against only one parent; compare the merge manifest with
            # both parent manifests explicitly to catch them
            mc = ctx.manifest()
            mp1 = ctx.parents()[0].manifest()
            mp2 = ctx.parents()[1].manifest()
            for f in mp1:
                if f not in mc:
                    files.add(f)
            for f in mp2:
                if f not in mc:
                    files.add(f)
            for f in mc:
                if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                    files.add(f)
        for fn in files:
            # only standins that still exist in this revision are uploaded;
            # the standin's content is the largefile's hex hash
            if isstandin(fn) and fn in ctx:
                addfunc(fn, ctx[fn].data().strip())
@@ -1,479 +1,456 b''
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''setup for largefiles repositories: reposetup'''
10 10 import copy
11 11 import os
12 12
13 13 from mercurial import error, manifest, match as match_, util
14 14 from mercurial.i18n import _
15 15 from mercurial import localrepo, scmutil
16 16
17 17 import lfcommands
18 18 import lfutil
19 19
20 20 def reposetup(ui, repo):
21 21 # wire repositories should be given new wireproto functions
22 22 # by "proto.wirereposetup()" via "hg.wirepeersetupfuncs"
23 23 if not repo.local():
24 24 return
25 25
26 26 class lfilesrepo(repo.__class__):
27 27 lfstatus = False
28 28 def status_nolfiles(self, *args, **kwargs):
29 29 return super(lfilesrepo, self).status(*args, **kwargs)
30 30
31 31 # When lfstatus is set, return a context that gives the names
32 32 # of largefiles instead of their corresponding standins and
33 33 # identifies the largefiles as always binary, regardless of
34 34 # their actual contents.
35 35 def __getitem__(self, changeid):
36 36 ctx = super(lfilesrepo, self).__getitem__(changeid)
37 37 if self.lfstatus:
38 38 class lfilesmanifestdict(manifest.manifestdict):
39 39 def __contains__(self, filename):
40 40 orig = super(lfilesmanifestdict, self).__contains__
41 41 return orig(filename) or orig(lfutil.standin(filename))
42 42 class lfilesctx(ctx.__class__):
43 43 def files(self):
44 44 filenames = super(lfilesctx, self).files()
45 45 return [lfutil.splitstandin(f) or f for f in filenames]
46 46 def manifest(self):
47 47 man1 = super(lfilesctx, self).manifest()
48 48 man1.__class__ = lfilesmanifestdict
49 49 return man1
50 50 def filectx(self, path, fileid=None, filelog=None):
51 51 orig = super(lfilesctx, self).filectx
52 52 try:
53 53 if filelog is not None:
54 54 result = orig(path, fileid, filelog)
55 55 else:
56 56 result = orig(path, fileid)
57 57 except error.LookupError:
58 58 # Adding a null character will cause Mercurial to
59 59 # identify this as a binary file.
60 60 if filelog is not None:
61 61 result = orig(lfutil.standin(path), fileid,
62 62 filelog)
63 63 else:
64 64 result = orig(lfutil.standin(path), fileid)
65 65 olddata = result.data
66 66 result.data = lambda: olddata() + '\0'
67 67 return result
68 68 ctx.__class__ = lfilesctx
69 69 return ctx
70 70
71 71 # Figure out the status of big files and insert them into the
72 72 # appropriate list in the result. Also removes standin files
73 73 # from the listing. Revert to the original status if
74 74 # self.lfstatus is False.
75 75 # XXX large file status is buggy when used on repo proxy.
76 76 # XXX this needs to be investigated.
77 77 @localrepo.unfilteredmethod
78 78 def status(self, node1='.', node2=None, match=None, ignored=False,
79 79 clean=False, unknown=False, listsubrepos=False):
80 80 listignored, listclean, listunknown = ignored, clean, unknown
81 81 orig = super(lfilesrepo, self).status
82 82 if not self.lfstatus:
83 83 return orig(node1, node2, match, listignored, listclean,
84 84 listunknown, listsubrepos)
85 85
86 86 # some calls in this function rely on the old version of status
87 87 self.lfstatus = False
88 88 ctx1 = self[node1]
89 89 ctx2 = self[node2]
90 90 working = ctx2.rev() is None
91 91 parentworking = working and ctx1 == self['.']
92 92
93 93 if match is None:
94 94 match = match_.always(self.root, self.getcwd())
95 95
96 96 wlock = None
97 97 try:
98 98 try:
99 99 # updating the dirstate is optional
100 100 # so we don't wait on the lock
101 101 wlock = self.wlock(False)
102 102 except error.LockError:
103 103 pass
104 104
105 105 # First check if paths or patterns were specified on the
106 106 # command line. If there were, and they don't match any
107 107 # largefiles, we should just bail here and let super
108 108 # handle it -- thus gaining a big performance boost.
109 109 lfdirstate = lfutil.openlfdirstate(ui, self)
110 110 if not match.always():
111 111 for f in lfdirstate:
112 112 if match(f):
113 113 break
114 114 else:
115 115 return orig(node1, node2, match, listignored, listclean,
116 116 listunknown, listsubrepos)
117 117
118 118 # Create a copy of match that matches standins instead
119 119 # of largefiles.
120 120 def tostandins(files):
121 121 if not working:
122 122 return files
123 123 newfiles = []
124 124 dirstate = self.dirstate
125 125 for f in files:
126 126 sf = lfutil.standin(f)
127 127 if sf in dirstate:
128 128 newfiles.append(sf)
129 129 elif sf in dirstate.dirs():
130 130 # Directory entries could be regular or
131 131 # standin, check both
132 132 newfiles.extend((f, sf))
133 133 else:
134 134 newfiles.append(f)
135 135 return newfiles
136 136
137 137 m = copy.copy(match)
138 138 m._files = tostandins(m._files)
139 139
140 140 result = orig(node1, node2, m, ignored, clean, unknown,
141 141 listsubrepos)
142 142 if working:
143 143
144 144 def sfindirstate(f):
145 145 sf = lfutil.standin(f)
146 146 dirstate = self.dirstate
147 147 return sf in dirstate or sf in dirstate.dirs()
148 148
149 149 match._files = [f for f in match._files
150 150 if sfindirstate(f)]
151 151 # Don't waste time getting the ignored and unknown
152 152 # files from lfdirstate
153 153 unsure, s = lfdirstate.status(match, [], False, listclean,
154 154 False)
155 155 (modified, added, removed, clean) = (s.modified, s.added,
156 156 s.removed, s.clean)
157 157 if parentworking:
158 158 for lfile in unsure:
159 159 standin = lfutil.standin(lfile)
160 160 if standin not in ctx1:
161 161 # from second parent
162 162 modified.append(lfile)
163 163 elif ctx1[standin].data().strip() \
164 164 != lfutil.hashfile(self.wjoin(lfile)):
165 165 modified.append(lfile)
166 166 else:
167 167 if listclean:
168 168 clean.append(lfile)
169 169 lfdirstate.normal(lfile)
170 170 else:
171 171 tocheck = unsure + modified + added + clean
172 172 modified, added, clean = [], [], []
173 173
174 174 for lfile in tocheck:
175 175 standin = lfutil.standin(lfile)
176 176 if standin in ctx1:
177 177 abslfile = self.wjoin(lfile)
178 178 if ((ctx1[standin].data().strip() !=
179 179 lfutil.hashfile(abslfile)) or
180 180 (('x' in ctx1.flags(standin)) !=
181 181 bool(lfutil.getexecutable(abslfile)))):
182 182 modified.append(lfile)
183 183 elif listclean:
184 184 clean.append(lfile)
185 185 else:
186 186 added.append(lfile)
187 187
188 188 # at this point, 'removed' contains largefiles
189 189 # marked as 'R' in the working context.
190 190 # then, largefiles not managed also in the target
191 191 # context should be excluded from 'removed'.
192 192 removed = [lfile for lfile in removed
193 193 if lfutil.standin(lfile) in ctx1]
194 194
195 195 # Standins no longer found in lfdirstate has been
196 196 # removed
197 197 for standin in ctx1.walk(lfutil.getstandinmatcher(self)):
198 198 lfile = lfutil.splitstandin(standin)
199 199 if not match(lfile):
200 200 continue
201 201 if lfile not in lfdirstate:
202 202 removed.append(lfile)
203 203
204 204 # Filter result lists
205 205 result = list(result)
206 206
207 207 # Largefiles are not really removed when they're
208 208 # still in the normal dirstate. Likewise, normal
209 209 # files are not really removed if they are still in
210 210 # lfdirstate. This happens in merges where files
211 211 # change type.
212 212 removed = [f for f in removed
213 213 if f not in self.dirstate]
214 214 result[2] = [f for f in result[2]
215 215 if f not in lfdirstate]
216 216
217 217 lfiles = set(lfdirstate._map)
218 218 # Unknown files
219 219 result[4] = set(result[4]).difference(lfiles)
220 220 # Ignored files
221 221 result[5] = set(result[5]).difference(lfiles)
222 222 # combine normal files and largefiles
223 223 normals = [[fn for fn in filelist
224 224 if not lfutil.isstandin(fn)]
225 225 for filelist in result]
226 226 lfstatus = (modified, added, removed, s.deleted, [], [],
227 227 clean)
228 228 result = [sorted(list1 + list2)
229 229 for (list1, list2) in zip(normals, lfstatus)]
230 230 else: # not against working directory
231 231 result = [[lfutil.splitstandin(f) or f for f in items]
232 232 for items in result]
233 233
234 234 if wlock:
235 235 lfdirstate.write()
236 236
237 237 finally:
238 238 if wlock:
239 239 wlock.release()
240 240
241 241 self.lfstatus = True
242 242 return scmutil.status(*result)
243 243
244 244 # As part of committing, copy all of the largefiles into the
245 245 # cache.
246 def commitctx(self, *args, **kwargs):
247 node = super(lfilesrepo, self).commitctx(*args, **kwargs)
246 def commitctx(self, ctx, *args, **kwargs):
247 node = super(lfilesrepo, self).commitctx(ctx, *args, **kwargs)
248 248 lfutil.copyalltostore(self, node)
249 class lfilesctx(ctx.__class__):
250 def markcommitted(self, node):
251 orig = super(lfilesctx, self).markcommitted
252 return lfutil.markcommitted(orig, self, node)
253 ctx.__class__ = lfilesctx
249 254 return node
250 255
251 256 # Before commit, largefile standins have not had their
252 257 # contents updated to reflect the hash of their largefile.
253 258 # Do that here.
254 259 def commit(self, text="", user=None, date=None, match=None,
255 260 force=False, editor=False, extra={}):
256 261 orig = super(lfilesrepo, self).commit
257 262
258 263 wlock = self.wlock()
259 264 try:
260 265 # Case 0: Automated committing
261 266 #
262 267 # While automated committing (like rebase, transplant
263 268 # and so on), this code path is used to avoid:
264 269 # (1) updating standins, because standins should
265 270 # be already updated at this point
266 271 # (2) aborting when standins are matched by "match",
267 272 # because automated committing may specify them directly
268 273 #
269 274 if getattr(self, "_isrebasing", False) or \
270 275 getattr(self, "_istransplanting", False):
271 276 result = orig(text=text, user=user, date=date, match=match,
272 277 force=force, editor=editor, extra=extra)
273
274 if result:
275 lfdirstate = lfutil.openlfdirstate(ui, self)
276 for f in self[result].files():
277 if lfutil.isstandin(f):
278 lfile = lfutil.splitstandin(f)
279 lfutil.synclfdirstate(self, lfdirstate, lfile,
280 False)
281 lfdirstate.write()
282
283 278 return result
284 279 # Case 1: user calls commit with no specific files or
285 280 # include/exclude patterns: refresh and commit all files that
286 281 # are "dirty".
287 282 if match is None or match.always():
288 283 # Spend a bit of time here to get a list of files we know
289 284 # are modified so we can compare only against those.
290 285 # It can cost a lot of time (several seconds)
291 286 # otherwise to update all standins if the largefiles are
292 287 # large.
293 288 lfdirstate = lfutil.openlfdirstate(ui, self)
294 289 dirtymatch = match_.always(self.root, self.getcwd())
295 290 unsure, s = lfdirstate.status(dirtymatch, [], False, False,
296 291 False)
297 292 modifiedfiles = unsure + s.modified + s.added + s.removed
298 293 lfiles = lfutil.listlfiles(self)
299 294 # this only loops through largefiles that exist (not
300 295 # removed/renamed)
301 296 for lfile in lfiles:
302 297 if lfile in modifiedfiles:
303 298 if os.path.exists(
304 299 self.wjoin(lfutil.standin(lfile))):
305 300 # this handles the case where a rebase is being
306 301 # performed and the working copy is not updated
307 302 # yet.
308 303 if os.path.exists(self.wjoin(lfile)):
309 304 lfutil.updatestandin(self,
310 305 lfutil.standin(lfile))
311 lfdirstate.normal(lfile)
312 306
313 307 result = orig(text=text, user=user, date=date, match=match,
314 308 force=force, editor=editor, extra=extra)
315 309
316 if result is not None:
317 for lfile in lfdirstate:
318 if lfile in modifiedfiles:
319 if (not os.path.exists(self.wjoin(
320 lfutil.standin(lfile)))) or \
321 (not os.path.exists(self.wjoin(lfile))):
322 lfdirstate.drop(lfile)
323
324 # This needs to be after commit; otherwise precommit hooks
325 # get the wrong status
326 lfdirstate.write()
327 310 return result
328 311
329 312 lfiles = lfutil.listlfiles(self)
330 313 match._files = self._subdirlfs(match.files(), lfiles)
331 314
332 315 # Case 2: user calls commit with specified patterns: refresh
333 316 # any matching big files.
334 317 smatcher = lfutil.composestandinmatcher(self, match)
335 318 standins = self.dirstate.walk(smatcher, [], False, False)
336 319
337 320 # No matching big files: get out of the way and pass control to
338 321 # the usual commit() method.
339 322 if not standins:
340 323 return orig(text=text, user=user, date=date, match=match,
341 324 force=force, editor=editor, extra=extra)
342 325
343 326 # Refresh all matching big files. It's possible that the
344 327 # commit will end up failing, in which case the big files will
345 328 # stay refreshed. No harm done: the user modified them and
346 329 # asked to commit them, so sooner or later we're going to
347 330 # refresh the standins. Might as well leave them refreshed.
348 331 lfdirstate = lfutil.openlfdirstate(ui, self)
349 332 for standin in standins:
350 333 lfile = lfutil.splitstandin(standin)
351 334 if lfdirstate[lfile] != 'r':
352 335 lfutil.updatestandin(self, standin)
353 lfdirstate.normal(lfile)
354 else:
355 lfdirstate.drop(lfile)
356 336
357 337 # Cook up a new matcher that only matches regular files or
358 338 # standins corresponding to the big files requested by the
359 339 # user. Have to modify _files to prevent commit() from
360 340 # complaining "not tracked" for big files.
361 341 match = copy.copy(match)
362 342 origmatchfn = match.matchfn
363 343
364 344 # Check both the list of largefiles and the list of
365 345 # standins because if a largefile was removed, it
366 346 # won't be in the list of largefiles at this point
367 347 match._files += sorted(standins)
368 348
369 349 actualfiles = []
370 350 for f in match._files:
371 351 fstandin = lfutil.standin(f)
372 352
373 353 # ignore known largefiles and standins
374 354 if f in lfiles or fstandin in standins:
375 355 continue
376 356
377 357 actualfiles.append(f)
378 358 match._files = actualfiles
379 359
380 360 def matchfn(f):
381 361 if origmatchfn(f):
382 362 return f not in lfiles
383 363 else:
384 364 return f in standins
385 365
386 366 match.matchfn = matchfn
387 367 result = orig(text=text, user=user, date=date, match=match,
388 368 force=force, editor=editor, extra=extra)
389 # This needs to be after commit; otherwise precommit hooks
390 # get the wrong status
391 lfdirstate.write()
392 369 return result
393 370 finally:
394 371 wlock.release()
395 372
396 373 def push(self, remote, force=False, revs=None, newbranch=False):
397 374 if remote.local():
398 375 missing = set(self.requirements) - remote.local().supported
399 376 if missing:
400 377 msg = _("required features are not"
401 378 " supported in the destination:"
402 379 " %s") % (', '.join(sorted(missing)))
403 380 raise util.Abort(msg)
404 381 return super(lfilesrepo, self).push(remote, force=force, revs=revs,
405 382 newbranch=newbranch)
406 383
407 384 def _subdirlfs(self, files, lfiles):
408 385 '''
409 386 Adjust matched file list
410 387 If we pass a directory to commit whose only commitable files
411 388 are largefiles, the core commit code aborts before finding
412 389 the largefiles.
413 390 So we do the following:
414 391 For directories that only have largefiles as matches,
415 392 we explicitly add the largefiles to the match list and remove
416 393 the directory.
417 394 In other cases, we leave the match list unmodified.
418 395 '''
419 396 actualfiles = []
420 397 dirs = []
421 398 regulars = []
422 399
423 400 for f in files:
424 401 if lfutil.isstandin(f + '/'):
425 402 raise util.Abort(
426 403 _('file "%s" is a largefile standin') % f,
427 404 hint=('commit the largefile itself instead'))
428 405 # Scan directories
429 406 if os.path.isdir(self.wjoin(f)):
430 407 dirs.append(f)
431 408 else:
432 409 regulars.append(f)
433 410
434 411 for f in dirs:
435 412 matcheddir = False
436 413 d = self.dirstate.normalize(f) + '/'
437 414 # Check for matched normal files
438 415 for mf in regulars:
439 416 if self.dirstate.normalize(mf).startswith(d):
440 417 actualfiles.append(f)
441 418 matcheddir = True
442 419 break
443 420 if not matcheddir:
444 421 # If no normal match, manually append
445 422 # any matching largefiles
446 423 for lf in lfiles:
447 424 if self.dirstate.normalize(lf).startswith(d):
448 425 actualfiles.append(lf)
449 426 if not matcheddir:
450 427 actualfiles.append(lfutil.standin(f))
451 428 matcheddir = True
452 429 # Nothing in dir, so readd it
453 430 # and let commit reject it
454 431 if not matcheddir:
455 432 actualfiles.append(f)
456 433
457 434 # Always add normal files
458 435 actualfiles += regulars
459 436 return actualfiles
460 437
461 438 repo.__class__ = lfilesrepo
462 439
463 440 def prepushoutgoinghook(local, remote, outgoing):
464 441 if outgoing.missing:
465 442 toupload = set()
466 443 addfunc = lambda fn, lfhash: toupload.add(lfhash)
467 444 lfutil.getlfilestoupload(local, outgoing.missing, addfunc)
468 445 lfcommands.uploadlfiles(ui, local, remote, toupload)
469 446 repo.prepushoutgoinghooks.add("largefiles", prepushoutgoinghook)
470 447
471 448 def checkrequireslfiles(ui, repo, **kwargs):
472 449 if 'largefiles' not in repo.requirements and util.any(
473 450 lfutil.shortname+'/' in f[0] for f in repo.store.datafiles()):
474 451 repo.requirements.add('largefiles')
475 452 repo._writerequirements()
476 453
477 454 ui.setconfig('hooks', 'changegroup.lfiles', checkrequireslfiles,
478 455 'largefiles')
479 456 ui.setconfig('hooks', 'commit.lfiles', checkrequireslfiles, 'largefiles')
General Comments 0
You need to be logged in to leave comments. Login now