##// END OF EJS Templates
largefiles: factor out procedures to update lfdirstate for post-committing...
FUJIWARA Katsunori -
r23184:3100d1cb default
parent child Browse files
Show More
@@ -1,417 +1,429 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''largefiles utility code: must not import other modules in this package.'''
9 '''largefiles utility code: must not import other modules in this package.'''
10
10
11 import os
11 import os
12 import platform
12 import platform
13 import shutil
13 import shutil
14 import stat
14 import stat
15
15
16 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
16 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
17 from mercurial.i18n import _
17 from mercurial.i18n import _
18 from mercurial import node
18 from mercurial import node
19
19
# Directory in the working copy that holds the standin files recording
# each largefile's hash; the slash-suffixed variant exists for cheap
# prefix tests against dirstate-normalized (slash-separated) paths.
shortname = '.hglf'
shortnameslash = shortname + '/'
# Used both as the config section name and as the .hg/ subdirectory
# holding the local largefile store and the largefiles dirstate.
longname = 'largefiles'
23
23
24
24
25 # -- Private worker functions ------------------------------------------
25 # -- Private worker functions ------------------------------------------
26
26
def getminsize(ui, assumelfiles, opt, default=10):
    '''Return the minimum largefile size threshold (a float when set).

    The explicit option value `opt` wins; otherwise, when `assumelfiles`
    is set, fall back to the [largefiles] minsize config entry (using
    `default`).  Aborts on a non-numeric value or when no size can be
    determined at all.'''
    lfsize = opt
    if assumelfiles and not lfsize:
        lfsize = ui.config(longname, 'minsize', default=default)
    if lfsize:
        try:
            return float(lfsize)
        except ValueError:
            raise util.Abort(_('largefiles: size must be number (not %s)\n')
                             % lfsize)
    if lfsize is None:
        raise util.Abort(_('minimum size for largefiles must be specified'))
    return lfsize
40
40
def link(src, dest):
    '''Hardlink src to dest, creating dest's directory if necessary.
    If hardlinking fails (e.g. cross-device), fall back to an atomic
    chunked copy that preserves src's permission bits.'''
    util.makedirs(os.path.dirname(dest))
    try:
        util.oslink(src, dest)
    except OSError:
        # if hardlinks fail, fallback on atomic copy
        dst = util.atomictempfile(dest)
        fp = open(src, 'rb')
        try:
            # stream in chunks so huge largefiles need not fit in memory
            for chunk in util.filechunkiter(fp):
                dst.write(chunk)
        finally:
            # previously the source handle was leaked until GC
            fp.close()
        dst.close()
        os.chmod(dest, os.stat(src).st_mode)
52
52
def usercachepath(ui, hash):
    '''Return the per-user cache path for the largefile with the given
    hash, honoring the [largefiles] usercache config and otherwise
    falling back to a platform-specific default.  Can return a falsy
    value (e.g. None) when no cache location can be determined, such as
    when HOME is unset.'''
    path = ui.configpath(longname, 'usercache', None)
    if path:
        path = os.path.join(path, hash)
    else:
        if os.name == 'nt':
            # prefer LOCALAPPDATA over APPDATA when available
            appdata = os.getenv('LOCALAPPDATA', os.getenv('APPDATA'))
            if appdata:
                path = os.path.join(appdata, longname, hash)
        elif platform.system() == 'Darwin':
            home = os.getenv('HOME')
            if home:
                path = os.path.join(home, 'Library', 'Caches',
                                    longname, hash)
        elif os.name == 'posix':
            # XDG base-directory convention first, then ~/.cache
            path = os.getenv('XDG_CACHE_HOME')
            if path:
                path = os.path.join(path, longname, hash)
            else:
                home = os.getenv('HOME')
                if home:
                    path = os.path.join(home, '.cache', longname, hash)
        else:
            raise util.Abort(_('unknown operating system: %s\n') % os.name)
    return path
78
78
def inusercache(ui, hash):
    '''Report whether the user cache already contains the given hash.'''
    cached = usercachepath(ui, hash)
    return cached and os.path.exists(cached)
82
82
def findfile(repo, hash):
    '''Return a local path for the largefile with the given hash, or
    None if it is available in neither cache.  The repo store is
    checked first; on a user-cache hit the blob is hardlinked into the
    store and the store path is returned.'''
    if instore(repo, hash):
        repo.ui.note(_('found %s in store\n') % hash)
        return storepath(repo, hash)
    elif inusercache(repo.ui, hash):
        repo.ui.note(_('found %s in system cache\n') % hash)
        path = storepath(repo, hash)
        # promote the blob from the user cache into the repo store
        link(usercachepath(repo.ui, hash), path)
        return path
    return None
93
93
class largefilesdirstate(dirstate.dirstate):
    '''Dirstate subclass used for the largefiles dirstate: every entry
    point normalizes its path argument with unixpath() so keys are
    always in the slash-separated, normpath'd form, and ignore handling
    is disabled (largefiles are only ever tracked explicitly).'''
    def __getitem__(self, key):
        return super(largefilesdirstate, self).__getitem__(unixpath(key))
    def normal(self, f):
        return super(largefilesdirstate, self).normal(unixpath(f))
    def remove(self, f):
        return super(largefilesdirstate, self).remove(unixpath(f))
    def add(self, f):
        return super(largefilesdirstate, self).add(unixpath(f))
    def drop(self, f):
        return super(largefilesdirstate, self).drop(unixpath(f))
    def forget(self, f):
        return super(largefilesdirstate, self).forget(unixpath(f))
    def normallookup(self, f):
        return super(largefilesdirstate, self).normallookup(unixpath(f))
    def _ignore(self, f):
        # nothing is ever ignored in the largefiles dirstate
        return False
111
111
def openlfdirstate(ui, repo, create=True):
    '''
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.

    When `create` is true and no dirstate file exists yet, it is
    populated from the standins currently tracked by the main dirstate.
    '''
    lfstoredir = repo.join(longname)
    opener = scmutil.opener(lfstoredir)
    lfdirstate = largefilesdirstate(opener, ui, repo.root,
                                    repo.dirstate._validate)

    # If the largefiles dirstate does not exist, populate and create
    # it. This ensures that we create it on the first meaningful
    # largefiles operation in a new clone.
    if create and not os.path.exists(os.path.join(lfstoredir, 'dirstate')):
        matcher = getstandinmatcher(repo)
        standins = repo.dirstate.walk(matcher, [], False, False)

        if len(standins) > 0:
            util.makedirs(lfstoredir)

        for standin in standins:
            lfile = splitstandin(standin)
            # normallookup: entries stay unverified until the next status
            lfdirstate.normallookup(lfile)
    return lfdirstate
136
136
def lfdirstatestatus(lfdirstate, repo):
    '''Compute the largefiles status against the '.' context.  "unsure"
    entries are resolved by re-hashing the working-copy file and
    comparing against the hash recorded in the standin; files proven
    clean are also marked normal in lfdirstate.  Returns the status
    object with its modified/clean lists updated in place.'''
    wctx = repo['.']
    match = match_.always(repo.root, repo.getcwd())
    unsure, s = lfdirstate.status(match, [], False, False, False)
    modified, clean = s.modified, s.clean
    for lfile in unsure:
        try:
            fctx = wctx[standin(lfile)]
        except LookupError:
            # no standin in '.' for this file
            fctx = None
        # missing standin or hash mismatch => modified
        if not fctx or fctx.data().strip() != hashfile(repo.wjoin(lfile)):
            modified.append(lfile)
        else:
            clean.append(lfile)
            lfdirstate.normal(lfile)
    return s
153
153
def listlfiles(repo, rev=None, matcher=None):
    '''return a list of largefiles in the working copy or the
    specified changeset'''

    if matcher is None:
        matcher = getstandinmatcher(repo)

    # ignore unknown files in working directory
    # ('?' is the dirstate code for untracked files; the dirstate check
    # only applies to the working directory, i.e. rev is None)
    return [splitstandin(f)
            for f in repo[rev].walk(matcher)
            if rev is not None or repo.dirstate[f] != '?']
165
165
def instore(repo, hash):
    '''Report whether the largefile with this hash is in the repo store.'''
    path = storepath(repo, hash)
    return os.path.exists(path)
168
168
def storepath(repo, hash):
    '''Return the absolute path of the given hash in the repo store.'''
    relative = os.path.join(longname, hash)
    return repo.join(relative)
171
171
def copyfromcache(repo, hash, filename):
    '''Copy the specified largefile from the repo or system cache to
    filename in the repository. Return true on success or false if the
    file was not found in either cache (which should not happen:
    this is meant to be called only after ensuring that the needed
    largefile exists in the cache).'''
    path = findfile(repo, hash)
    if path is None:
        return False
    util.makedirs(os.path.dirname(repo.wjoin(filename)))
    # The write may fail before the file is fully written, but we
    # don't use atomic writes in the working copy.
    shutil.copy(path, repo.wjoin(filename))
    return True
186
186
def copytostore(repo, rev, file, uploaded=False):
    '''Copy the working-copy largefile standing in for `file` at `rev`
    into the repo store, keyed by the hash recorded in its standin.
    No-op when the store already has the blob.  (`uploaded` is accepted
    for interface compatibility but unused here.)'''
    hash = readstandin(repo, file, rev)
    if instore(repo, hash):
        return
    copytostoreabsolute(repo, repo.wjoin(file), hash)
192
192
def copyalltostore(repo, node):
    '''Copy all largefiles in a given revision to the store'''
    ctx = repo[node]
    for fname in ctx.files():
        # only standins that actually exist in this revision's manifest
        if not isstandin(fname) or fname not in ctx.manifest():
            continue
        copytostore(repo, ctx.node(), splitstandin(fname))
201
201
202
202
def copytostoreabsolute(repo, file, hash):
    '''Copy the file at absolute path `file` into the repo store under
    `hash`.  Hardlinks from the user cache when the blob already exists
    there; otherwise streams an atomic copy and then links the result
    back into the user cache.  Skipped entirely while the repo is being
    converted (repo._isconverting).'''
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    elif not getattr(repo, "_isconverting", False):
        util.makedirs(os.path.dirname(storepath(repo, hash)))
        dst = util.atomictempfile(storepath(repo, hash),
                                  createmode=repo.store.createmode)
        fp = open(file, 'rb')
        try:
            # chunked copy keeps memory bounded for very large files
            for chunk in util.filechunkiter(fp):
                dst.write(chunk)
        finally:
            # previously the source handle was leaked until GC
            fp.close()
        dst.close()
        linktousercache(repo, hash)
214
214
def linktousercache(repo, hash):
    '''Hardlink the store copy of `hash` into the user cache, when a
    user-cache location is available.'''
    target = usercachepath(repo.ui, hash)
    if target:
        link(storepath(repo, hash), target)
219
219
def getstandinmatcher(repo, pats=None, opts=None):
    '''Return a match object that applies pats to the standin directory'''
    # avoid mutable default arguments ([] / {}), which are shared
    # between calls; None sentinels preserve the original behavior
    if opts is None:
        opts = {}
    standindir = repo.wjoin(shortname)
    if pats:
        pats = [os.path.join(standindir, pat) for pat in pats]
    else:
        # no patterns: relative to repo root
        pats = [standindir]
    # no warnings about missing files or directories
    match = scmutil.match(repo[None], pats, opts)
    match.bad = lambda f, msg: None
    return match
232
232
def composestandinmatcher(repo, rmatcher):
    '''Return a matcher that accepts standins corresponding to the
    files accepted by rmatcher. Pass the list of files in the matcher
    as the paths specified by the user.'''
    smatcher = getstandinmatcher(repo, rmatcher.files())
    # capture the original matchfn before replacing it; renamed from
    # 'isstandin', which shadowed the module-level isstandin() function
    standinmatch = smatcher.matchfn
    def composedmatchfn(f):
        # f must be a standin AND its largefile must match rmatcher
        return standinmatch(f) and rmatcher.matchfn(splitstandin(f))
    smatcher.matchfn = composedmatchfn

    return smatcher
244
244
def standin(filename):
    '''Return the repo-relative path to the standin for the specified big
    file.'''
    # Notes:
    # 1) Some callers want an absolute path, but for instance addlargefiles
    #    needs it repo-relative so it can be passed to repo[None].add().  So
    #    leave it up to the caller to use repo.wjoin() to get an absolute path.
    # 2) Join with '/' because that's what dirstate always uses, even on
    #    Windows. Change existing separator to '/' first in case we are
    #    passed filenames from an external source (like the command line).
    return shortnameslash + util.pconvert(filename)
256
256
def isstandin(filename):
    '''Return true if filename is a big file standin. filename must be
    in Mercurial's internal form (slash-separated).'''
    # a standin is anything under the '.hglf/' prefix
    return filename.startswith(shortnameslash)
261
261
def splitstandin(filename):
    '''Return the largefile path for the given standin path, or None
    when filename is not a standin.  Splits on '/' because that is the
    separator dirstate always uses, even on Windows; local separators
    are converted first in case the name came from an external source
    (like the command line).'''
    prefix, sep, rest = util.pconvert(filename).partition('/')
    if sep and prefix == shortname:
        return rest
    return None
271
271
def updatestandin(repo, standin):
    '''Re-hash the working-copy largefile corresponding to `standin`
    and rewrite the standin with the fresh hash and executable bit.
    No-op when the largefile is absent from the working copy.'''
    file = repo.wjoin(splitstandin(standin))
    if os.path.exists(file):
        hash = hashfile(file)
        executable = getexecutable(file)
        writestandin(repo, standin, hash, executable)
278
278
def readstandin(repo, filename, node=None):
    '''read hex hash from standin for filename at given node, or working
    directory if no node is given'''
    fctx = repo[node][standin(filename)]
    return fctx.data().strip()
283
283
def writestandin(repo, standin, hash, executable):
    '''write hash to <repo.root>/<standin>'''
    flags = 'x' if executable else ''
    repo.wwrite(standin, hash + '\n', flags)
287
287
def copyandhash(instream, outfile):
    '''Read bytes from instream (iterable) and write them to outfile,
    computing the SHA-1 hash of the data along the way. Return the hash.'''
    hasher = util.sha1('')
    for chunk in instream:
        hasher.update(chunk)
        outfile.write(chunk)
    return hasher.hexdigest()
296
296
def hashrepofile(repo, file):
    '''Return the SHA-1 hex digest of the repo-relative file (empty
    string when the file does not exist; see hashfile).'''
    return hashfile(repo.wjoin(file))
299
299
def hashfile(file):
    '''Return the SHA-1 hex digest of the file at the given path, or ''
    when the file does not exist.'''
    if not os.path.exists(file):
        return ''
    hasher = util.sha1('')
    fd = open(file, 'rb')
    try:
        # 128k chunks keep memory bounded for multi-GB largefiles
        for data in util.filechunkiter(fd, 128 * 1024):
            hasher.update(data)
    finally:
        # close even if reading raises (previously leaked on error)
        fd.close()
    return hasher.hexdigest()
309
309
def getexecutable(filename):
    '''Return a truthy value iff the file is executable by user, group
    and other (all three execute bits set).'''
    mode = os.stat(filename).st_mode
    mask = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
    return (mode & mask) == mask
315
315
def urljoin(first, second, *arg):
    '''Join two or more URL segments, ensuring exactly one '/' between
    each pair of adjacent segments.'''
    def join(left, right):
        if not left.endswith('/'):
            left += '/'
        trimmed = right[1:] if right.startswith('/') else right
        return left + trimmed

    url = join(first, second)
    for segment in arg:
        url = join(url, segment)
    return url
328
328
def hexsha1(data):
    """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
    object data"""
    hasher = util.sha1()
    for chunk in util.filechunkiter(data):
        hasher.update(chunk)
    return hasher.hexdigest()
336
336
def httpsendfile(ui, filename):
    '''Return an httpsendfile wrapper for filename, opened read-binary,
    suitable for use as an HTTP upload body.'''
    return httpconnection.httpsendfile(ui, filename, 'rb')
339
339
def unixpath(path):
    '''Return a version of path normalized for use with the lfdirstate.'''
    # normpath collapses '.', '..' and duplicate separators; pconvert
    # then forces '/' separators, matching dirstate's internal form
    return util.pconvert(os.path.normpath(path))
343
343
def islfilesrepo(repo):
    '''Report whether repo actually contains largefiles: true when the
    'largefiles' requirement is recorded and some store data file lives
    under the standin directory, or when the largefiles dirstate (opened
    without creating it) has any entry.'''
    if ('largefiles' in repo.requirements and
        util.any(shortnameslash in f[0] for f in repo.store.datafiles())):
        return True

    # fall back to the largefiles dirstate; create=False avoids
    # materializing .hg/largefiles just to answer the question
    return util.any(openlfdirstate(repo.ui, repo, False))
350
350
class storeprotonotcapable(Exception):
    '''Exception carrying the largefile store types (`storetypes`) for
    which no capable store could be found.'''
    def __init__(self, storetypes):
        self.storetypes = storetypes
354
354
def getstandinsstate(repo):
    '''Return a list of (lfile, hash) pairs for every standin tracked
    by the main dirstate; hash is None when the standin cannot be
    read.'''
    standins = []
    matcher = getstandinmatcher(repo)
    for standin in repo.dirstate.walk(matcher, [], False, False):
        lfile = splitstandin(standin)
        try:
            hash = readstandin(repo, lfile)
        except IOError:
            # standin missing/unreadable at this revision
            hash = None
        standins.append((lfile, hash))
    return standins
366
366
def synclfdirstate(repo, lfdirstate, lfile, normallookup):
    '''Mirror the dirstate status of lfile's standin onto lfdirstate.

    States map 1:1 (m -> normallookup, r -> remove, a -> add,
    ? -> drop) except 'n': a normal entry becomes normallookup when the
    caller requests it or the standin's mtime is unset (< 0), so the
    next status run re-verifies the file instead of trusting cached
    stat data.'''
    lfstandin = standin(lfile)
    if lfstandin in repo.dirstate:
        # raw dirstate tuple: (state, mode, size, mtime)
        stat = repo.dirstate._map[lfstandin]
        state, mtime = stat[0], stat[3]
    else:
        state, mtime = '?', -1
    if state == 'n':
        if normallookup or mtime < 0:
            # state 'n' doesn't ensure 'clean' in this case
            lfdirstate.normallookup(lfile)
        else:
            lfdirstate.normal(lfile)
    elif state == 'm':
        lfdirstate.normallookup(lfile)
    elif state == 'r':
        lfdirstate.remove(lfile)
    elif state == 'a':
        lfdirstate.add(lfile)
    elif state == '?':
        lfdirstate.drop(lfile)
388
388
def markcommitted(orig, ctx, node):
    '''Wrapper around a markcommitted implementation (`orig`): after
    delegating to orig(node), bring the lfdirstate entries for every
    largefile touched by the committed ctx in sync with their (now
    committed) standins, then persist the lfdirstate.'''
    repo = ctx._repo

    orig(node)

    lfdirstate = openlfdirstate(repo.ui, repo)
    for f in ctx.files():
        if isstandin(f):
            lfile = splitstandin(f)
            # post-commit: no forced normallookup needed
            synclfdirstate(repo, lfdirstate, lfile, False)
    lfdirstate.write()
400
def getlfilestoupdate(oldstandins, newstandins):
    '''Given two [(lfile, hash)] lists, return the unique lfile names
    whose (lfile, hash) entry appears in exactly one of the lists,
    i.e. the largefiles that changed between the two states.'''
    changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
    filelist = []
    # track seen names in a set: the old `f[0] not in filelist` list
    # membership test was O(n) per entry (O(n^2) overall)
    seen = set()
    for f in changedstandins:
        if f[0] not in seen:
            seen.add(f[0])
            filelist.append(f[0])
    return filelist
396
408
def getlfilestoupload(repo, missing, addfunc):
    '''For each changeset node in `missing`, call
    addfunc(standinname, hash) for every standin present in that
    changeset.  For merge commits (two non-null parents) the file set
    from ctx.files() is widened with files absent from the merge
    manifest relative to a parent, and files whose manifest entry
    differs from either parent.'''
    for n in missing:
        parents = [p for p in repo.changelog.parents(n) if p != node.nullid]
        ctx = repo[n]
        files = set(ctx.files())
        if len(parents) == 2:
            mc = ctx.manifest()
            mp1 = ctx.parents()[0].manifest()
            mp2 = ctx.parents()[1].manifest()
            # files present in a parent but gone from the merge result
            for f in mp1:
                if f not in mc:
                    files.add(f)
            for f in mp2:
                if f not in mc:
                    files.add(f)
            # files whose content differs from either parent
            for f in mc:
                if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                    files.add(f)
        for fn in files:
            # only standins that exist in this changeset get reported
            if isstandin(fn) and fn in ctx:
                addfunc(fn, ctx[fn].data().strip())
@@ -1,479 +1,456 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''setup for largefiles repositories: reposetup'''
9 '''setup for largefiles repositories: reposetup'''
10 import copy
10 import copy
11 import os
11 import os
12
12
13 from mercurial import error, manifest, match as match_, util
13 from mercurial import error, manifest, match as match_, util
14 from mercurial.i18n import _
14 from mercurial.i18n import _
15 from mercurial import localrepo, scmutil
15 from mercurial import localrepo, scmutil
16
16
17 import lfcommands
17 import lfcommands
18 import lfutil
18 import lfutil
19
19
20 def reposetup(ui, repo):
20 def reposetup(ui, repo):
21 # wire repositories should be given new wireproto functions
21 # wire repositories should be given new wireproto functions
22 # by "proto.wirereposetup()" via "hg.wirepeersetupfuncs"
22 # by "proto.wirereposetup()" via "hg.wirepeersetupfuncs"
23 if not repo.local():
23 if not repo.local():
24 return
24 return
25
25
26 class lfilesrepo(repo.__class__):
26 class lfilesrepo(repo.__class__):
27 lfstatus = False
27 lfstatus = False
28 def status_nolfiles(self, *args, **kwargs):
28 def status_nolfiles(self, *args, **kwargs):
29 return super(lfilesrepo, self).status(*args, **kwargs)
29 return super(lfilesrepo, self).status(*args, **kwargs)
30
30
31 # When lfstatus is set, return a context that gives the names
31 # When lfstatus is set, return a context that gives the names
32 # of largefiles instead of their corresponding standins and
32 # of largefiles instead of their corresponding standins and
33 # identifies the largefiles as always binary, regardless of
33 # identifies the largefiles as always binary, regardless of
34 # their actual contents.
34 # their actual contents.
35 def __getitem__(self, changeid):
35 def __getitem__(self, changeid):
36 ctx = super(lfilesrepo, self).__getitem__(changeid)
36 ctx = super(lfilesrepo, self).__getitem__(changeid)
37 if self.lfstatus:
37 if self.lfstatus:
38 class lfilesmanifestdict(manifest.manifestdict):
38 class lfilesmanifestdict(manifest.manifestdict):
39 def __contains__(self, filename):
39 def __contains__(self, filename):
40 orig = super(lfilesmanifestdict, self).__contains__
40 orig = super(lfilesmanifestdict, self).__contains__
41 return orig(filename) or orig(lfutil.standin(filename))
41 return orig(filename) or orig(lfutil.standin(filename))
42 class lfilesctx(ctx.__class__):
42 class lfilesctx(ctx.__class__):
43 def files(self):
43 def files(self):
44 filenames = super(lfilesctx, self).files()
44 filenames = super(lfilesctx, self).files()
45 return [lfutil.splitstandin(f) or f for f in filenames]
45 return [lfutil.splitstandin(f) or f for f in filenames]
46 def manifest(self):
46 def manifest(self):
47 man1 = super(lfilesctx, self).manifest()
47 man1 = super(lfilesctx, self).manifest()
48 man1.__class__ = lfilesmanifestdict
48 man1.__class__ = lfilesmanifestdict
49 return man1
49 return man1
50 def filectx(self, path, fileid=None, filelog=None):
50 def filectx(self, path, fileid=None, filelog=None):
51 orig = super(lfilesctx, self).filectx
51 orig = super(lfilesctx, self).filectx
52 try:
52 try:
53 if filelog is not None:
53 if filelog is not None:
54 result = orig(path, fileid, filelog)
54 result = orig(path, fileid, filelog)
55 else:
55 else:
56 result = orig(path, fileid)
56 result = orig(path, fileid)
57 except error.LookupError:
57 except error.LookupError:
58 # Adding a null character will cause Mercurial to
58 # Adding a null character will cause Mercurial to
59 # identify this as a binary file.
59 # identify this as a binary file.
60 if filelog is not None:
60 if filelog is not None:
61 result = orig(lfutil.standin(path), fileid,
61 result = orig(lfutil.standin(path), fileid,
62 filelog)
62 filelog)
63 else:
63 else:
64 result = orig(lfutil.standin(path), fileid)
64 result = orig(lfutil.standin(path), fileid)
65 olddata = result.data
65 olddata = result.data
66 result.data = lambda: olddata() + '\0'
66 result.data = lambda: olddata() + '\0'
67 return result
67 return result
68 ctx.__class__ = lfilesctx
68 ctx.__class__ = lfilesctx
69 return ctx
69 return ctx
70
70
71 # Figure out the status of big files and insert them into the
71 # Figure out the status of big files and insert them into the
72 # appropriate list in the result. Also removes standin files
72 # appropriate list in the result. Also removes standin files
73 # from the listing. Revert to the original status if
73 # from the listing. Revert to the original status if
74 # self.lfstatus is False.
74 # self.lfstatus is False.
75 # XXX large file status is buggy when used on repo proxy.
75 # XXX large file status is buggy when used on repo proxy.
76 # XXX this needs to be investigated.
76 # XXX this needs to be investigated.
77 @localrepo.unfilteredmethod
77 @localrepo.unfilteredmethod
78 def status(self, node1='.', node2=None, match=None, ignored=False,
78 def status(self, node1='.', node2=None, match=None, ignored=False,
79 clean=False, unknown=False, listsubrepos=False):
79 clean=False, unknown=False, listsubrepos=False):
80 listignored, listclean, listunknown = ignored, clean, unknown
80 listignored, listclean, listunknown = ignored, clean, unknown
81 orig = super(lfilesrepo, self).status
81 orig = super(lfilesrepo, self).status
82 if not self.lfstatus:
82 if not self.lfstatus:
83 return orig(node1, node2, match, listignored, listclean,
83 return orig(node1, node2, match, listignored, listclean,
84 listunknown, listsubrepos)
84 listunknown, listsubrepos)
85
85
86 # some calls in this function rely on the old version of status
86 # some calls in this function rely on the old version of status
87 self.lfstatus = False
87 self.lfstatus = False
88 ctx1 = self[node1]
88 ctx1 = self[node1]
89 ctx2 = self[node2]
89 ctx2 = self[node2]
90 working = ctx2.rev() is None
90 working = ctx2.rev() is None
91 parentworking = working and ctx1 == self['.']
91 parentworking = working and ctx1 == self['.']
92
92
93 if match is None:
93 if match is None:
94 match = match_.always(self.root, self.getcwd())
94 match = match_.always(self.root, self.getcwd())
95
95
96 wlock = None
96 wlock = None
97 try:
97 try:
98 try:
98 try:
99 # updating the dirstate is optional
99 # updating the dirstate is optional
100 # so we don't wait on the lock
100 # so we don't wait on the lock
101 wlock = self.wlock(False)
101 wlock = self.wlock(False)
102 except error.LockError:
102 except error.LockError:
103 pass
103 pass
104
104
105 # First check if paths or patterns were specified on the
105 # First check if paths or patterns were specified on the
106 # command line. If there were, and they don't match any
106 # command line. If there were, and they don't match any
107 # largefiles, we should just bail here and let super
107 # largefiles, we should just bail here and let super
108 # handle it -- thus gaining a big performance boost.
108 # handle it -- thus gaining a big performance boost.
109 lfdirstate = lfutil.openlfdirstate(ui, self)
109 lfdirstate = lfutil.openlfdirstate(ui, self)
110 if not match.always():
110 if not match.always():
111 for f in lfdirstate:
111 for f in lfdirstate:
112 if match(f):
112 if match(f):
113 break
113 break
114 else:
114 else:
115 return orig(node1, node2, match, listignored, listclean,
115 return orig(node1, node2, match, listignored, listclean,
116 listunknown, listsubrepos)
116 listunknown, listsubrepos)
117
117
118 # Create a copy of match that matches standins instead
118 # Create a copy of match that matches standins instead
119 # of largefiles.
119 # of largefiles.
120 def tostandins(files):
120 def tostandins(files):
121 if not working:
121 if not working:
122 return files
122 return files
123 newfiles = []
123 newfiles = []
124 dirstate = self.dirstate
124 dirstate = self.dirstate
125 for f in files:
125 for f in files:
126 sf = lfutil.standin(f)
126 sf = lfutil.standin(f)
127 if sf in dirstate:
127 if sf in dirstate:
128 newfiles.append(sf)
128 newfiles.append(sf)
129 elif sf in dirstate.dirs():
129 elif sf in dirstate.dirs():
130 # Directory entries could be regular or
130 # Directory entries could be regular or
131 # standin, check both
131 # standin, check both
132 newfiles.extend((f, sf))
132 newfiles.extend((f, sf))
133 else:
133 else:
134 newfiles.append(f)
134 newfiles.append(f)
135 return newfiles
135 return newfiles
136
136
137 m = copy.copy(match)
137 m = copy.copy(match)
138 m._files = tostandins(m._files)
138 m._files = tostandins(m._files)
139
139
140 result = orig(node1, node2, m, ignored, clean, unknown,
140 result = orig(node1, node2, m, ignored, clean, unknown,
141 listsubrepos)
141 listsubrepos)
142 if working:
142 if working:
143
143
144 def sfindirstate(f):
144 def sfindirstate(f):
145 sf = lfutil.standin(f)
145 sf = lfutil.standin(f)
146 dirstate = self.dirstate
146 dirstate = self.dirstate
147 return sf in dirstate or sf in dirstate.dirs()
147 return sf in dirstate or sf in dirstate.dirs()
148
148
149 match._files = [f for f in match._files
149 match._files = [f for f in match._files
150 if sfindirstate(f)]
150 if sfindirstate(f)]
151 # Don't waste time getting the ignored and unknown
151 # Don't waste time getting the ignored and unknown
152 # files from lfdirstate
152 # files from lfdirstate
153 unsure, s = lfdirstate.status(match, [], False, listclean,
153 unsure, s = lfdirstate.status(match, [], False, listclean,
154 False)
154 False)
155 (modified, added, removed, clean) = (s.modified, s.added,
155 (modified, added, removed, clean) = (s.modified, s.added,
156 s.removed, s.clean)
156 s.removed, s.clean)
157 if parentworking:
157 if parentworking:
158 for lfile in unsure:
158 for lfile in unsure:
159 standin = lfutil.standin(lfile)
159 standin = lfutil.standin(lfile)
160 if standin not in ctx1:
160 if standin not in ctx1:
161 # from second parent
161 # from second parent
162 modified.append(lfile)
162 modified.append(lfile)
163 elif ctx1[standin].data().strip() \
163 elif ctx1[standin].data().strip() \
164 != lfutil.hashfile(self.wjoin(lfile)):
164 != lfutil.hashfile(self.wjoin(lfile)):
165 modified.append(lfile)
165 modified.append(lfile)
166 else:
166 else:
167 if listclean:
167 if listclean:
168 clean.append(lfile)
168 clean.append(lfile)
169 lfdirstate.normal(lfile)
169 lfdirstate.normal(lfile)
170 else:
170 else:
171 tocheck = unsure + modified + added + clean
171 tocheck = unsure + modified + added + clean
172 modified, added, clean = [], [], []
172 modified, added, clean = [], [], []
173
173
174 for lfile in tocheck:
174 for lfile in tocheck:
175 standin = lfutil.standin(lfile)
175 standin = lfutil.standin(lfile)
176 if standin in ctx1:
176 if standin in ctx1:
177 abslfile = self.wjoin(lfile)
177 abslfile = self.wjoin(lfile)
178 if ((ctx1[standin].data().strip() !=
178 if ((ctx1[standin].data().strip() !=
179 lfutil.hashfile(abslfile)) or
179 lfutil.hashfile(abslfile)) or
180 (('x' in ctx1.flags(standin)) !=
180 (('x' in ctx1.flags(standin)) !=
181 bool(lfutil.getexecutable(abslfile)))):
181 bool(lfutil.getexecutable(abslfile)))):
182 modified.append(lfile)
182 modified.append(lfile)
183 elif listclean:
183 elif listclean:
184 clean.append(lfile)
184 clean.append(lfile)
185 else:
185 else:
186 added.append(lfile)
186 added.append(lfile)
187
187
188 # at this point, 'removed' contains largefiles
188 # at this point, 'removed' contains largefiles
189 # marked as 'R' in the working context.
189 # marked as 'R' in the working context.
190 # then, largefiles not managed also in the target
190 # then, largefiles not managed also in the target
191 # context should be excluded from 'removed'.
191 # context should be excluded from 'removed'.
192 removed = [lfile for lfile in removed
192 removed = [lfile for lfile in removed
193 if lfutil.standin(lfile) in ctx1]
193 if lfutil.standin(lfile) in ctx1]
194
194
195 # Standins no longer found in lfdirstate has been
195 # Standins no longer found in lfdirstate has been
196 # removed
196 # removed
197 for standin in ctx1.walk(lfutil.getstandinmatcher(self)):
197 for standin in ctx1.walk(lfutil.getstandinmatcher(self)):
198 lfile = lfutil.splitstandin(standin)
198 lfile = lfutil.splitstandin(standin)
199 if not match(lfile):
199 if not match(lfile):
200 continue
200 continue
201 if lfile not in lfdirstate:
201 if lfile not in lfdirstate:
202 removed.append(lfile)
202 removed.append(lfile)
203
203
204 # Filter result lists
204 # Filter result lists
205 result = list(result)
205 result = list(result)
206
206
207 # Largefiles are not really removed when they're
207 # Largefiles are not really removed when they're
208 # still in the normal dirstate. Likewise, normal
208 # still in the normal dirstate. Likewise, normal
209 # files are not really removed if they are still in
209 # files are not really removed if they are still in
210 # lfdirstate. This happens in merges where files
210 # lfdirstate. This happens in merges where files
211 # change type.
211 # change type.
212 removed = [f for f in removed
212 removed = [f for f in removed
213 if f not in self.dirstate]
213 if f not in self.dirstate]
214 result[2] = [f for f in result[2]
214 result[2] = [f for f in result[2]
215 if f not in lfdirstate]
215 if f not in lfdirstate]
216
216
217 lfiles = set(lfdirstate._map)
217 lfiles = set(lfdirstate._map)
218 # Unknown files
218 # Unknown files
219 result[4] = set(result[4]).difference(lfiles)
219 result[4] = set(result[4]).difference(lfiles)
220 # Ignored files
220 # Ignored files
221 result[5] = set(result[5]).difference(lfiles)
221 result[5] = set(result[5]).difference(lfiles)
222 # combine normal files and largefiles
222 # combine normal files and largefiles
223 normals = [[fn for fn in filelist
223 normals = [[fn for fn in filelist
224 if not lfutil.isstandin(fn)]
224 if not lfutil.isstandin(fn)]
225 for filelist in result]
225 for filelist in result]
226 lfstatus = (modified, added, removed, s.deleted, [], [],
226 lfstatus = (modified, added, removed, s.deleted, [], [],
227 clean)
227 clean)
228 result = [sorted(list1 + list2)
228 result = [sorted(list1 + list2)
229 for (list1, list2) in zip(normals, lfstatus)]
229 for (list1, list2) in zip(normals, lfstatus)]
230 else: # not against working directory
230 else: # not against working directory
231 result = [[lfutil.splitstandin(f) or f for f in items]
231 result = [[lfutil.splitstandin(f) or f for f in items]
232 for items in result]
232 for items in result]
233
233
234 if wlock:
234 if wlock:
235 lfdirstate.write()
235 lfdirstate.write()
236
236
237 finally:
237 finally:
238 if wlock:
238 if wlock:
239 wlock.release()
239 wlock.release()
240
240
241 self.lfstatus = True
241 self.lfstatus = True
242 return scmutil.status(*result)
242 return scmutil.status(*result)
243
243
244 # As part of committing, copy all of the largefiles into the
244 # As part of committing, copy all of the largefiles into the
245 # cache.
245 # cache.
246 def commitctx(self, *args, **kwargs):
246 def commitctx(self, ctx, *args, **kwargs):
247 node = super(lfilesrepo, self).commitctx(*args, **kwargs)
247 node = super(lfilesrepo, self).commitctx(ctx, *args, **kwargs)
248 lfutil.copyalltostore(self, node)
248 lfutil.copyalltostore(self, node)
249 class lfilesctx(ctx.__class__):
250 def markcommitted(self, node):
251 orig = super(lfilesctx, self).markcommitted
252 return lfutil.markcommitted(orig, self, node)
253 ctx.__class__ = lfilesctx
249 return node
254 return node
250
255
251 # Before commit, largefile standins have not had their
256 # Before commit, largefile standins have not had their
252 # contents updated to reflect the hash of their largefile.
257 # contents updated to reflect the hash of their largefile.
253 # Do that here.
258 # Do that here.
254 def commit(self, text="", user=None, date=None, match=None,
259 def commit(self, text="", user=None, date=None, match=None,
255 force=False, editor=False, extra={}):
260 force=False, editor=False, extra={}):
256 orig = super(lfilesrepo, self).commit
261 orig = super(lfilesrepo, self).commit
257
262
258 wlock = self.wlock()
263 wlock = self.wlock()
259 try:
264 try:
260 # Case 0: Automated committing
265 # Case 0: Automated committing
261 #
266 #
262 # While automated committing (like rebase, transplant
267 # While automated committing (like rebase, transplant
263 # and so on), this code path is used to avoid:
268 # and so on), this code path is used to avoid:
264 # (1) updating standins, because standins should
269 # (1) updating standins, because standins should
265 # be already updated at this point
270 # be already updated at this point
266 # (2) aborting when standins are matched by "match",
271 # (2) aborting when standins are matched by "match",
267 # because automated committing may specify them directly
272 # because automated committing may specify them directly
268 #
273 #
269 if getattr(self, "_isrebasing", False) or \
274 if getattr(self, "_isrebasing", False) or \
270 getattr(self, "_istransplanting", False):
275 getattr(self, "_istransplanting", False):
271 result = orig(text=text, user=user, date=date, match=match,
276 result = orig(text=text, user=user, date=date, match=match,
272 force=force, editor=editor, extra=extra)
277 force=force, editor=editor, extra=extra)
273
274 if result:
275 lfdirstate = lfutil.openlfdirstate(ui, self)
276 for f in self[result].files():
277 if lfutil.isstandin(f):
278 lfile = lfutil.splitstandin(f)
279 lfutil.synclfdirstate(self, lfdirstate, lfile,
280 False)
281 lfdirstate.write()
282
283 return result
278 return result
284 # Case 1: user calls commit with no specific files or
279 # Case 1: user calls commit with no specific files or
285 # include/exclude patterns: refresh and commit all files that
280 # include/exclude patterns: refresh and commit all files that
286 # are "dirty".
281 # are "dirty".
287 if match is None or match.always():
282 if match is None or match.always():
288 # Spend a bit of time here to get a list of files we know
283 # Spend a bit of time here to get a list of files we know
289 # are modified so we can compare only against those.
284 # are modified so we can compare only against those.
290 # It can cost a lot of time (several seconds)
285 # It can cost a lot of time (several seconds)
291 # otherwise to update all standins if the largefiles are
286 # otherwise to update all standins if the largefiles are
292 # large.
287 # large.
293 lfdirstate = lfutil.openlfdirstate(ui, self)
288 lfdirstate = lfutil.openlfdirstate(ui, self)
294 dirtymatch = match_.always(self.root, self.getcwd())
289 dirtymatch = match_.always(self.root, self.getcwd())
295 unsure, s = lfdirstate.status(dirtymatch, [], False, False,
290 unsure, s = lfdirstate.status(dirtymatch, [], False, False,
296 False)
291 False)
297 modifiedfiles = unsure + s.modified + s.added + s.removed
292 modifiedfiles = unsure + s.modified + s.added + s.removed
298 lfiles = lfutil.listlfiles(self)
293 lfiles = lfutil.listlfiles(self)
299 # this only loops through largefiles that exist (not
294 # this only loops through largefiles that exist (not
300 # removed/renamed)
295 # removed/renamed)
301 for lfile in lfiles:
296 for lfile in lfiles:
302 if lfile in modifiedfiles:
297 if lfile in modifiedfiles:
303 if os.path.exists(
298 if os.path.exists(
304 self.wjoin(lfutil.standin(lfile))):
299 self.wjoin(lfutil.standin(lfile))):
305 # this handles the case where a rebase is being
300 # this handles the case where a rebase is being
306 # performed and the working copy is not updated
301 # performed and the working copy is not updated
307 # yet.
302 # yet.
308 if os.path.exists(self.wjoin(lfile)):
303 if os.path.exists(self.wjoin(lfile)):
309 lfutil.updatestandin(self,
304 lfutil.updatestandin(self,
310 lfutil.standin(lfile))
305 lfutil.standin(lfile))
311 lfdirstate.normal(lfile)
312
306
313 result = orig(text=text, user=user, date=date, match=match,
307 result = orig(text=text, user=user, date=date, match=match,
314 force=force, editor=editor, extra=extra)
308 force=force, editor=editor, extra=extra)
315
309
316 if result is not None:
317 for lfile in lfdirstate:
318 if lfile in modifiedfiles:
319 if (not os.path.exists(self.wjoin(
320 lfutil.standin(lfile)))) or \
321 (not os.path.exists(self.wjoin(lfile))):
322 lfdirstate.drop(lfile)
323
324 # This needs to be after commit; otherwise precommit hooks
325 # get the wrong status
326 lfdirstate.write()
327 return result
310 return result
328
311
329 lfiles = lfutil.listlfiles(self)
312 lfiles = lfutil.listlfiles(self)
330 match._files = self._subdirlfs(match.files(), lfiles)
313 match._files = self._subdirlfs(match.files(), lfiles)
331
314
332 # Case 2: user calls commit with specified patterns: refresh
315 # Case 2: user calls commit with specified patterns: refresh
333 # any matching big files.
316 # any matching big files.
334 smatcher = lfutil.composestandinmatcher(self, match)
317 smatcher = lfutil.composestandinmatcher(self, match)
335 standins = self.dirstate.walk(smatcher, [], False, False)
318 standins = self.dirstate.walk(smatcher, [], False, False)
336
319
337 # No matching big files: get out of the way and pass control to
320 # No matching big files: get out of the way and pass control to
338 # the usual commit() method.
321 # the usual commit() method.
339 if not standins:
322 if not standins:
340 return orig(text=text, user=user, date=date, match=match,
323 return orig(text=text, user=user, date=date, match=match,
341 force=force, editor=editor, extra=extra)
324 force=force, editor=editor, extra=extra)
342
325
343 # Refresh all matching big files. It's possible that the
326 # Refresh all matching big files. It's possible that the
344 # commit will end up failing, in which case the big files will
327 # commit will end up failing, in which case the big files will
345 # stay refreshed. No harm done: the user modified them and
328 # stay refreshed. No harm done: the user modified them and
346 # asked to commit them, so sooner or later we're going to
329 # asked to commit them, so sooner or later we're going to
347 # refresh the standins. Might as well leave them refreshed.
330 # refresh the standins. Might as well leave them refreshed.
348 lfdirstate = lfutil.openlfdirstate(ui, self)
331 lfdirstate = lfutil.openlfdirstate(ui, self)
349 for standin in standins:
332 for standin in standins:
350 lfile = lfutil.splitstandin(standin)
333 lfile = lfutil.splitstandin(standin)
351 if lfdirstate[lfile] != 'r':
334 if lfdirstate[lfile] != 'r':
352 lfutil.updatestandin(self, standin)
335 lfutil.updatestandin(self, standin)
353 lfdirstate.normal(lfile)
354 else:
355 lfdirstate.drop(lfile)
356
336
357 # Cook up a new matcher that only matches regular files or
337 # Cook up a new matcher that only matches regular files or
358 # standins corresponding to the big files requested by the
338 # standins corresponding to the big files requested by the
359 # user. Have to modify _files to prevent commit() from
339 # user. Have to modify _files to prevent commit() from
360 # complaining "not tracked" for big files.
340 # complaining "not tracked" for big files.
361 match = copy.copy(match)
341 match = copy.copy(match)
362 origmatchfn = match.matchfn
342 origmatchfn = match.matchfn
363
343
364 # Check both the list of largefiles and the list of
344 # Check both the list of largefiles and the list of
365 # standins because if a largefile was removed, it
345 # standins because if a largefile was removed, it
366 # won't be in the list of largefiles at this point
346 # won't be in the list of largefiles at this point
367 match._files += sorted(standins)
347 match._files += sorted(standins)
368
348
369 actualfiles = []
349 actualfiles = []
370 for f in match._files:
350 for f in match._files:
371 fstandin = lfutil.standin(f)
351 fstandin = lfutil.standin(f)
372
352
373 # ignore known largefiles and standins
353 # ignore known largefiles and standins
374 if f in lfiles or fstandin in standins:
354 if f in lfiles or fstandin in standins:
375 continue
355 continue
376
356
377 actualfiles.append(f)
357 actualfiles.append(f)
378 match._files = actualfiles
358 match._files = actualfiles
379
359
380 def matchfn(f):
360 def matchfn(f):
381 if origmatchfn(f):
361 if origmatchfn(f):
382 return f not in lfiles
362 return f not in lfiles
383 else:
363 else:
384 return f in standins
364 return f in standins
385
365
386 match.matchfn = matchfn
366 match.matchfn = matchfn
387 result = orig(text=text, user=user, date=date, match=match,
367 result = orig(text=text, user=user, date=date, match=match,
388 force=force, editor=editor, extra=extra)
368 force=force, editor=editor, extra=extra)
389 # This needs to be after commit; otherwise precommit hooks
390 # get the wrong status
391 lfdirstate.write()
392 return result
369 return result
393 finally:
370 finally:
394 wlock.release()
371 wlock.release()
395
372
396 def push(self, remote, force=False, revs=None, newbranch=False):
373 def push(self, remote, force=False, revs=None, newbranch=False):
397 if remote.local():
374 if remote.local():
398 missing = set(self.requirements) - remote.local().supported
375 missing = set(self.requirements) - remote.local().supported
399 if missing:
376 if missing:
400 msg = _("required features are not"
377 msg = _("required features are not"
401 " supported in the destination:"
378 " supported in the destination:"
402 " %s") % (', '.join(sorted(missing)))
379 " %s") % (', '.join(sorted(missing)))
403 raise util.Abort(msg)
380 raise util.Abort(msg)
404 return super(lfilesrepo, self).push(remote, force=force, revs=revs,
381 return super(lfilesrepo, self).push(remote, force=force, revs=revs,
405 newbranch=newbranch)
382 newbranch=newbranch)
406
383
407 def _subdirlfs(self, files, lfiles):
384 def _subdirlfs(self, files, lfiles):
408 '''
385 '''
409 Adjust matched file list
386 Adjust matched file list
410 If we pass a directory to commit whose only commitable files
387 If we pass a directory to commit whose only commitable files
411 are largefiles, the core commit code aborts before finding
388 are largefiles, the core commit code aborts before finding
412 the largefiles.
389 the largefiles.
413 So we do the following:
390 So we do the following:
414 For directories that only have largefiles as matches,
391 For directories that only have largefiles as matches,
415 we explicitly add the largefiles to the match list and remove
392 we explicitly add the largefiles to the match list and remove
416 the directory.
393 the directory.
417 In other cases, we leave the match list unmodified.
394 In other cases, we leave the match list unmodified.
418 '''
395 '''
419 actualfiles = []
396 actualfiles = []
420 dirs = []
397 dirs = []
421 regulars = []
398 regulars = []
422
399
423 for f in files:
400 for f in files:
424 if lfutil.isstandin(f + '/'):
401 if lfutil.isstandin(f + '/'):
425 raise util.Abort(
402 raise util.Abort(
426 _('file "%s" is a largefile standin') % f,
403 _('file "%s" is a largefile standin') % f,
427 hint=('commit the largefile itself instead'))
404 hint=('commit the largefile itself instead'))
428 # Scan directories
405 # Scan directories
429 if os.path.isdir(self.wjoin(f)):
406 if os.path.isdir(self.wjoin(f)):
430 dirs.append(f)
407 dirs.append(f)
431 else:
408 else:
432 regulars.append(f)
409 regulars.append(f)
433
410
434 for f in dirs:
411 for f in dirs:
435 matcheddir = False
412 matcheddir = False
436 d = self.dirstate.normalize(f) + '/'
413 d = self.dirstate.normalize(f) + '/'
437 # Check for matched normal files
414 # Check for matched normal files
438 for mf in regulars:
415 for mf in regulars:
439 if self.dirstate.normalize(mf).startswith(d):
416 if self.dirstate.normalize(mf).startswith(d):
440 actualfiles.append(f)
417 actualfiles.append(f)
441 matcheddir = True
418 matcheddir = True
442 break
419 break
443 if not matcheddir:
420 if not matcheddir:
444 # If no normal match, manually append
421 # If no normal match, manually append
445 # any matching largefiles
422 # any matching largefiles
446 for lf in lfiles:
423 for lf in lfiles:
447 if self.dirstate.normalize(lf).startswith(d):
424 if self.dirstate.normalize(lf).startswith(d):
448 actualfiles.append(lf)
425 actualfiles.append(lf)
449 if not matcheddir:
426 if not matcheddir:
450 actualfiles.append(lfutil.standin(f))
427 actualfiles.append(lfutil.standin(f))
451 matcheddir = True
428 matcheddir = True
452 # Nothing in dir, so readd it
429 # Nothing in dir, so readd it
453 # and let commit reject it
430 # and let commit reject it
454 if not matcheddir:
431 if not matcheddir:
455 actualfiles.append(f)
432 actualfiles.append(f)
456
433
457 # Always add normal files
434 # Always add normal files
458 actualfiles += regulars
435 actualfiles += regulars
459 return actualfiles
436 return actualfiles
460
437
461 repo.__class__ = lfilesrepo
438 repo.__class__ = lfilesrepo
462
439
463 def prepushoutgoinghook(local, remote, outgoing):
440 def prepushoutgoinghook(local, remote, outgoing):
464 if outgoing.missing:
441 if outgoing.missing:
465 toupload = set()
442 toupload = set()
466 addfunc = lambda fn, lfhash: toupload.add(lfhash)
443 addfunc = lambda fn, lfhash: toupload.add(lfhash)
467 lfutil.getlfilestoupload(local, outgoing.missing, addfunc)
444 lfutil.getlfilestoupload(local, outgoing.missing, addfunc)
468 lfcommands.uploadlfiles(ui, local, remote, toupload)
445 lfcommands.uploadlfiles(ui, local, remote, toupload)
469 repo.prepushoutgoinghooks.add("largefiles", prepushoutgoinghook)
446 repo.prepushoutgoinghooks.add("largefiles", prepushoutgoinghook)
470
447
471 def checkrequireslfiles(ui, repo, **kwargs):
448 def checkrequireslfiles(ui, repo, **kwargs):
472 if 'largefiles' not in repo.requirements and util.any(
449 if 'largefiles' not in repo.requirements and util.any(
473 lfutil.shortname+'/' in f[0] for f in repo.store.datafiles()):
450 lfutil.shortname+'/' in f[0] for f in repo.store.datafiles()):
474 repo.requirements.add('largefiles')
451 repo.requirements.add('largefiles')
475 repo._writerequirements()
452 repo._writerequirements()
476
453
477 ui.setconfig('hooks', 'changegroup.lfiles', checkrequireslfiles,
454 ui.setconfig('hooks', 'changegroup.lfiles', checkrequireslfiles,
478 'largefiles')
455 'largefiles')
479 ui.setconfig('hooks', 'commit.lfiles', checkrequireslfiles, 'largefiles')
456 ui.setconfig('hooks', 'commit.lfiles', checkrequireslfiles, 'largefiles')
General Comments 0
You need to be logged in to leave comments. Login now