##// END OF EJS Templates
largefiles: factor out procedures to update standins for pre-committing...
FUJIWARA Katsunori -
r23185:9870173e default
parent child Browse files
Show More
@@ -1,429 +1,539 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''largefiles utility code: must not import other modules in this package.'''
9 '''largefiles utility code: must not import other modules in this package.'''
10
10
11 import os
11 import os
12 import platform
12 import platform
13 import shutil
13 import shutil
14 import stat
14 import stat
15 import copy
15
16
16 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
17 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
17 from mercurial.i18n import _
18 from mercurial.i18n import _
18 from mercurial import node
19 from mercurial import node
19
20
20 shortname = '.hglf'
21 shortname = '.hglf'
21 shortnameslash = shortname + '/'
22 shortnameslash = shortname + '/'
22 longname = 'largefiles'
23 longname = 'largefiles'
23
24
24
25
25 # -- Private worker functions ------------------------------------------
26 # -- Private worker functions ------------------------------------------
26
27
def getminsize(ui, assumelfiles, opt, default=10):
    """Return the minimum largefile size as a float (in MB).

    Prefer the explicit command-line value ``opt``; when it is empty and
    automatic largefile tracking is requested, fall back to the
    'largefiles.minsize' config entry. Abort when the value is not
    numeric, or when no value is available at all.
    """
    size = opt
    if assumelfiles and not size:
        size = ui.config(longname, 'minsize', default=default)
    if size:
        try:
            size = float(size)
        except ValueError:
            raise util.Abort(_('largefiles: size must be number (not %s)\n')
                             % size)
    if size is None:
        raise util.Abort(_('minimum size for largefiles must be specified'))
    return size
40
41
def link(src, dest):
    """Hardlink src to dest, creating parent directories as needed.

    When hardlinking fails (e.g. across filesystems), fall back to an
    atomic copy that preserves the source's permission bits.
    """
    util.makedirs(os.path.dirname(dest))
    try:
        util.oslink(src, dest)
    except OSError:
        # if hardlinks fail, fallback on atomic copy
        out = util.atomictempfile(dest)
        for piece in util.filechunkiter(open(src, 'rb')):
            out.write(piece)
        out.close()
        os.chmod(dest, os.stat(src).st_mode)
52
53
def usercachepath(ui, hash):
    """Return the per-user cache path for the largefile with ``hash``.

    The 'largefiles.usercache' config wins when set; otherwise use the
    platform's conventional cache location (LOCALAPPDATA/APPDATA on
    Windows, ~/Library/Caches on Darwin, XDG_CACHE_HOME or ~/.cache on
    other POSIX systems). May return None when no suitable environment
    variable is available.
    """
    configured = ui.configpath(longname, 'usercache', None)
    if configured:
        return os.path.join(configured, hash)
    path = None
    if os.name == 'nt':
        appdata = os.getenv('LOCALAPPDATA', os.getenv('APPDATA'))
        if appdata:
            path = os.path.join(appdata, longname, hash)
    elif platform.system() == 'Darwin':
        home = os.getenv('HOME')
        if home:
            path = os.path.join(home, 'Library', 'Caches',
                                longname, hash)
    elif os.name == 'posix':
        xdg = os.getenv('XDG_CACHE_HOME')
        if xdg:
            path = os.path.join(xdg, longname, hash)
        else:
            home = os.getenv('HOME')
            if home:
                path = os.path.join(home, '.cache', longname, hash)
    else:
        raise util.Abort(_('unknown operating system: %s\n') % os.name)
    return path
78
79
def inusercache(ui, hash):
    """Report whether the largefile with ``hash`` exists in the user cache."""
    cached = usercachepath(ui, hash)
    return cached and os.path.exists(cached)
82
83
def findfile(repo, hash):
    """Locate the largefile with ``hash`` and return its store path.

    Prefer the repo store; on a miss, hardlink it in from the user
    cache. Return None when it is in neither place.
    """
    if instore(repo, hash):
        repo.ui.note(_('found %s in store\n') % hash)
        return storepath(repo, hash)
    if inusercache(repo.ui, hash):
        repo.ui.note(_('found %s in system cache\n') % hash)
        path = storepath(repo, hash)
        link(usercachepath(repo.ui, hash), path)
        return path
    return None
93
94
class largefilesdirstate(dirstate.dirstate):
    """A dirstate for largefiles whose entries are always keyed by
    slash-separated, normalized paths: every incoming path argument is
    run through unixpath() before delegating to the base class.  Ignore
    rules never apply (largefiles are tracked explicitly)."""
    def __getitem__(self, key):
        return super(largefilesdirstate, self).__getitem__(unixpath(key))
    def normal(self, f):
        return super(largefilesdirstate, self).normal(unixpath(f))
    def remove(self, f):
        return super(largefilesdirstate, self).remove(unixpath(f))
    def add(self, f):
        return super(largefilesdirstate, self).add(unixpath(f))
    def drop(self, f):
        return super(largefilesdirstate, self).drop(unixpath(f))
    def forget(self, f):
        return super(largefilesdirstate, self).forget(unixpath(f))
    def normallookup(self, f):
        return super(largefilesdirstate, self).normallookup(unixpath(f))
    def _ignore(self, f):
        # largefiles are never ignored, regardless of .hgignore
        return False
111
112
def openlfdirstate(ui, repo, create=True):
    '''
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.

    When ``create`` is true and no dirstate file exists yet, populate it
    from the standins currently tracked by the main dirstate.
    '''
    lfstoredir = repo.join(longname)
    opener = scmutil.opener(lfstoredir)
    lfdirstate = largefilesdirstate(opener, ui, repo.root,
                                    repo.dirstate._validate)

    # If the largefiles dirstate does not exist, populate and create
    # it. This ensures that we create it on the first meaningful
    # largefiles operation in a new clone.
    if create and not os.path.exists(os.path.join(lfstoredir, 'dirstate')):
        matcher = getstandinmatcher(repo)
        standins = repo.dirstate.walk(matcher, [], False, False)

        # only create the store directory when there is something to record
        if len(standins) > 0:
            util.makedirs(lfstoredir)

        for standin in standins:
            lfile = splitstandin(standin)
            # normallookup marks the file for re-checking on next status
            lfdirstate.normallookup(lfile)
    return lfdirstate
136
137
def lfdirstatestatus(lfdirstate, repo):
    # Compute the status of all largefiles against the working
    # directory's first parent, resolving entries the dirstate reports
    # as 'unsure' by comparing the working-copy content hash with the
    # hash recorded in the parent's standin.  Mutates and returns the
    # status object ``s``.
    wctx = repo['.']
    match = match_.always(repo.root, repo.getcwd())
    unsure, s = lfdirstate.status(match, [], False, False, False)
    modified, clean = s.modified, s.clean
    for lfile in unsure:
        try:
            fctx = wctx[standin(lfile)]
        except LookupError:
            # no standin in the parent: the largefile is new/modified
            fctx = None
        if not fctx or fctx.data().strip() != hashfile(repo.wjoin(lfile)):
            modified.append(lfile)
        else:
            clean.append(lfile)
            # record as clean so the next status call can skip rehashing
            lfdirstate.normal(lfile)
    return s
153
154
def listlfiles(repo, rev=None, matcher=None):
    '''return a list of largefiles in the working copy or the
    specified changeset'''
    if matcher is None:
        matcher = getstandinmatcher(repo)

    # ignore unknown ('?') files when walking the working directory
    result = []
    for f in repo[rev].walk(matcher):
        if rev is not None or repo.dirstate[f] != '?':
            result.append(splitstandin(f))
    return result
165
166
def instore(repo, hash):
    '''Report whether the largefile with ``hash`` is in the repo store.'''
    return os.path.exists(storepath(repo, hash))
168
169
def storepath(repo, hash):
    '''Return the repo-local store path for the largefile with ``hash``.'''
    return repo.join(os.path.join(longname, hash))
171
172
def copyfromcache(repo, hash, filename):
    '''Copy the specified largefile from the repo or system cache to
    filename in the repository. Return true on success or false if the
    file was not found in either cache (which should not happened:
    this is meant to be called only after ensuring that the needed
    largefile exists in the cache).'''
    cached = findfile(repo, hash)
    if cached is None:
        return False
    target = repo.wjoin(filename)
    util.makedirs(os.path.dirname(target))
    # The write may fail before the file is fully written, but we
    # don't use atomic writes in the working copy.
    shutil.copy(cached, target)
    return True
186
187
def copytostore(repo, rev, file, uploaded=False):
    """Copy one largefile into the store, keyed by the hash recorded in
    its standin at ``rev``; do nothing when the store already has it."""
    hash = readstandin(repo, file, rev)
    if not instore(repo, hash):
        copytostoreabsolute(repo, repo.wjoin(file), hash)
192
193
def copyalltostore(repo, node):
    '''Copy all largefiles in a given revision to the store'''
    ctx = repo[node]
    for filename in ctx.files():
        # only standins that survived into the revision's manifest count
        if not isstandin(filename) or filename not in ctx.manifest():
            continue
        copytostore(repo, ctx.node(), splitstandin(filename))
201
202
202
203
def copytostoreabsolute(repo, file, hash):
    # Put the largefile at absolute path ``file`` into the store under
    # ``hash``.  Hardlink from the user cache when it already has the
    # content; otherwise copy atomically and mirror back to the cache.
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    elif not getattr(repo, "_isconverting", False):
        # during a convert, the working copy does not hold real largefile
        # content, so skip populating the store from it
        util.makedirs(os.path.dirname(storepath(repo, hash)))
        dst = util.atomictempfile(storepath(repo, hash),
                                  createmode=repo.store.createmode)
        for chunk in util.filechunkiter(open(file, 'rb')):
            dst.write(chunk)
        dst.close()
        linktousercache(repo, hash)
214
215
def linktousercache(repo, hash):
    """Mirror a stored largefile into the per-user cache, when one is
    available on this platform."""
    target = usercachepath(repo.ui, hash)
    if target:
        link(storepath(repo, hash), target)
219
220
def getstandinmatcher(repo, pats=None, opts=None):
    '''Return a match object that applies pats to the standin directory.

    ``pats``/``opts`` default to None instead of []/{} to avoid the
    shared-mutable-default-argument pitfall; callers that passed lists
    or dicts before are unaffected.
    '''
    standindir = repo.wjoin(shortname)
    if pats:
        pats = [os.path.join(standindir, pat) for pat in pats]
    else:
        # no patterns: relative to repo root
        pats = [standindir]
    # no warnings about missing files or directories
    match = scmutil.match(repo[None], pats, opts or {})
    match.bad = lambda f, msg: None
    return match
232
233
def composestandinmatcher(repo, rmatcher):
    '''Return a matcher that accepts standins corresponding to the
    files accepted by rmatcher. Pass the list of files in the matcher
    as the paths specified by the user.'''
    smatcher = getstandinmatcher(repo, rmatcher.files())
    # keep a reference to the standin matchfn under a name that does not
    # shadow the module-level isstandin()
    standinmatch = smatcher.matchfn

    def composedmatchfn(f):
        return standinmatch(f) and rmatcher.matchfn(splitstandin(f))

    smatcher.matchfn = composedmatchfn
    return smatcher
244
245
def standin(filename):
    '''Return the repo-relative path to the standin for the specified big
    file.'''
    # Notes:
    # 1) Some callers want an absolute path, but for instance addlargefiles
    #    needs it repo-relative so it can be passed to repo[None].add().  So
    #    leave it up to the caller to use repo.wjoin() to get an absolute path.
    # 2) Join with '/' because that's what dirstate always uses, even on
    #    Windows. Change existing separator to '/' first in case we are
    #    passed filenames from an external source (like the command line).
    return shortnameslash + util.pconvert(filename)
256
257
def isstandin(filename):
    '''Return true if filename is a big file standin. filename must be
    in Mercurial's internal form (slash-separated).'''
    # a standin always lives directly under '<shortname>/' at the repo root
    return filename.startswith(shortnameslash)
261
262
def splitstandin(filename):
    """Return the largefile path a standin refers to, or None when the
    given path is not a standin."""
    # Split on / because that's what dirstate always uses, even on Windows.
    # Change local separator to / first just in case we are passed filenames
    # from an external source (like the command line).
    parts = util.pconvert(filename).split('/', 1)
    if len(parts) == 2 and parts[0] == shortname:
        return parts[1]
    return None
271
272
def updatestandin(repo, standin):
    """Re-hash the largefile behind ``standin`` and rewrite the standin,
    when the largefile exists in the working directory."""
    lfile = repo.wjoin(splitstandin(standin))
    if os.path.exists(lfile):
        writestandin(repo, standin, hashfile(lfile), getexecutable(lfile))
278
279
def readstandin(repo, filename, node=None):
    '''read hex hash from standin for filename at given node, or working
    directory if no node is given'''
    # repo[None] is the working-directory context, so omitting node reads
    # the standin as currently checked out; raises LookupError/IOError when
    # the standin is missing
    return repo[node][standin(filename)].data().strip()
283
284
def writestandin(repo, standin, hash, executable):
    '''write hash to <repo.root>/<standin>'''
    flags = 'x' if executable else ''
    repo.wwrite(standin, hash + '\n', flags)
287
288
def copyandhash(instream, outfile):
    '''Read bytes from instream (iterable) and write them to outfile,
    computing the SHA-1 hash of the data along the way. Return the hash.'''
    hasher = util.sha1('')
    for piece in instream:
        hasher.update(piece)
        outfile.write(piece)
    return hasher.hexdigest()
296
297
def hashrepofile(repo, file):
    """Return the SHA-1 hex digest of the working-directory copy of file."""
    return hashfile(repo.wjoin(file))
299
300
def hashfile(file):
    '''Return the hex SHA-1 digest of the named file's contents, or the
    empty string when the file does not exist.'''
    if not os.path.exists(file):
        return ''
    hasher = util.sha1('')
    fd = open(file, 'rb')
    try:
        # hash in 128K chunks so huge largefiles are never slurped whole
        for data in util.filechunkiter(fd, 128 * 1024):
            hasher.update(data)
    finally:
        # the original leaked the fd when reading raised; always close
        fd.close()
    return hasher.hexdigest()
309
310
def getexecutable(filename):
    """Report whether the file is executable by user, group, and other."""
    mode = os.stat(filename).st_mode
    needed = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
    return (mode & needed) == needed
315
316
def urljoin(first, second, *arg):
    """Join two or more URL components, guaranteeing exactly one slash
    between consecutive components."""
    def glue(left, right):
        left = left if left.endswith('/') else left + '/'
        right = right[1:] if right.startswith('/') else right
        return left + right

    url = glue(first, second)
    for piece in arg:
        url = glue(url, piece)
    return url
328
329
def hexsha1(data):
    """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
    object data"""
    digest = util.sha1()
    for piece in util.filechunkiter(data):
        digest.update(piece)
    return digest.hexdigest()
336
337
def httpsendfile(ui, filename):
    """Wrap filename in an httpsendfile opened for binary reading."""
    return httpconnection.httpsendfile(ui, filename, 'rb')
339
340
def unixpath(path):
    '''Return a version of path normalized for use with the lfdirstate.'''
    # normpath collapses '.'/'..' components; pconvert forces '/' separators
    return util.pconvert(os.path.normpath(path))
343
344
def islfilesrepo(repo):
    """Report whether this repository actually contains largefiles."""
    if 'largefiles' in repo.requirements:
        # look for any revlog living under the standin directory
        if util.any(shortnameslash in f[0]
                    for f in repo.store.datafiles()):
            return True

    # fall back to scanning the largefiles dirstate (without creating it)
    return util.any(openlfdirstate(repo.ui, repo, False))
350
351
class storeprotonotcapable(Exception):
    '''Raised when the remote protocol supports none of the requested
    largefile store types.'''
    def __init__(self, storetypes):
        # forward to the base class so exc.args / str(exc) carry the
        # store types (the original discarded them, printing empty)
        super(storeprotonotcapable, self).__init__(storetypes)
        self.storetypes = storetypes
354
355
def getstandinsstate(repo):
    """Return (lfile, hash) pairs for every standin the dirstate tracks;
    hash is None when the standin cannot be read."""
    state = []
    matcher = getstandinmatcher(repo)
    for fstandin in repo.dirstate.walk(matcher, [], False, False):
        lfile = splitstandin(fstandin)
        try:
            hexhash = readstandin(repo, lfile)
        except IOError:
            hexhash = None
        state.append((lfile, hexhash))
    return state
366
367
def synclfdirstate(repo, lfdirstate, lfile, normallookup):
    # Bring the lfdirstate entry for ``lfile`` in sync with the state of
    # its standin in the main dirstate, mapping each dirstate code onto
    # the corresponding lfdirstate operation.  ``normallookup`` forces a
    # re-check even for apparently-clean ('n') entries.
    lfstandin = standin(lfile)
    if lfstandin in repo.dirstate:
        # raw dirstate tuple: (state, mode, size, mtime)
        stat = repo.dirstate._map[lfstandin]
        state, mtime = stat[0], stat[3]
    else:
        # untracked standin
        state, mtime = '?', -1
    if state == 'n':
        if normallookup or mtime < 0:
            # state 'n' doesn't ensure 'clean' in this case
            lfdirstate.normallookup(lfile)
        else:
            lfdirstate.normal(lfile)
    elif state == 'm':
        lfdirstate.normallookup(lfile)
    elif state == 'r':
        lfdirstate.remove(lfile)
    elif state == 'a':
        lfdirstate.add(lfile)
    elif state == '?':
        lfdirstate.drop(lfile)
388
389
def markcommitted(orig, ctx, node):
    """Wrapper for context.markcommitted: after the original runs, sync
    the largefiles dirstate for every standin touched by the commit."""
    repo = ctx._repo

    orig(node)

    lfdirstate = openlfdirstate(repo.ui, repo)
    for f in ctx.files():
        lfile = splitstandin(f)
        if lfile is not None:
            synclfdirstate(repo, lfdirstate, lfile, False)
    lfdirstate.write()
400
401
def getlfilestoupdate(oldstandins, newstandins):
    '''Return the names of largefiles whose (lfile, hash) standin entries
    differ between the two lists, deduplicated in iteration order.'''
    changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
    filelist = []
    seen = set()
    for lfile, _hash in changedstandins:
        # membership test against a set, not the output list (was O(n^2))
        if lfile not in seen:
            seen.add(lfile)
            filelist.append(lfile)
    return filelist
408
409
def getlfilestoupload(repo, missing, addfunc):
    # For every outgoing changeset in ``missing``, call
    # addfunc(standin, hash) for each standin whose largefile must exist
    # on the remote side.
    for n in missing:
        parents = [p for p in repo.changelog.parents(n) if p != node.nullid]
        ctx = repo[n]
        files = set(ctx.files())
        if len(parents) == 2:
            # for merges, ctx.files() is incomplete: also consider files
            # dropped from the merge result relative to either parent, and
            # files whose merged version differs from both parents
            mc = ctx.manifest()
            mp1 = ctx.parents()[0].manifest()
            mp2 = ctx.parents()[1].manifest()
            for f in mp1:
                if f not in mc:
                    files.add(f)
            for f in mp2:
                if f not in mc:
                    files.add(f)
            for f in mc:
                if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                    files.add(f)
        for fn in files:
            # 'fn in ctx' guards against files removed in this changeset
            if isstandin(fn) and fn in ctx:
                addfunc(fn, ctx[fn].data().strip())
431
def updatestandinsbymatch(repo, match):
    '''Update standins in the working directory according to specified match

    This returns (possibly modified) ``match`` object to be used for
    subsequent commit process.
    '''

    ui = repo.ui

    # Case 0: Automated committing
    #
    # While automated committing (like rebase, transplant
    # and so on), this code path is used to avoid:
    # (1) updating standins, because standins should
    #     be already updated at this point
    # (2) aborting when standins are matched by "match",
    #     because automated committing may specify them directly
    #
    if getattr(repo, "_isrebasing", False) or \
            getattr(repo, "_istransplanting", False):
        return match

    # Case 1: user calls commit with no specific files or
    # include/exclude patterns: refresh and commit all files that
    # are "dirty".
    if match is None or match.always():
        # Spend a bit of time here to get a list of files we know
        # are modified so we can compare only against those.
        # It can cost a lot of time (several seconds)
        # otherwise to update all standins if the largefiles are
        # large.
        lfdirstate = openlfdirstate(ui, repo)
        dirtymatch = match_.always(repo.root, repo.getcwd())
        unsure, s = lfdirstate.status(dirtymatch, [], False, False,
                                      False)
        modifiedfiles = unsure + s.modified + s.added + s.removed
        lfiles = listlfiles(repo)
        # this only loops through largefiles that exist (not
        # removed/renamed)
        for lfile in lfiles:
            if lfile in modifiedfiles:
                if os.path.exists(
                        repo.wjoin(standin(lfile))):
                    # this handles the case where a rebase is being
                    # performed and the working copy is not updated
                    # yet.
                    if os.path.exists(repo.wjoin(lfile)):
                        updatestandin(repo,
                                      standin(lfile))

        return match

    lfiles = listlfiles(repo)
    # expand directory patterns into the largefiles they contain
    match._files = repo._subdirlfs(match.files(), lfiles)

    # Case 2: user calls commit with specified patterns: refresh
    # any matching big files.
    smatcher = composestandinmatcher(repo, match)
    standins = repo.dirstate.walk(smatcher, [], False, False)

    # No matching big files: get out of the way and pass control to
    # the usual commit() method.
    if not standins:
        return match

    # Refresh all matching big files. It's possible that the
    # commit will end up failing, in which case the big files will
    # stay refreshed. No harm done: the user modified them and
    # asked to commit them, so sooner or later we're going to
    # refresh the standins. Might as well leave them refreshed.
    lfdirstate = openlfdirstate(ui, repo)
    for fstandin in standins:
        lfile = splitstandin(fstandin)
        # skip standins whose largefile is marked for removal
        if lfdirstate[lfile] != 'r':
            updatestandin(repo, fstandin)

    # Cook up a new matcher that only matches regular files or
    # standins corresponding to the big files requested by the
    # user. Have to modify _files to prevent commit() from
    # complaining "not tracked" for big files.
    match = copy.copy(match)
    origmatchfn = match.matchfn

    # Check both the list of largefiles and the list of
    # standins because if a largefile was removed, it
    # won't be in the list of largefiles at this point
    match._files += sorted(standins)

    actualfiles = []
    for f in match._files:
        fstandin = standin(f)

        # ignore known largefiles and standins
        if f in lfiles or fstandin in standins:
            continue

        actualfiles.append(f)
    match._files = actualfiles

    def matchfn(f):
        # accept normal files the user asked for, but route largefile
        # matches through their standins instead
        if origmatchfn(f):
            return f not in lfiles
        else:
            return f in standins

    match.matchfn = matchfn

    return match
@@ -1,456 +1,357 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''setup for largefiles repositories: reposetup'''
9 '''setup for largefiles repositories: reposetup'''
10 import copy
10 import copy
11 import os
11 import os
12
12
13 from mercurial import error, manifest, match as match_, util
13 from mercurial import error, manifest, match as match_, util
14 from mercurial.i18n import _
14 from mercurial.i18n import _
15 from mercurial import localrepo, scmutil
15 from mercurial import localrepo, scmutil
16
16
17 import lfcommands
17 import lfcommands
18 import lfutil
18 import lfutil
19
19
20 def reposetup(ui, repo):
20 def reposetup(ui, repo):
21 # wire repositories should be given new wireproto functions
21 # wire repositories should be given new wireproto functions
22 # by "proto.wirereposetup()" via "hg.wirepeersetupfuncs"
22 # by "proto.wirereposetup()" via "hg.wirepeersetupfuncs"
23 if not repo.local():
23 if not repo.local():
24 return
24 return
25
25
26 class lfilesrepo(repo.__class__):
26 class lfilesrepo(repo.__class__):
27 lfstatus = False
27 lfstatus = False
28 def status_nolfiles(self, *args, **kwargs):
28 def status_nolfiles(self, *args, **kwargs):
29 return super(lfilesrepo, self).status(*args, **kwargs)
29 return super(lfilesrepo, self).status(*args, **kwargs)
30
30
31 # When lfstatus is set, return a context that gives the names
31 # When lfstatus is set, return a context that gives the names
32 # of largefiles instead of their corresponding standins and
32 # of largefiles instead of their corresponding standins and
33 # identifies the largefiles as always binary, regardless of
33 # identifies the largefiles as always binary, regardless of
34 # their actual contents.
34 # their actual contents.
35 def __getitem__(self, changeid):
35 def __getitem__(self, changeid):
36 ctx = super(lfilesrepo, self).__getitem__(changeid)
36 ctx = super(lfilesrepo, self).__getitem__(changeid)
37 if self.lfstatus:
37 if self.lfstatus:
38 class lfilesmanifestdict(manifest.manifestdict):
38 class lfilesmanifestdict(manifest.manifestdict):
39 def __contains__(self, filename):
39 def __contains__(self, filename):
40 orig = super(lfilesmanifestdict, self).__contains__
40 orig = super(lfilesmanifestdict, self).__contains__
41 return orig(filename) or orig(lfutil.standin(filename))
41 return orig(filename) or orig(lfutil.standin(filename))
42 class lfilesctx(ctx.__class__):
42 class lfilesctx(ctx.__class__):
43 def files(self):
43 def files(self):
44 filenames = super(lfilesctx, self).files()
44 filenames = super(lfilesctx, self).files()
45 return [lfutil.splitstandin(f) or f for f in filenames]
45 return [lfutil.splitstandin(f) or f for f in filenames]
46 def manifest(self):
46 def manifest(self):
47 man1 = super(lfilesctx, self).manifest()
47 man1 = super(lfilesctx, self).manifest()
48 man1.__class__ = lfilesmanifestdict
48 man1.__class__ = lfilesmanifestdict
49 return man1
49 return man1
50 def filectx(self, path, fileid=None, filelog=None):
50 def filectx(self, path, fileid=None, filelog=None):
51 orig = super(lfilesctx, self).filectx
51 orig = super(lfilesctx, self).filectx
52 try:
52 try:
53 if filelog is not None:
53 if filelog is not None:
54 result = orig(path, fileid, filelog)
54 result = orig(path, fileid, filelog)
55 else:
55 else:
56 result = orig(path, fileid)
56 result = orig(path, fileid)
57 except error.LookupError:
57 except error.LookupError:
58 # Adding a null character will cause Mercurial to
58 # Adding a null character will cause Mercurial to
59 # identify this as a binary file.
59 # identify this as a binary file.
60 if filelog is not None:
60 if filelog is not None:
61 result = orig(lfutil.standin(path), fileid,
61 result = orig(lfutil.standin(path), fileid,
62 filelog)
62 filelog)
63 else:
63 else:
64 result = orig(lfutil.standin(path), fileid)
64 result = orig(lfutil.standin(path), fileid)
65 olddata = result.data
65 olddata = result.data
66 result.data = lambda: olddata() + '\0'
66 result.data = lambda: olddata() + '\0'
67 return result
67 return result
68 ctx.__class__ = lfilesctx
68 ctx.__class__ = lfilesctx
69 return ctx
69 return ctx
70
70
71 # Figure out the status of big files and insert them into the
71 # Figure out the status of big files and insert them into the
72 # appropriate list in the result. Also removes standin files
72 # appropriate list in the result. Also removes standin files
73 # from the listing. Revert to the original status if
73 # from the listing. Revert to the original status if
74 # self.lfstatus is False.
74 # self.lfstatus is False.
75 # XXX large file status is buggy when used on repo proxy.
75 # XXX large file status is buggy when used on repo proxy.
76 # XXX this needs to be investigated.
76 # XXX this needs to be investigated.
77 @localrepo.unfilteredmethod
77 @localrepo.unfilteredmethod
78 def status(self, node1='.', node2=None, match=None, ignored=False,
78 def status(self, node1='.', node2=None, match=None, ignored=False,
79 clean=False, unknown=False, listsubrepos=False):
79 clean=False, unknown=False, listsubrepos=False):
80 listignored, listclean, listunknown = ignored, clean, unknown
80 listignored, listclean, listunknown = ignored, clean, unknown
81 orig = super(lfilesrepo, self).status
81 orig = super(lfilesrepo, self).status
82 if not self.lfstatus:
82 if not self.lfstatus:
83 return orig(node1, node2, match, listignored, listclean,
83 return orig(node1, node2, match, listignored, listclean,
84 listunknown, listsubrepos)
84 listunknown, listsubrepos)
85
85
86 # some calls in this function rely on the old version of status
86 # some calls in this function rely on the old version of status
87 self.lfstatus = False
87 self.lfstatus = False
88 ctx1 = self[node1]
88 ctx1 = self[node1]
89 ctx2 = self[node2]
89 ctx2 = self[node2]
90 working = ctx2.rev() is None
90 working = ctx2.rev() is None
91 parentworking = working and ctx1 == self['.']
91 parentworking = working and ctx1 == self['.']
92
92
93 if match is None:
93 if match is None:
94 match = match_.always(self.root, self.getcwd())
94 match = match_.always(self.root, self.getcwd())
95
95
96 wlock = None
96 wlock = None
97 try:
97 try:
98 try:
98 try:
99 # updating the dirstate is optional
99 # updating the dirstate is optional
100 # so we don't wait on the lock
100 # so we don't wait on the lock
101 wlock = self.wlock(False)
101 wlock = self.wlock(False)
102 except error.LockError:
102 except error.LockError:
103 pass
103 pass
104
104
105 # First check if paths or patterns were specified on the
105 # First check if paths or patterns were specified on the
106 # command line. If there were, and they don't match any
106 # command line. If there were, and they don't match any
107 # largefiles, we should just bail here and let super
107 # largefiles, we should just bail here and let super
108 # handle it -- thus gaining a big performance boost.
108 # handle it -- thus gaining a big performance boost.
109 lfdirstate = lfutil.openlfdirstate(ui, self)
109 lfdirstate = lfutil.openlfdirstate(ui, self)
110 if not match.always():
110 if not match.always():
111 for f in lfdirstate:
111 for f in lfdirstate:
112 if match(f):
112 if match(f):
113 break
113 break
114 else:
114 else:
115 return orig(node1, node2, match, listignored, listclean,
115 return orig(node1, node2, match, listignored, listclean,
116 listunknown, listsubrepos)
116 listunknown, listsubrepos)
117
117
118 # Create a copy of match that matches standins instead
118 # Create a copy of match that matches standins instead
119 # of largefiles.
119 # of largefiles.
120 def tostandins(files):
120 def tostandins(files):
121 if not working:
121 if not working:
122 return files
122 return files
123 newfiles = []
123 newfiles = []
124 dirstate = self.dirstate
124 dirstate = self.dirstate
125 for f in files:
125 for f in files:
126 sf = lfutil.standin(f)
126 sf = lfutil.standin(f)
127 if sf in dirstate:
127 if sf in dirstate:
128 newfiles.append(sf)
128 newfiles.append(sf)
129 elif sf in dirstate.dirs():
129 elif sf in dirstate.dirs():
130 # Directory entries could be regular or
130 # Directory entries could be regular or
131 # standin, check both
131 # standin, check both
132 newfiles.extend((f, sf))
132 newfiles.extend((f, sf))
133 else:
133 else:
134 newfiles.append(f)
134 newfiles.append(f)
135 return newfiles
135 return newfiles
136
136
137 m = copy.copy(match)
137 m = copy.copy(match)
138 m._files = tostandins(m._files)
138 m._files = tostandins(m._files)
139
139
140 result = orig(node1, node2, m, ignored, clean, unknown,
140 result = orig(node1, node2, m, ignored, clean, unknown,
141 listsubrepos)
141 listsubrepos)
142 if working:
142 if working:
143
143
144 def sfindirstate(f):
144 def sfindirstate(f):
145 sf = lfutil.standin(f)
145 sf = lfutil.standin(f)
146 dirstate = self.dirstate
146 dirstate = self.dirstate
147 return sf in dirstate or sf in dirstate.dirs()
147 return sf in dirstate or sf in dirstate.dirs()
148
148
149 match._files = [f for f in match._files
149 match._files = [f for f in match._files
150 if sfindirstate(f)]
150 if sfindirstate(f)]
151 # Don't waste time getting the ignored and unknown
151 # Don't waste time getting the ignored and unknown
152 # files from lfdirstate
152 # files from lfdirstate
153 unsure, s = lfdirstate.status(match, [], False, listclean,
153 unsure, s = lfdirstate.status(match, [], False, listclean,
154 False)
154 False)
155 (modified, added, removed, clean) = (s.modified, s.added,
155 (modified, added, removed, clean) = (s.modified, s.added,
156 s.removed, s.clean)
156 s.removed, s.clean)
157 if parentworking:
157 if parentworking:
158 for lfile in unsure:
158 for lfile in unsure:
159 standin = lfutil.standin(lfile)
159 standin = lfutil.standin(lfile)
160 if standin not in ctx1:
160 if standin not in ctx1:
161 # from second parent
161 # from second parent
162 modified.append(lfile)
162 modified.append(lfile)
163 elif ctx1[standin].data().strip() \
163 elif ctx1[standin].data().strip() \
164 != lfutil.hashfile(self.wjoin(lfile)):
164 != lfutil.hashfile(self.wjoin(lfile)):
165 modified.append(lfile)
165 modified.append(lfile)
166 else:
166 else:
167 if listclean:
167 if listclean:
168 clean.append(lfile)
168 clean.append(lfile)
169 lfdirstate.normal(lfile)
169 lfdirstate.normal(lfile)
170 else:
170 else:
171 tocheck = unsure + modified + added + clean
171 tocheck = unsure + modified + added + clean
172 modified, added, clean = [], [], []
172 modified, added, clean = [], [], []
173
173
174 for lfile in tocheck:
174 for lfile in tocheck:
175 standin = lfutil.standin(lfile)
175 standin = lfutil.standin(lfile)
176 if standin in ctx1:
176 if standin in ctx1:
177 abslfile = self.wjoin(lfile)
177 abslfile = self.wjoin(lfile)
178 if ((ctx1[standin].data().strip() !=
178 if ((ctx1[standin].data().strip() !=
179 lfutil.hashfile(abslfile)) or
179 lfutil.hashfile(abslfile)) or
180 (('x' in ctx1.flags(standin)) !=
180 (('x' in ctx1.flags(standin)) !=
181 bool(lfutil.getexecutable(abslfile)))):
181 bool(lfutil.getexecutable(abslfile)))):
182 modified.append(lfile)
182 modified.append(lfile)
183 elif listclean:
183 elif listclean:
184 clean.append(lfile)
184 clean.append(lfile)
185 else:
185 else:
186 added.append(lfile)
186 added.append(lfile)
187
187
188 # at this point, 'removed' contains largefiles
188 # at this point, 'removed' contains largefiles
189 # marked as 'R' in the working context.
189 # marked as 'R' in the working context.
190 # then, largefiles not managed also in the target
190 # then, largefiles not managed also in the target
191 # context should be excluded from 'removed'.
191 # context should be excluded from 'removed'.
192 removed = [lfile for lfile in removed
192 removed = [lfile for lfile in removed
193 if lfutil.standin(lfile) in ctx1]
193 if lfutil.standin(lfile) in ctx1]
194
194
195 # Standins no longer found in lfdirstate has been
195 # Standins no longer found in lfdirstate has been
196 # removed
196 # removed
197 for standin in ctx1.walk(lfutil.getstandinmatcher(self)):
197 for standin in ctx1.walk(lfutil.getstandinmatcher(self)):
198 lfile = lfutil.splitstandin(standin)
198 lfile = lfutil.splitstandin(standin)
199 if not match(lfile):
199 if not match(lfile):
200 continue
200 continue
201 if lfile not in lfdirstate:
201 if lfile not in lfdirstate:
202 removed.append(lfile)
202 removed.append(lfile)
203
203
204 # Filter result lists
204 # Filter result lists
205 result = list(result)
205 result = list(result)
206
206
207 # Largefiles are not really removed when they're
207 # Largefiles are not really removed when they're
208 # still in the normal dirstate. Likewise, normal
208 # still in the normal dirstate. Likewise, normal
209 # files are not really removed if they are still in
209 # files are not really removed if they are still in
210 # lfdirstate. This happens in merges where files
210 # lfdirstate. This happens in merges where files
211 # change type.
211 # change type.
212 removed = [f for f in removed
212 removed = [f for f in removed
213 if f not in self.dirstate]
213 if f not in self.dirstate]
214 result[2] = [f for f in result[2]
214 result[2] = [f for f in result[2]
215 if f not in lfdirstate]
215 if f not in lfdirstate]
216
216
217 lfiles = set(lfdirstate._map)
217 lfiles = set(lfdirstate._map)
218 # Unknown files
218 # Unknown files
219 result[4] = set(result[4]).difference(lfiles)
219 result[4] = set(result[4]).difference(lfiles)
220 # Ignored files
220 # Ignored files
221 result[5] = set(result[5]).difference(lfiles)
221 result[5] = set(result[5]).difference(lfiles)
222 # combine normal files and largefiles
222 # combine normal files and largefiles
223 normals = [[fn for fn in filelist
223 normals = [[fn for fn in filelist
224 if not lfutil.isstandin(fn)]
224 if not lfutil.isstandin(fn)]
225 for filelist in result]
225 for filelist in result]
226 lfstatus = (modified, added, removed, s.deleted, [], [],
226 lfstatus = (modified, added, removed, s.deleted, [], [],
227 clean)
227 clean)
228 result = [sorted(list1 + list2)
228 result = [sorted(list1 + list2)
229 for (list1, list2) in zip(normals, lfstatus)]
229 for (list1, list2) in zip(normals, lfstatus)]
230 else: # not against working directory
230 else: # not against working directory
231 result = [[lfutil.splitstandin(f) or f for f in items]
231 result = [[lfutil.splitstandin(f) or f for f in items]
232 for items in result]
232 for items in result]
233
233
234 if wlock:
234 if wlock:
235 lfdirstate.write()
235 lfdirstate.write()
236
236
237 finally:
237 finally:
238 if wlock:
238 if wlock:
239 wlock.release()
239 wlock.release()
240
240
241 self.lfstatus = True
241 self.lfstatus = True
242 return scmutil.status(*result)
242 return scmutil.status(*result)
243
243
244 # As part of committing, copy all of the largefiles into the
244 # As part of committing, copy all of the largefiles into the
245 # cache.
245 # cache.
246 def commitctx(self, ctx, *args, **kwargs):
246 def commitctx(self, ctx, *args, **kwargs):
247 node = super(lfilesrepo, self).commitctx(ctx, *args, **kwargs)
247 node = super(lfilesrepo, self).commitctx(ctx, *args, **kwargs)
248 lfutil.copyalltostore(self, node)
248 lfutil.copyalltostore(self, node)
249 class lfilesctx(ctx.__class__):
249 class lfilesctx(ctx.__class__):
250 def markcommitted(self, node):
250 def markcommitted(self, node):
251 orig = super(lfilesctx, self).markcommitted
251 orig = super(lfilesctx, self).markcommitted
252 return lfutil.markcommitted(orig, self, node)
252 return lfutil.markcommitted(orig, self, node)
253 ctx.__class__ = lfilesctx
253 ctx.__class__ = lfilesctx
254 return node
254 return node
255
255
256 # Before commit, largefile standins have not had their
256 # Before commit, largefile standins have not had their
257 # contents updated to reflect the hash of their largefile.
257 # contents updated to reflect the hash of their largefile.
258 # Do that here.
258 # Do that here.
259 def commit(self, text="", user=None, date=None, match=None,
259 def commit(self, text="", user=None, date=None, match=None,
260 force=False, editor=False, extra={}):
260 force=False, editor=False, extra={}):
261 orig = super(lfilesrepo, self).commit
261 orig = super(lfilesrepo, self).commit
262
262
263 wlock = self.wlock()
263 wlock = self.wlock()
264 try:
264 try:
265 # Case 0: Automated committing
265 match = lfutil.updatestandinsbymatch(self, match)
266 #
267 # While automated committing (like rebase, transplant
268 # and so on), this code path is used to avoid:
269 # (1) updating standins, because standins should
270 # be already updated at this point
271 # (2) aborting when standins are matched by "match",
272 # because automated committing may specify them directly
273 #
274 if getattr(self, "_isrebasing", False) or \
275 getattr(self, "_istransplanting", False):
276 result = orig(text=text, user=user, date=date, match=match,
277 force=force, editor=editor, extra=extra)
278 return result
279 # Case 1: user calls commit with no specific files or
280 # include/exclude patterns: refresh and commit all files that
281 # are "dirty".
282 if match is None or match.always():
283 # Spend a bit of time here to get a list of files we know
284 # are modified so we can compare only against those.
285 # It can cost a lot of time (several seconds)
286 # otherwise to update all standins if the largefiles are
287 # large.
288 lfdirstate = lfutil.openlfdirstate(ui, self)
289 dirtymatch = match_.always(self.root, self.getcwd())
290 unsure, s = lfdirstate.status(dirtymatch, [], False, False,
291 False)
292 modifiedfiles = unsure + s.modified + s.added + s.removed
293 lfiles = lfutil.listlfiles(self)
294 # this only loops through largefiles that exist (not
295 # removed/renamed)
296 for lfile in lfiles:
297 if lfile in modifiedfiles:
298 if os.path.exists(
299 self.wjoin(lfutil.standin(lfile))):
300 # this handles the case where a rebase is being
301 # performed and the working copy is not updated
302 # yet.
303 if os.path.exists(self.wjoin(lfile)):
304 lfutil.updatestandin(self,
305 lfutil.standin(lfile))
306
307 result = orig(text=text, user=user, date=date, match=match,
308 force=force, editor=editor, extra=extra)
309
310 return result
311
312 lfiles = lfutil.listlfiles(self)
313 match._files = self._subdirlfs(match.files(), lfiles)
314
315 # Case 2: user calls commit with specified patterns: refresh
316 # any matching big files.
317 smatcher = lfutil.composestandinmatcher(self, match)
318 standins = self.dirstate.walk(smatcher, [], False, False)
319
320 # No matching big files: get out of the way and pass control to
321 # the usual commit() method.
322 if not standins:
323 return orig(text=text, user=user, date=date, match=match,
324 force=force, editor=editor, extra=extra)
325
326 # Refresh all matching big files. It's possible that the
327 # commit will end up failing, in which case the big files will
328 # stay refreshed. No harm done: the user modified them and
329 # asked to commit them, so sooner or later we're going to
330 # refresh the standins. Might as well leave them refreshed.
331 lfdirstate = lfutil.openlfdirstate(ui, self)
332 for standin in standins:
333 lfile = lfutil.splitstandin(standin)
334 if lfdirstate[lfile] != 'r':
335 lfutil.updatestandin(self, standin)
336
337 # Cook up a new matcher that only matches regular files or
338 # standins corresponding to the big files requested by the
339 # user. Have to modify _files to prevent commit() from
340 # complaining "not tracked" for big files.
341 match = copy.copy(match)
342 origmatchfn = match.matchfn
343
344 # Check both the list of largefiles and the list of
345 # standins because if a largefile was removed, it
346 # won't be in the list of largefiles at this point
347 match._files += sorted(standins)
348
349 actualfiles = []
350 for f in match._files:
351 fstandin = lfutil.standin(f)
352
353 # ignore known largefiles and standins
354 if f in lfiles or fstandin in standins:
355 continue
356
357 actualfiles.append(f)
358 match._files = actualfiles
359
360 def matchfn(f):
361 if origmatchfn(f):
362 return f not in lfiles
363 else:
364 return f in standins
365
366 match.matchfn = matchfn
367 result = orig(text=text, user=user, date=date, match=match,
266 result = orig(text=text, user=user, date=date, match=match,
368 force=force, editor=editor, extra=extra)
267 force=force, editor=editor, extra=extra)
369 return result
268 return result
370 finally:
269 finally:
371 wlock.release()
270 wlock.release()
372
271
373 def push(self, remote, force=False, revs=None, newbranch=False):
272 def push(self, remote, force=False, revs=None, newbranch=False):
374 if remote.local():
273 if remote.local():
375 missing = set(self.requirements) - remote.local().supported
274 missing = set(self.requirements) - remote.local().supported
376 if missing:
275 if missing:
377 msg = _("required features are not"
276 msg = _("required features are not"
378 " supported in the destination:"
277 " supported in the destination:"
379 " %s") % (', '.join(sorted(missing)))
278 " %s") % (', '.join(sorted(missing)))
380 raise util.Abort(msg)
279 raise util.Abort(msg)
381 return super(lfilesrepo, self).push(remote, force=force, revs=revs,
280 return super(lfilesrepo, self).push(remote, force=force, revs=revs,
382 newbranch=newbranch)
281 newbranch=newbranch)
383
282
283 # TODO: _subdirlfs should be moved into "lfutil.py", because
284 # it is referred only from "lfutil.updatestandinsbymatch"
384 def _subdirlfs(self, files, lfiles):
285 def _subdirlfs(self, files, lfiles):
385 '''
286 '''
386 Adjust matched file list
287 Adjust matched file list
387 If we pass a directory to commit whose only commitable files
288 If we pass a directory to commit whose only commitable files
388 are largefiles, the core commit code aborts before finding
289 are largefiles, the core commit code aborts before finding
389 the largefiles.
290 the largefiles.
390 So we do the following:
291 So we do the following:
391 For directories that only have largefiles as matches,
292 For directories that only have largefiles as matches,
392 we explicitly add the largefiles to the match list and remove
293 we explicitly add the largefiles to the match list and remove
393 the directory.
294 the directory.
394 In other cases, we leave the match list unmodified.
295 In other cases, we leave the match list unmodified.
395 '''
296 '''
396 actualfiles = []
297 actualfiles = []
397 dirs = []
298 dirs = []
398 regulars = []
299 regulars = []
399
300
400 for f in files:
301 for f in files:
401 if lfutil.isstandin(f + '/'):
302 if lfutil.isstandin(f + '/'):
402 raise util.Abort(
303 raise util.Abort(
403 _('file "%s" is a largefile standin') % f,
304 _('file "%s" is a largefile standin') % f,
404 hint=('commit the largefile itself instead'))
305 hint=('commit the largefile itself instead'))
405 # Scan directories
306 # Scan directories
406 if os.path.isdir(self.wjoin(f)):
307 if os.path.isdir(self.wjoin(f)):
407 dirs.append(f)
308 dirs.append(f)
408 else:
309 else:
409 regulars.append(f)
310 regulars.append(f)
410
311
411 for f in dirs:
312 for f in dirs:
412 matcheddir = False
313 matcheddir = False
413 d = self.dirstate.normalize(f) + '/'
314 d = self.dirstate.normalize(f) + '/'
414 # Check for matched normal files
315 # Check for matched normal files
415 for mf in regulars:
316 for mf in regulars:
416 if self.dirstate.normalize(mf).startswith(d):
317 if self.dirstate.normalize(mf).startswith(d):
417 actualfiles.append(f)
318 actualfiles.append(f)
418 matcheddir = True
319 matcheddir = True
419 break
320 break
420 if not matcheddir:
321 if not matcheddir:
421 # If no normal match, manually append
322 # If no normal match, manually append
422 # any matching largefiles
323 # any matching largefiles
423 for lf in lfiles:
324 for lf in lfiles:
424 if self.dirstate.normalize(lf).startswith(d):
325 if self.dirstate.normalize(lf).startswith(d):
425 actualfiles.append(lf)
326 actualfiles.append(lf)
426 if not matcheddir:
327 if not matcheddir:
427 actualfiles.append(lfutil.standin(f))
328 actualfiles.append(lfutil.standin(f))
428 matcheddir = True
329 matcheddir = True
429 # Nothing in dir, so readd it
330 # Nothing in dir, so readd it
430 # and let commit reject it
331 # and let commit reject it
431 if not matcheddir:
332 if not matcheddir:
432 actualfiles.append(f)
333 actualfiles.append(f)
433
334
434 # Always add normal files
335 # Always add normal files
435 actualfiles += regulars
336 actualfiles += regulars
436 return actualfiles
337 return actualfiles
437
338
438 repo.__class__ = lfilesrepo
339 repo.__class__ = lfilesrepo
439
340
440 def prepushoutgoinghook(local, remote, outgoing):
341 def prepushoutgoinghook(local, remote, outgoing):
441 if outgoing.missing:
342 if outgoing.missing:
442 toupload = set()
343 toupload = set()
443 addfunc = lambda fn, lfhash: toupload.add(lfhash)
344 addfunc = lambda fn, lfhash: toupload.add(lfhash)
444 lfutil.getlfilestoupload(local, outgoing.missing, addfunc)
345 lfutil.getlfilestoupload(local, outgoing.missing, addfunc)
445 lfcommands.uploadlfiles(ui, local, remote, toupload)
346 lfcommands.uploadlfiles(ui, local, remote, toupload)
446 repo.prepushoutgoinghooks.add("largefiles", prepushoutgoinghook)
347 repo.prepushoutgoinghooks.add("largefiles", prepushoutgoinghook)
447
348
448 def checkrequireslfiles(ui, repo, **kwargs):
349 def checkrequireslfiles(ui, repo, **kwargs):
449 if 'largefiles' not in repo.requirements and util.any(
350 if 'largefiles' not in repo.requirements and util.any(
450 lfutil.shortname+'/' in f[0] for f in repo.store.datafiles()):
351 lfutil.shortname+'/' in f[0] for f in repo.store.datafiles()):
451 repo.requirements.add('largefiles')
352 repo.requirements.add('largefiles')
452 repo._writerequirements()
353 repo._writerequirements()
453
354
454 ui.setconfig('hooks', 'changegroup.lfiles', checkrequireslfiles,
355 ui.setconfig('hooks', 'changegroup.lfiles', checkrequireslfiles,
455 'largefiles')
356 'largefiles')
456 ui.setconfig('hooks', 'commit.lfiles', checkrequireslfiles, 'largefiles')
357 ui.setconfig('hooks', 'commit.lfiles', checkrequireslfiles, 'largefiles')
General Comments 0
You need to be logged in to leave comments. Login now