##// END OF EJS Templates
largefiles: simplify lfdirstate ignore handling - it is only for tracking .hglf
Mads Kiilerich -
r18148:bf6252d1 default
parent child Browse files
Show More
@@ -1,457 +1,459 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''largefiles utility code: must not import other modules in this package.'''
9 '''largefiles utility code: must not import other modules in this package.'''
10
10
11 import os
11 import os
12 import errno
12 import errno
13 import platform
13 import platform
14 import shutil
14 import shutil
15 import stat
15 import stat
16
16
17 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
17 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
18 from mercurial.i18n import _
18 from mercurial.i18n import _
19
19
20 shortname = '.hglf'
20 shortname = '.hglf'
21 longname = 'largefiles'
21 longname = 'largefiles'
22
22
23
23
24 # -- Portability wrappers ----------------------------------------------
24 # -- Portability wrappers ----------------------------------------------
25
25
def dirstatewalk(dirstate, matcher, unknown=False, ignored=False):
    """Walk *dirstate* with *matcher*, passing no extra subrepo paths;
    unknown/ignored files are excluded unless the flags say otherwise."""
    return dirstate.walk(matcher, [], unknown, ignored)
28
28
def repoadd(repo, list):
    """Schedule *list* of files for addition via the working context."""
    return repo[None].add(list)
32
32
def reporemove(repo, list, unlink=False):
    """Forget *list* of files in the working context, optionally also
    unlinking them from the working directory (missing files are
    silently ignored). The repo wlock is held for the whole operation."""
    wlock = repo.wlock()
    try:
        if unlink:
            for f in list:
                util.unlinkpath(repo.wjoin(f), ignoremissing=True)
        repo[None].forget(list)
    finally:
        wlock.release()
44
44
def repoforget(repo, list):
    """Forget *list* of files via the working context."""
    return repo[None].forget(list)
48
48
def findoutgoing(repo, remote, force):
    """Return the changesets missing from *remote*. The discovery import
    is delayed to avoid an import cycle at module load time."""
    from mercurial import discovery
    out = discovery.findcommonoutgoing(repo, remote.peer(), force=force)
    return out.missing
53
53
54 # -- Private worker functions ------------------------------------------
54 # -- Private worker functions ------------------------------------------
55
55
def getminsize(ui, assumelfiles, opt, default=10):
    '''Return the minimum size threshold for largefiles: the explicit
    option *opt* when given, otherwise (when *assumelfiles* is set) the
    [largefiles] minsize config value, defaulting to *default*. Aborts
    when the value is not numeric or no size could be determined.'''
    size = opt
    if not size and assumelfiles:
        size = ui.config(longname, 'minsize', default=default)
    if size:
        try:
            size = float(size)
        except ValueError:
            raise util.Abort(_('largefiles: size must be number (not %s)\n')
                             % size)
    if size is None:
        raise util.Abort(_('minimum size for largefiles must be specified'))
    return size
69
69
def link(src, dest):
    """Hardlink *src* to *dest*; when hardlinking fails, fall back to an
    atomic copy that preserves src's mode bits."""
    try:
        util.oslink(src, dest)
    except OSError:
        # if hardlinks fail, fallback on atomic copy
        dst = util.atomictempfile(dest)
        # BUG FIX: the source file handle was previously never closed
        fp = open(src, 'rb')
        try:
            for chunk in util.filechunkiter(fp):
                dst.write(chunk)
        finally:
            fp.close()
        dst.close()
        os.chmod(dest, os.stat(src).st_mode)
80
80
def usercachepath(ui, hash):
    '''Return the path of *hash* in the per-user largefiles cache.

    Honors the [largefiles] usercache setting, otherwise falls back to
    the platform's conventional cache location (LOCALAPPDATA/APPDATA on
    Windows, ~/Library/Caches on Darwin, XDG_CACHE_HOME or ~/.cache on
    POSIX). May return None when the needed environment variables are
    missing; aborts on an unrecognized operating system.'''
    configured = ui.configpath(longname, 'usercache', None)
    if configured:
        return os.path.join(configured, hash)
    path = None
    if os.name == 'nt':
        appdata = os.getenv('LOCALAPPDATA', os.getenv('APPDATA'))
        if appdata:
            path = os.path.join(appdata, longname, hash)
    elif platform.system() == 'Darwin':
        home = os.getenv('HOME')
        if home:
            path = os.path.join(home, 'Library', 'Caches', longname, hash)
    elif os.name == 'posix':
        xdgcache = os.getenv('XDG_CACHE_HOME')
        if xdgcache:
            path = os.path.join(xdgcache, longname, hash)
        else:
            home = os.getenv('HOME')
            if home:
                path = os.path.join(home, '.cache', longname, hash)
    else:
        raise util.Abort(_('unknown operating system: %s\n') % os.name)
    return path
106
106
def inusercache(ui, hash):
    """Report (truthily) whether *hash* is present in the user cache;
    falsy when no user cache location is available."""
    path = usercachepath(ui, hash)
    return path and os.path.exists(path)
110
110
def findfile(repo, hash):
    '''Return a store path for the largefile with the given hash,
    hardlinking it in from the user cache if it is only there.
    Return None when the file is in neither cache.'''
    if instore(repo, hash):
        repo.ui.note(_('found %s in store\n') % hash)
        return storepath(repo, hash)
    if inusercache(repo.ui, hash):
        repo.ui.note(_('found %s in system cache\n') % hash)
        path = storepath(repo, hash)
        util.makedirs(os.path.dirname(path))
        link(usercachepath(repo.ui, hash), path)
        return path
    return None
122
122
class largefilesdirstate(dirstate.dirstate):
    '''A dirstate subclass for the lfdirstate: every incoming path is
    normalized with unixpath() before delegating to dirstate, and
    ignore handling is disabled (the lfdirstate only tracks largefiles,
    never ignorable files).'''
    def __getitem__(self, key):
        return super(largefilesdirstate, self).__getitem__(unixpath(key))
    def normal(self, f):
        return super(largefilesdirstate, self).normal(unixpath(f))
    def remove(self, f):
        return super(largefilesdirstate, self).remove(unixpath(f))
    def add(self, f):
        return super(largefilesdirstate, self).add(unixpath(f))
    def drop(self, f):
        return super(largefilesdirstate, self).drop(unixpath(f))
    def forget(self, f):
        return super(largefilesdirstate, self).forget(unixpath(f))
    def normallookup(self, f):
        return super(largefilesdirstate, self).normallookup(unixpath(f))
    def _ignore(self):
        # nothing tracked here is ever ignored
        return False
138
140
def openlfdirstate(ui, repo, create=True):
    '''
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.

    When *create* is true and no largefiles dirstate exists yet, it is
    populated from the standins tracked by the repo dirstate, so it is
    built on the first meaningful largefiles operation in a new clone.
    '''
    lfstoredir = repo.join(longname)
    opener = scmutil.opener(lfstoredir)
    lfdirstate = largefilesdirstate(opener, ui, repo.root,
                                    repo.dirstate._validate)

    if create and not os.path.exists(os.path.join(lfstoredir, 'dirstate')):
        util.makedirs(lfstoredir)
        matcher = getstandinmatcher(repo)
        for standin in dirstatewalk(repo.dirstate, matcher):
            lfile = splitstandin(standin)
            hash = readstandin(repo, lfile)
            lfdirstate.normallookup(lfile)
            try:
                # mark clean only when the working copy matches the standin
                if hash == hashfile(repo.wjoin(lfile)):
                    lfdirstate.normal(lfile)
            # FIX: use the 'as' form (valid on Python 2.6+ and required on
            # Python 3) instead of the legacy 'except OSError, err' syntax
            except OSError as err:
                if err.errno != errno.ENOENT:
                    raise
    return lfdirstate
166
168
def lfdirstatestatus(lfdirstate, repo, rev):
    '''Compute largefile status against *rev*, resolving "unsure" entries
    by comparing working-copy hashes with the standins at rev; files
    found clean are also marked normal in lfdirstate. Returns the usual
    (modified, added, removed, missing, unknown, ignored, clean) tuple.'''
    match = match_.always(repo.root, repo.getcwd())
    (unsure, modified, added, removed,
     missing, unknown, ignored, clean) = lfdirstate.status(
        match, [], False, False, False)
    for lfile in unsure:
        standinhash = repo[rev][standin(lfile)].data().strip()
        if standinhash != hashfile(repo.wjoin(lfile)):
            modified.append(lfile)
        else:
            clean.append(lfile)
            lfdirstate.normal(lfile)
    return (modified, added, removed, missing, unknown, ignored, clean)
179
181
def listlfiles(repo, rev=None, matcher=None):
    '''Return a list of largefiles in the working copy or the specified
    changeset.'''
    if matcher is None:
        matcher = getstandinmatcher(repo)
    # for the working directory (rev is None), skip unknown standins
    return [splitstandin(f)
            for f in repo[rev].walk(matcher)
            if rev is not None or repo.dirstate[f] != '?']
191
193
def instore(repo, hash):
    """Report whether the largefile with the given hash exists in the
    repo-local store."""
    return os.path.exists(storepath(repo, hash))
194
196
def storepath(repo, hash):
    """Return the path of *hash* inside the repo-local largefiles store
    (.hg/largefiles/<hash>)."""
    return repo.join(os.path.join(longname, hash))
197
199
def copyfromcache(repo, hash, filename):
    '''Copy the specified largefile from the repo or system cache to
    *filename* in the working directory. Return True on success or False
    if the file was not found in either cache (which should not happen:
    this is meant to be called only after ensuring that the needed
    largefile exists in the cache).'''
    path = findfile(repo, hash)
    if path is None:
        return False
    util.makedirs(os.path.dirname(repo.wjoin(filename)))
    # the write may fail before the file is fully written, but we
    # don't use atomic writes in the working copy.
    shutil.copy(path, repo.wjoin(filename))
    return True
212
214
def copytostore(repo, rev, file, uploaded=False):
    '''Copy *file*'s largefile (identified by its standin at *rev*) into
    the repo store, unless it is already present. The *uploaded* flag is
    accepted for interface compatibility only — note it is unused here.'''
    hash = readstandin(repo, file, rev)
    if not instore(repo, hash):
        copytostoreabsolute(repo, repo.wjoin(file), hash)
218
220
def copyalltostore(repo, node):
    '''Copy all largefiles in a given revision to the store'''
    ctx = repo[node]
    for filename in ctx.files():
        # only standins still present in the manifest at this revision
        if isstandin(filename) and filename in ctx.manifest():
            copytostore(repo, ctx.node(), splitstandin(filename))
227
229
228
230
def copytostoreabsolute(repo, file, hash):
    '''Copy the largefile at absolute path *file* into the store under
    *hash*: hardlink from the user cache when the file is there,
    otherwise copy atomically and link the result back into the user
    cache. Skipped entirely while converting a repo.'''
    util.makedirs(os.path.dirname(storepath(repo, hash)))
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    elif not getattr(repo, "_isconverting", False):
        dst = util.atomictempfile(storepath(repo, hash),
                                  createmode=repo.store.createmode)
        # BUG FIX: the source file handle was previously never closed
        fp = open(file, 'rb')
        try:
            for chunk in util.filechunkiter(fp):
                dst.write(chunk)
        finally:
            fp.close()
        dst.close()
        linktousercache(repo, hash)
240
242
def linktousercache(repo, hash):
    """Hardlink the stored largefile *hash* into the user cache, when a
    user cache location is available."""
    path = usercachepath(repo.ui, hash)
    if path:
        util.makedirs(os.path.dirname(path))
        link(storepath(repo, hash), path)
246
248
def getstandinmatcher(repo, pats=[], opts={}):
    '''Return a match object that applies pats to the standin directory'''
    standindir = repo.pathto(shortname)
    if pats:
        # patterns supplied: search the standin directory relative to
        # the current directory
        cwd = repo.getcwd()
        if os.path.isabs(cwd):
            # cwd is an absolute path for hg -R <reponame>;
            # work relative to the repository root in this case
            cwd = ''
        pats = [os.path.join(standindir, cwd, pat) for pat in pats]
    elif os.path.isdir(standindir):
        # no patterns: match the whole standin directory
        pats = [standindir]
    else:
        # no patterns and no standin dir: return a matcher that matches
        # nothing at all
        return match_.match(repo.root, None, [], exact=True)

    match = scmutil.match(repo[None], pats, opts)
    # suppress warnings about missing files or directories
    match.bad = lambda f, msg: None
    return match
269
271
def composestandinmatcher(repo, rmatcher):
    '''Return a matcher that accepts standins corresponding to the files
    accepted by rmatcher. Pass the list of files in the matcher as the
    paths specified by the user.'''
    smatcher = getstandinmatcher(repo, rmatcher.files())
    isstandin = smatcher.matchfn
    def composedmatchfn(f):
        # a file matches only if it is a standin AND its largefile name
        # matches the original matcher
        return isstandin(f) and rmatcher.matchfn(splitstandin(f))
    smatcher.matchfn = composedmatchfn
    return smatcher
281
283
def standin(filename):
    '''Return the repo-relative path to the standin for the specified
    big file.

    Notes:
    1) Some callers want an absolute path, but for instance addlargefiles
       needs it repo-relative so it can be passed to repoadd(). So leave
       it up to the caller to use repo.wjoin() to get an absolute path.
    2) Join with '/' because that's what dirstate always uses, even on
       Windows. Change existing separator to '/' first in case we are
       passed filenames from an external source (like the command line).
    '''
    return shortname + '/' + util.pconvert(filename)
293
295
def isstandin(filename):
    '''Return True if *filename* (in Mercurial's slash-separated internal
    form) is a big file standin.'''
    return filename.startswith(shortname + '/')
298
300
def splitstandin(filename):
    '''Return the largefile name for the given standin path, or None when
    the path is not a standin. Split on '/' because that's what dirstate
    always uses, even on Windows; local separators are normalized first
    in case the name came from an external source (like the command
    line).'''
    bits = util.pconvert(filename).split('/', 1)
    if len(bits) == 2 and bits[0] == shortname:
        return bits[1]
    return None
308
310
def updatestandin(repo, standin):
    """Rewrite *standin* from the current working-copy contents of its
    largefile; does nothing when the largefile is absent."""
    file = repo.wjoin(splitstandin(standin))
    if os.path.exists(file):
        writestandin(repo, standin, hashfile(file), getexecutable(file))
315
317
def readstandin(repo, filename, node=None):
    '''Return the hex hash recorded in the standin for *filename* at the
    given node, or in the working directory when no node is given.'''
    return repo[node][standin(filename)].data().strip()
320
322
def writestandin(repo, standin, hash, executable):
    '''Write *hash* to <repo.root>/<standin>, with mode bits derived from
    *executable*.'''
    writehash(hash, repo.wjoin(standin), executable)
324
326
def copyandhash(instream, outfile):
    '''Copy bytes from *instream* (an iterable) to *outfile*, computing
    their SHA-1 along the way. Close outfile when done and return the
    binary digest.'''
    hasher = util.sha1('')
    for chunk in instream:
        hasher.update(chunk)
        outfile.write(chunk)
    # Blecch: closing a file that somebody else opened is rude and
    # wrong. But it's so darn convenient and practical! After all,
    # outfile was opened just to copy and hash.
    outfile.close()
    return hasher.digest()
340
342
def hashrepofile(repo, file):
    """Return the hex hash of the repo-relative *file* in the working
    copy ('' when the file is missing)."""
    return hashfile(repo.wjoin(file))
343
345
def hashfile(file):
    """Return the hex SHA-1 of *file*'s contents, or '' when the file
    does not exist."""
    if not os.path.exists(file):
        return ''
    hasher = util.sha1('')
    fd = open(file, 'rb')
    for block in blockstream(fd):
        hasher.update(block)
    fd.close()
    return hasher.hexdigest()
353
355
class limitreader(object):
    '''File-like wrapper that caps the total number of bytes readable
    from the underlying file *f* at *limit*.'''
    def __init__(self, f, limit):
        self.f = f
        self.limit = limit

    def read(self, length):
        if self.limit == 0:
            return ''
        # clamp the request to whatever budget remains
        if length > self.limit:
            length = self.limit
        self.limit -= length
        return self.f.read(length)

    def close(self):
        # the underlying file is deliberately left open
        pass
368
370
def blockstream(infile, blocksize=128 * 1024):
    """Generator that yields blocks of data from infile and closes infile."""
    while True:
        block = infile.read(blocksize)
        if not block:
            break
        yield block
    # same blecch as copyandhash() above: closing a file we didn't open
    infile.close()
378
380
def writehash(hash, filename, executable):
    """Write *hash* (newline-terminated) to *filename*, creating parent
    directories as needed and setting mode bits per *executable*."""
    util.makedirs(os.path.dirname(filename))
    util.writefile(filename, hash + '\n')
    os.chmod(filename, getmode(executable))
383
385
def getexecutable(filename):
    """Return a truthy value iff *filename* is executable by user, group
    and other alike."""
    mode = os.stat(filename).st_mode
    return ((mode & stat.S_IXUSR)
            and (mode & stat.S_IXGRP)
            and (mode & stat.S_IXOTH))
389
391
def getmode(executable):
    """Return the file mode for a standin: 0o755 when the largefile is
    executable, else 0o644.

    FIX: the 0oNNN octal literal form replaces the legacy 0NNN form —
    identical values, but valid on Python 2.6+ and Python 3 alike."""
    if executable:
        return 0o755
    return 0o644
395
397
def urljoin(first, second, *arg):
    """Join URL components, ensuring exactly one '/' between adjacent
    pieces (one trailing slash of the left and one leading slash of the
    right are collapsed)."""
    def _join(left, right):
        sep = '' if left.endswith('/') else '/'
        if right.startswith('/'):
            right = right[1:]
        return left + sep + right
    url = _join(first, second)
    for piece in arg:
        url = _join(url, piece)
    return url
408
410
def hexsha1(data):
    """Return the hex-encoded SHA-1 of the contents of the file-like
    object *data*."""
    h = util.sha1()
    for chunk in util.filechunkiter(data):
        h.update(chunk)
    return h.hexdigest()
416
418
def httpsendfile(ui, filename):
    """Open *filename* for binary reading as an httpsendfile object
    suitable for uploading."""
    return httpconnection.httpsendfile(ui, filename, 'rb')
419
421
def unixpath(path):
    '''Return a version of path normalized for use with the lfdirstate.'''
    normalized = os.path.normpath(path)
    return util.pconvert(normalized)
423
425
def islfilesrepo(repo):
    """Report whether *repo* uses largefiles: either the requirement is
    set and the store contains largefile data, or the lfdirstate is
    non-empty."""
    if 'largefiles' in repo.requirements:
        if util.any(shortname + '/' in f[0]
                    for f in repo.store.datafiles()):
            return True
    return util.any(openlfdirstate(repo.ui, repo, False))
430
432
class storeprotonotcapable(Exception):
    '''Raised when no remote store satisfies the required store types.'''
    def __init__(self, storetypes):
        # the store types that were requested but unavailable
        self.storetypes = storetypes
434
436
def getcurrentheads(repo):
    """Return the heads of every branch in *repo* as one flat list."""
    heads = []
    for branch in repo.branchmap():
        heads.extend(repo.branchheads(branch))
    return heads
442
444
def getstandinsstate(repo):
    """Return a list of (largefile name, standin hash) pairs for every
    standin tracked by the repo dirstate."""
    standins = []
    matcher = getstandinmatcher(repo)
    for standin in dirstatewalk(repo.dirstate, matcher):
        lfile = splitstandin(standin)
        standins.append((lfile, readstandin(repo, lfile)))
    return standins
450
452
def getlfilestoupdate(oldstandins, newstandins):
    '''Return the largefile names whose (lfile, hash) standin entries
    differ between the two lists, without duplicates.

    FIX: dedupe with a seen-set instead of a linear `not in list`
    membership test, turning the accidental O(n^2) loop into O(n).'''
    changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
    seen = set()
    filelist = []
    for lfile, _hash in changedstandins:
        if lfile not in seen:
            seen.add(lfile)
            filelist.append(lfile)
    return filelist
@@ -1,521 +1,504 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''setup for largefiles repositories: reposetup'''
9 '''setup for largefiles repositories: reposetup'''
10 import copy
10 import copy
11 import types
11 import types
12 import os
12 import os
13
13
14 from mercurial import context, error, manifest, match as match_, util
14 from mercurial import context, error, manifest, match as match_, util
15 from mercurial import node as node_
15 from mercurial import node as node_
16 from mercurial.i18n import _
16 from mercurial.i18n import _
17 from mercurial import localrepo
17 from mercurial import localrepo
18
18
19 import lfcommands
19 import lfcommands
20 import proto
20 import proto
21 import lfutil
21 import lfutil
22
22
23 def reposetup(ui, repo):
23 def reposetup(ui, repo):
24 # wire repositories should be given new wireproto functions but not the
24 # wire repositories should be given new wireproto functions but not the
25 # other largefiles modifications
25 # other largefiles modifications
26 if not repo.local():
26 if not repo.local():
27 return proto.wirereposetup(ui, repo)
27 return proto.wirereposetup(ui, repo)
28
28
29 for name in ('status', 'commitctx', 'commit', 'push'):
29 for name in ('status', 'commitctx', 'commit', 'push'):
30 method = getattr(repo, name)
30 method = getattr(repo, name)
31 if (isinstance(method, types.FunctionType) and
31 if (isinstance(method, types.FunctionType) and
32 method.func_name == 'wrap'):
32 method.func_name == 'wrap'):
33 ui.warn(_('largefiles: repo method %r appears to have already been'
33 ui.warn(_('largefiles: repo method %r appears to have already been'
34 ' wrapped by another extension: '
34 ' wrapped by another extension: '
35 'largefiles may behave incorrectly\n')
35 'largefiles may behave incorrectly\n')
36 % name)
36 % name)
37
37
38 class lfilesrepo(repo.__class__):
38 class lfilesrepo(repo.__class__):
39 lfstatus = False
39 lfstatus = False
40 def status_nolfiles(self, *args, **kwargs):
40 def status_nolfiles(self, *args, **kwargs):
41 return super(lfilesrepo, self).status(*args, **kwargs)
41 return super(lfilesrepo, self).status(*args, **kwargs)
42
42
43 # When lfstatus is set, return a context that gives the names
43 # When lfstatus is set, return a context that gives the names
44 # of largefiles instead of their corresponding standins and
44 # of largefiles instead of their corresponding standins and
45 # identifies the largefiles as always binary, regardless of
45 # identifies the largefiles as always binary, regardless of
46 # their actual contents.
46 # their actual contents.
47 def __getitem__(self, changeid):
47 def __getitem__(self, changeid):
48 ctx = super(lfilesrepo, self).__getitem__(changeid)
48 ctx = super(lfilesrepo, self).__getitem__(changeid)
49 if self.lfstatus:
49 if self.lfstatus:
50 class lfilesmanifestdict(manifest.manifestdict):
50 class lfilesmanifestdict(manifest.manifestdict):
51 def __contains__(self, filename):
51 def __contains__(self, filename):
52 if super(lfilesmanifestdict,
52 if super(lfilesmanifestdict,
53 self).__contains__(filename):
53 self).__contains__(filename):
54 return True
54 return True
55 return super(lfilesmanifestdict,
55 return super(lfilesmanifestdict,
56 self).__contains__(lfutil.standin(filename))
56 self).__contains__(lfutil.standin(filename))
57 class lfilesctx(ctx.__class__):
57 class lfilesctx(ctx.__class__):
58 def files(self):
58 def files(self):
59 filenames = super(lfilesctx, self).files()
59 filenames = super(lfilesctx, self).files()
60 return [lfutil.splitstandin(f) or f for f in filenames]
60 return [lfutil.splitstandin(f) or f for f in filenames]
61 def manifest(self):
61 def manifest(self):
62 man1 = super(lfilesctx, self).manifest()
62 man1 = super(lfilesctx, self).manifest()
63 man1.__class__ = lfilesmanifestdict
63 man1.__class__ = lfilesmanifestdict
64 return man1
64 return man1
65 def filectx(self, path, fileid=None, filelog=None):
65 def filectx(self, path, fileid=None, filelog=None):
66 try:
66 try:
67 if filelog is not None:
67 if filelog is not None:
68 result = super(lfilesctx, self).filectx(
68 result = super(lfilesctx, self).filectx(
69 path, fileid, filelog)
69 path, fileid, filelog)
70 else:
70 else:
71 result = super(lfilesctx, self).filectx(
71 result = super(lfilesctx, self).filectx(
72 path, fileid)
72 path, fileid)
73 except error.LookupError:
73 except error.LookupError:
74 # Adding a null character will cause Mercurial to
74 # Adding a null character will cause Mercurial to
75 # identify this as a binary file.
75 # identify this as a binary file.
76 if filelog is not None:
76 if filelog is not None:
77 result = super(lfilesctx, self).filectx(
77 result = super(lfilesctx, self).filectx(
78 lfutil.standin(path), fileid, filelog)
78 lfutil.standin(path), fileid, filelog)
79 else:
79 else:
80 result = super(lfilesctx, self).filectx(
80 result = super(lfilesctx, self).filectx(
81 lfutil.standin(path), fileid)
81 lfutil.standin(path), fileid)
82 olddata = result.data
82 olddata = result.data
83 result.data = lambda: olddata() + '\0'
83 result.data = lambda: olddata() + '\0'
84 return result
84 return result
85 ctx.__class__ = lfilesctx
85 ctx.__class__ = lfilesctx
86 return ctx
86 return ctx
87
87
88 # Figure out the status of big files and insert them into the
88 # Figure out the status of big files and insert them into the
89 # appropriate list in the result. Also removes standin files
89 # appropriate list in the result. Also removes standin files
90 # from the listing. Revert to the original status if
90 # from the listing. Revert to the original status if
91 # self.lfstatus is False.
91 # self.lfstatus is False.
92 # XXX large file status is buggy when used on repo proxy.
92 # XXX large file status is buggy when used on repo proxy.
93 # XXX this needs to be investigated.
93 # XXX this needs to be investigated.
94 @localrepo.unfilteredmethod
94 @localrepo.unfilteredmethod
95 def status(self, node1='.', node2=None, match=None, ignored=False,
95 def status(self, node1='.', node2=None, match=None, ignored=False,
96 clean=False, unknown=False, listsubrepos=False):
96 clean=False, unknown=False, listsubrepos=False):
97 listignored, listclean, listunknown = ignored, clean, unknown
97 listignored, listclean, listunknown = ignored, clean, unknown
98 if not self.lfstatus:
98 if not self.lfstatus:
99 return super(lfilesrepo, self).status(node1, node2, match,
99 return super(lfilesrepo, self).status(node1, node2, match,
100 listignored, listclean, listunknown, listsubrepos)
100 listignored, listclean, listunknown, listsubrepos)
101 else:
101 else:
102 # some calls in this function rely on the old version of status
102 # some calls in this function rely on the old version of status
103 self.lfstatus = False
103 self.lfstatus = False
104 if isinstance(node1, context.changectx):
104 if isinstance(node1, context.changectx):
105 ctx1 = node1
105 ctx1 = node1
106 else:
106 else:
107 ctx1 = self[node1]
107 ctx1 = self[node1]
108 if isinstance(node2, context.changectx):
108 if isinstance(node2, context.changectx):
109 ctx2 = node2
109 ctx2 = node2
110 else:
110 else:
111 ctx2 = self[node2]
111 ctx2 = self[node2]
112 working = ctx2.rev() is None
112 working = ctx2.rev() is None
113 parentworking = working and ctx1 == self['.']
113 parentworking = working and ctx1 == self['.']
114
114
115 def inctx(file, ctx):
115 def inctx(file, ctx):
116 try:
116 try:
117 if ctx.rev() is None:
117 if ctx.rev() is None:
118 return file in ctx.manifest()
118 return file in ctx.manifest()
119 ctx[file]
119 ctx[file]
120 return True
120 return True
121 except KeyError:
121 except KeyError:
122 return False
122 return False
123
123
124 if match is None:
124 if match is None:
125 match = match_.always(self.root, self.getcwd())
125 match = match_.always(self.root, self.getcwd())
126
126
127 # First check if there were files specified on the
127 # First check if there were files specified on the
128 # command line. If there were, and none of them were
128 # command line. If there were, and none of them were
129 # largefiles, we should just bail here and let super
129 # largefiles, we should just bail here and let super
130 # handle it -- thus gaining a big performance boost.
130 # handle it -- thus gaining a big performance boost.
131 lfdirstate = lfutil.openlfdirstate(ui, self)
131 lfdirstate = lfutil.openlfdirstate(ui, self)
132 if match.files() and not match.anypats():
132 if match.files() and not match.anypats():
133 for f in lfdirstate:
133 for f in lfdirstate:
134 if match(f):
134 if match(f):
135 break
135 break
136 else:
136 else:
137 return super(lfilesrepo, self).status(node1, node2,
137 return super(lfilesrepo, self).status(node1, node2,
138 match, listignored, listclean,
138 match, listignored, listclean,
139 listunknown, listsubrepos)
139 listunknown, listsubrepos)
140
140
141 # Create a copy of match that matches standins instead
141 # Create a copy of match that matches standins instead
142 # of largefiles.
142 # of largefiles.
143 def tostandins(files):
143 def tostandins(files):
144 if not working:
144 if not working:
145 return files
145 return files
146 newfiles = []
146 newfiles = []
147 dirstate = self.dirstate
147 dirstate = self.dirstate
148 for f in files:
148 for f in files:
149 sf = lfutil.standin(f)
149 sf = lfutil.standin(f)
150 if sf in dirstate:
150 if sf in dirstate:
151 newfiles.append(sf)
151 newfiles.append(sf)
152 elif sf in dirstate.dirs():
152 elif sf in dirstate.dirs():
153 # Directory entries could be regular or
153 # Directory entries could be regular or
154 # standin, check both
154 # standin, check both
155 newfiles.extend((f, sf))
155 newfiles.extend((f, sf))
156 else:
156 else:
157 newfiles.append(f)
157 newfiles.append(f)
158 return newfiles
158 return newfiles
159
159
160 # Create a function that we can use to override what is
161 # normally the ignore matcher. We've already checked
162 # for ignored files on the first dirstate walk, and
163 # unnecessarily re-checking here causes a huge performance
164 # hit because lfdirstate only knows about largefiles
165 def _ignoreoverride(self):
166 return False
167
168 m = copy.copy(match)
160 m = copy.copy(match)
169 m._files = tostandins(m._files)
161 m._files = tostandins(m._files)
170
162
171 result = super(lfilesrepo, self).status(node1, node2, m,
163 result = super(lfilesrepo, self).status(node1, node2, m,
172 ignored, clean, unknown, listsubrepos)
164 ignored, clean, unknown, listsubrepos)
173 if working:
165 if working:
174 try:
166 try:
175 # Any non-largefiles that were explicitly listed must be
176 # taken out or lfdirstate.status will report an error.
177 # The status of these files was already computed using
178 # super's status.
179 # Override lfdirstate's ignore matcher to not do
180 # anything
181 origignore = lfdirstate._ignore
182 lfdirstate._ignore = _ignoreoverride
183
167
184 def sfindirstate(f):
168 def sfindirstate(f):
185 sf = lfutil.standin(f)
169 sf = lfutil.standin(f)
186 dirstate = self.dirstate
170 dirstate = self.dirstate
187 return sf in dirstate or sf in dirstate.dirs()
171 return sf in dirstate or sf in dirstate.dirs()
188 match._files = [f for f in match._files
172 match._files = [f for f in match._files
189 if sfindirstate(f)]
173 if sfindirstate(f)]
190 # Don't waste time getting the ignored and unknown
174 # Don't waste time getting the ignored and unknown
191 # files from lfdirstate
175 # files from lfdirstate
192 s = lfdirstate.status(match, [], False,
176 s = lfdirstate.status(match, [], False,
193 listclean, False)
177 listclean, False)
194 (unsure, modified, added, removed, missing, _unknown,
178 (unsure, modified, added, removed, missing, _unknown,
195 _ignored, clean) = s
179 _ignored, clean) = s
196 if parentworking:
180 if parentworking:
197 for lfile in unsure:
181 for lfile in unsure:
198 standin = lfutil.standin(lfile)
182 standin = lfutil.standin(lfile)
199 if standin not in ctx1:
183 if standin not in ctx1:
200 # from second parent
184 # from second parent
201 modified.append(lfile)
185 modified.append(lfile)
202 elif ctx1[standin].data().strip() \
186 elif ctx1[standin].data().strip() \
203 != lfutil.hashfile(self.wjoin(lfile)):
187 != lfutil.hashfile(self.wjoin(lfile)):
204 modified.append(lfile)
188 modified.append(lfile)
205 else:
189 else:
206 clean.append(lfile)
190 clean.append(lfile)
207 lfdirstate.normal(lfile)
191 lfdirstate.normal(lfile)
208 else:
192 else:
209 tocheck = unsure + modified + added + clean
193 tocheck = unsure + modified + added + clean
210 modified, added, clean = [], [], []
194 modified, added, clean = [], [], []
211
195
212 for lfile in tocheck:
196 for lfile in tocheck:
213 standin = lfutil.standin(lfile)
197 standin = lfutil.standin(lfile)
214 if inctx(standin, ctx1):
198 if inctx(standin, ctx1):
215 if ctx1[standin].data().strip() != \
199 if ctx1[standin].data().strip() != \
216 lfutil.hashfile(self.wjoin(lfile)):
200 lfutil.hashfile(self.wjoin(lfile)):
217 modified.append(lfile)
201 modified.append(lfile)
218 else:
202 else:
219 clean.append(lfile)
203 clean.append(lfile)
220 else:
204 else:
221 added.append(lfile)
205 added.append(lfile)
222 finally:
206 finally:
223 # Replace the original ignore function
207 pass
224 lfdirstate._ignore = origignore
225
208
226 # Standins no longer found in lfdirstate has been removed
209 # Standins no longer found in lfdirstate has been removed
227 for standin in ctx1.manifest():
210 for standin in ctx1.manifest():
228 if not lfutil.isstandin(standin):
211 if not lfutil.isstandin(standin):
229 continue
212 continue
230 lfile = lfutil.splitstandin(standin)
213 lfile = lfutil.splitstandin(standin)
231 if not match(lfile):
214 if not match(lfile):
232 continue
215 continue
233 if lfile not in lfdirstate:
216 if lfile not in lfdirstate:
234 removed.append(lfile)
217 removed.append(lfile)
235
218
236 # Filter result lists
219 # Filter result lists
237 result = list(result)
220 result = list(result)
238
221
239 # Largefiles are not really removed when they're
222 # Largefiles are not really removed when they're
240 # still in the normal dirstate. Likewise, normal
223 # still in the normal dirstate. Likewise, normal
241 # files are not really removed if they are still in
224 # files are not really removed if they are still in
242 # lfdirstate. This happens in merges where files
225 # lfdirstate. This happens in merges where files
243 # change type.
226 # change type.
244 removed = [f for f in removed if f not in self.dirstate]
227 removed = [f for f in removed if f not in self.dirstate]
245 result[2] = [f for f in result[2] if f not in lfdirstate]
228 result[2] = [f for f in result[2] if f not in lfdirstate]
246
229
247 lfiles = set(lfdirstate._map)
230 lfiles = set(lfdirstate._map)
248 # Unknown files
231 # Unknown files
249 result[4] = set(result[4]).difference(lfiles)
232 result[4] = set(result[4]).difference(lfiles)
250 # Ignored files
233 # Ignored files
251 result[5] = set(result[5]).difference(lfiles)
234 result[5] = set(result[5]).difference(lfiles)
252 # combine normal files and largefiles
235 # combine normal files and largefiles
253 normals = [[fn for fn in filelist
236 normals = [[fn for fn in filelist
254 if not lfutil.isstandin(fn)]
237 if not lfutil.isstandin(fn)]
255 for filelist in result]
238 for filelist in result]
256 lfiles = (modified, added, removed, missing, [], [], clean)
239 lfiles = (modified, added, removed, missing, [], [], clean)
257 result = [sorted(list1 + list2)
240 result = [sorted(list1 + list2)
258 for (list1, list2) in zip(normals, lfiles)]
241 for (list1, list2) in zip(normals, lfiles)]
259 else:
242 else:
260 def toname(f):
243 def toname(f):
261 if lfutil.isstandin(f):
244 if lfutil.isstandin(f):
262 return lfutil.splitstandin(f)
245 return lfutil.splitstandin(f)
263 return f
246 return f
264 result = [[toname(f) for f in items] for items in result]
247 result = [[toname(f) for f in items] for items in result]
265
248
266 lfdirstate.write()
249 lfdirstate.write()
267
250
268 if not listunknown:
251 if not listunknown:
269 result[4] = []
252 result[4] = []
270 if not listignored:
253 if not listignored:
271 result[5] = []
254 result[5] = []
272 if not listclean:
255 if not listclean:
273 result[6] = []
256 result[6] = []
274 self.lfstatus = True
257 self.lfstatus = True
275 return result
258 return result
276
259
277 # As part of committing, copy all of the largefiles into the
260 # As part of committing, copy all of the largefiles into the
278 # cache.
261 # cache.
279 def commitctx(self, *args, **kwargs):
262 def commitctx(self, *args, **kwargs):
280 node = super(lfilesrepo, self).commitctx(*args, **kwargs)
263 node = super(lfilesrepo, self).commitctx(*args, **kwargs)
281 lfutil.copyalltostore(self, node)
264 lfutil.copyalltostore(self, node)
282 return node
265 return node
283
266
284 # Before commit, largefile standins have not had their
267 # Before commit, largefile standins have not had their
285 # contents updated to reflect the hash of their largefile.
268 # contents updated to reflect the hash of their largefile.
286 # Do that here.
269 # Do that here.
287 def commit(self, text="", user=None, date=None, match=None,
270 def commit(self, text="", user=None, date=None, match=None,
288 force=False, editor=False, extra={}):
271 force=False, editor=False, extra={}):
289 orig = super(lfilesrepo, self).commit
272 orig = super(lfilesrepo, self).commit
290
273
291 wlock = self.wlock()
274 wlock = self.wlock()
292 try:
275 try:
293 # Case 0: Rebase or Transplant
276 # Case 0: Rebase or Transplant
294 # We have to take the time to pull down the new largefiles now.
277 # We have to take the time to pull down the new largefiles now.
295 # Otherwise, any largefiles that were modified in the
278 # Otherwise, any largefiles that were modified in the
296 # destination changesets get overwritten, either by the rebase
279 # destination changesets get overwritten, either by the rebase
297 # or in the first commit after the rebase or transplant.
280 # or in the first commit after the rebase or transplant.
298 # updatelfiles will update the dirstate to mark any pulled
281 # updatelfiles will update the dirstate to mark any pulled
299 # largefiles as modified
282 # largefiles as modified
300 if getattr(self, "_isrebasing", False) or \
283 if getattr(self, "_isrebasing", False) or \
301 getattr(self, "_istransplanting", False):
284 getattr(self, "_istransplanting", False):
302 lfcommands.updatelfiles(self.ui, self, filelist=None,
285 lfcommands.updatelfiles(self.ui, self, filelist=None,
303 printmessage=False)
286 printmessage=False)
304 result = orig(text=text, user=user, date=date, match=match,
287 result = orig(text=text, user=user, date=date, match=match,
305 force=force, editor=editor, extra=extra)
288 force=force, editor=editor, extra=extra)
306 return result
289 return result
307 # Case 1: user calls commit with no specific files or
290 # Case 1: user calls commit with no specific files or
308 # include/exclude patterns: refresh and commit all files that
291 # include/exclude patterns: refresh and commit all files that
309 # are "dirty".
292 # are "dirty".
310 if ((match is None) or
293 if ((match is None) or
311 (not match.anypats() and not match.files())):
294 (not match.anypats() and not match.files())):
312 # Spend a bit of time here to get a list of files we know
295 # Spend a bit of time here to get a list of files we know
313 # are modified so we can compare only against those.
296 # are modified so we can compare only against those.
314 # It can cost a lot of time (several seconds)
297 # It can cost a lot of time (several seconds)
315 # otherwise to update all standins if the largefiles are
298 # otherwise to update all standins if the largefiles are
316 # large.
299 # large.
317 lfdirstate = lfutil.openlfdirstate(ui, self)
300 lfdirstate = lfutil.openlfdirstate(ui, self)
318 dirtymatch = match_.always(self.root, self.getcwd())
301 dirtymatch = match_.always(self.root, self.getcwd())
319 s = lfdirstate.status(dirtymatch, [], False, False, False)
302 s = lfdirstate.status(dirtymatch, [], False, False, False)
320 modifiedfiles = []
303 modifiedfiles = []
321 for i in s:
304 for i in s:
322 modifiedfiles.extend(i)
305 modifiedfiles.extend(i)
323 lfiles = lfutil.listlfiles(self)
306 lfiles = lfutil.listlfiles(self)
324 # this only loops through largefiles that exist (not
307 # this only loops through largefiles that exist (not
325 # removed/renamed)
308 # removed/renamed)
326 for lfile in lfiles:
309 for lfile in lfiles:
327 if lfile in modifiedfiles:
310 if lfile in modifiedfiles:
328 if os.path.exists(
311 if os.path.exists(
329 self.wjoin(lfutil.standin(lfile))):
312 self.wjoin(lfutil.standin(lfile))):
330 # this handles the case where a rebase is being
313 # this handles the case where a rebase is being
331 # performed and the working copy is not updated
314 # performed and the working copy is not updated
332 # yet.
315 # yet.
333 if os.path.exists(self.wjoin(lfile)):
316 if os.path.exists(self.wjoin(lfile)):
334 lfutil.updatestandin(self,
317 lfutil.updatestandin(self,
335 lfutil.standin(lfile))
318 lfutil.standin(lfile))
336 lfdirstate.normal(lfile)
319 lfdirstate.normal(lfile)
337
320
338 result = orig(text=text, user=user, date=date, match=match,
321 result = orig(text=text, user=user, date=date, match=match,
339 force=force, editor=editor, extra=extra)
322 force=force, editor=editor, extra=extra)
340
323
341 if result is not None:
324 if result is not None:
342 for lfile in lfdirstate:
325 for lfile in lfdirstate:
343 if lfile in modifiedfiles:
326 if lfile in modifiedfiles:
344 if (not os.path.exists(self.wjoin(
327 if (not os.path.exists(self.wjoin(
345 lfutil.standin(lfile)))) or \
328 lfutil.standin(lfile)))) or \
346 (not os.path.exists(self.wjoin(lfile))):
329 (not os.path.exists(self.wjoin(lfile))):
347 lfdirstate.drop(lfile)
330 lfdirstate.drop(lfile)
348
331
349 # This needs to be after commit; otherwise precommit hooks
332 # This needs to be after commit; otherwise precommit hooks
350 # get the wrong status
333 # get the wrong status
351 lfdirstate.write()
334 lfdirstate.write()
352 return result
335 return result
353
336
354 lfiles = lfutil.listlfiles(self)
337 lfiles = lfutil.listlfiles(self)
355 match._files = self._subdirlfs(match.files(), lfiles)
338 match._files = self._subdirlfs(match.files(), lfiles)
356
339
357 # Case 2: user calls commit with specified patterns: refresh
340 # Case 2: user calls commit with specified patterns: refresh
358 # any matching big files.
341 # any matching big files.
359 smatcher = lfutil.composestandinmatcher(self, match)
342 smatcher = lfutil.composestandinmatcher(self, match)
360 standins = lfutil.dirstatewalk(self.dirstate, smatcher)
343 standins = lfutil.dirstatewalk(self.dirstate, smatcher)
361
344
362 # No matching big files: get out of the way and pass control to
345 # No matching big files: get out of the way and pass control to
363 # the usual commit() method.
346 # the usual commit() method.
364 if not standins:
347 if not standins:
365 return orig(text=text, user=user, date=date, match=match,
348 return orig(text=text, user=user, date=date, match=match,
366 force=force, editor=editor, extra=extra)
349 force=force, editor=editor, extra=extra)
367
350
368 # Refresh all matching big files. It's possible that the
351 # Refresh all matching big files. It's possible that the
369 # commit will end up failing, in which case the big files will
352 # commit will end up failing, in which case the big files will
370 # stay refreshed. No harm done: the user modified them and
353 # stay refreshed. No harm done: the user modified them and
371 # asked to commit them, so sooner or later we're going to
354 # asked to commit them, so sooner or later we're going to
372 # refresh the standins. Might as well leave them refreshed.
355 # refresh the standins. Might as well leave them refreshed.
373 lfdirstate = lfutil.openlfdirstate(ui, self)
356 lfdirstate = lfutil.openlfdirstate(ui, self)
374 for standin in standins:
357 for standin in standins:
375 lfile = lfutil.splitstandin(standin)
358 lfile = lfutil.splitstandin(standin)
376 if lfdirstate[lfile] <> 'r':
359 if lfdirstate[lfile] <> 'r':
377 lfutil.updatestandin(self, standin)
360 lfutil.updatestandin(self, standin)
378 lfdirstate.normal(lfile)
361 lfdirstate.normal(lfile)
379 else:
362 else:
380 lfdirstate.drop(lfile)
363 lfdirstate.drop(lfile)
381
364
382 # Cook up a new matcher that only matches regular files or
365 # Cook up a new matcher that only matches regular files or
383 # standins corresponding to the big files requested by the
366 # standins corresponding to the big files requested by the
384 # user. Have to modify _files to prevent commit() from
367 # user. Have to modify _files to prevent commit() from
385 # complaining "not tracked" for big files.
368 # complaining "not tracked" for big files.
386 match = copy.copy(match)
369 match = copy.copy(match)
387 origmatchfn = match.matchfn
370 origmatchfn = match.matchfn
388
371
389 # Check both the list of largefiles and the list of
372 # Check both the list of largefiles and the list of
390 # standins because if a largefile was removed, it
373 # standins because if a largefile was removed, it
391 # won't be in the list of largefiles at this point
374 # won't be in the list of largefiles at this point
392 match._files += sorted(standins)
375 match._files += sorted(standins)
393
376
394 actualfiles = []
377 actualfiles = []
395 for f in match._files:
378 for f in match._files:
396 fstandin = lfutil.standin(f)
379 fstandin = lfutil.standin(f)
397
380
398 # ignore known largefiles and standins
381 # ignore known largefiles and standins
399 if f in lfiles or fstandin in standins:
382 if f in lfiles or fstandin in standins:
400 continue
383 continue
401
384
402 # append directory separator to avoid collisions
385 # append directory separator to avoid collisions
403 if not fstandin.endswith(os.sep):
386 if not fstandin.endswith(os.sep):
404 fstandin += os.sep
387 fstandin += os.sep
405
388
406 actualfiles.append(f)
389 actualfiles.append(f)
407 match._files = actualfiles
390 match._files = actualfiles
408
391
409 def matchfn(f):
392 def matchfn(f):
410 if origmatchfn(f):
393 if origmatchfn(f):
411 return f not in lfiles
394 return f not in lfiles
412 else:
395 else:
413 return f in standins
396 return f in standins
414
397
415 match.matchfn = matchfn
398 match.matchfn = matchfn
416 result = orig(text=text, user=user, date=date, match=match,
399 result = orig(text=text, user=user, date=date, match=match,
417 force=force, editor=editor, extra=extra)
400 force=force, editor=editor, extra=extra)
418 # This needs to be after commit; otherwise precommit hooks
401 # This needs to be after commit; otherwise precommit hooks
419 # get the wrong status
402 # get the wrong status
420 lfdirstate.write()
403 lfdirstate.write()
421 return result
404 return result
422 finally:
405 finally:
423 wlock.release()
406 wlock.release()
424
407
425 def push(self, remote, force=False, revs=None, newbranch=False):
408 def push(self, remote, force=False, revs=None, newbranch=False):
426 o = lfutil.findoutgoing(self, remote, force)
409 o = lfutil.findoutgoing(self, remote, force)
427 if o:
410 if o:
428 toupload = set()
411 toupload = set()
429 o = self.changelog.nodesbetween(o, revs)[0]
412 o = self.changelog.nodesbetween(o, revs)[0]
430 for n in o:
413 for n in o:
431 parents = [p for p in self.changelog.parents(n)
414 parents = [p for p in self.changelog.parents(n)
432 if p != node_.nullid]
415 if p != node_.nullid]
433 ctx = self[n]
416 ctx = self[n]
434 files = set(ctx.files())
417 files = set(ctx.files())
435 if len(parents) == 2:
418 if len(parents) == 2:
436 mc = ctx.manifest()
419 mc = ctx.manifest()
437 mp1 = ctx.parents()[0].manifest()
420 mp1 = ctx.parents()[0].manifest()
438 mp2 = ctx.parents()[1].manifest()
421 mp2 = ctx.parents()[1].manifest()
439 for f in mp1:
422 for f in mp1:
440 if f not in mc:
423 if f not in mc:
441 files.add(f)
424 files.add(f)
442 for f in mp2:
425 for f in mp2:
443 if f not in mc:
426 if f not in mc:
444 files.add(f)
427 files.add(f)
445 for f in mc:
428 for f in mc:
446 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f,
429 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f,
447 None):
430 None):
448 files.add(f)
431 files.add(f)
449
432
450 toupload = toupload.union(
433 toupload = toupload.union(
451 set([ctx[f].data().strip()
434 set([ctx[f].data().strip()
452 for f in files
435 for f in files
453 if lfutil.isstandin(f) and f in ctx]))
436 if lfutil.isstandin(f) and f in ctx]))
454 lfcommands.uploadlfiles(ui, self, remote, toupload)
437 lfcommands.uploadlfiles(ui, self, remote, toupload)
455 return super(lfilesrepo, self).push(remote, force, revs,
438 return super(lfilesrepo, self).push(remote, force, revs,
456 newbranch)
439 newbranch)
457
440
458 def _subdirlfs(self, files, lfiles):
441 def _subdirlfs(self, files, lfiles):
459 '''
442 '''
460 Adjust matched file list
443 Adjust matched file list
461 If we pass a directory to commit whose only commitable files
444 If we pass a directory to commit whose only commitable files
462 are largefiles, the core commit code aborts before finding
445 are largefiles, the core commit code aborts before finding
463 the largefiles.
446 the largefiles.
464 So we do the following:
447 So we do the following:
465 For directories that only have largefiles as matches,
448 For directories that only have largefiles as matches,
466 we explicitly add the largefiles to the matchlist and remove
449 we explicitly add the largefiles to the matchlist and remove
467 the directory.
450 the directory.
468 In other cases, we leave the match list unmodified.
451 In other cases, we leave the match list unmodified.
469 '''
452 '''
470 actualfiles = []
453 actualfiles = []
471 dirs = []
454 dirs = []
472 regulars = []
455 regulars = []
473
456
474 for f in files:
457 for f in files:
475 if lfutil.isstandin(f + '/'):
458 if lfutil.isstandin(f + '/'):
476 raise util.Abort(
459 raise util.Abort(
477 _('file "%s" is a largefile standin') % f,
460 _('file "%s" is a largefile standin') % f,
478 hint=('commit the largefile itself instead'))
461 hint=('commit the largefile itself instead'))
479 # Scan directories
462 # Scan directories
480 if os.path.isdir(self.wjoin(f)):
463 if os.path.isdir(self.wjoin(f)):
481 dirs.append(f)
464 dirs.append(f)
482 else:
465 else:
483 regulars.append(f)
466 regulars.append(f)
484
467
485 for f in dirs:
468 for f in dirs:
486 matcheddir = False
469 matcheddir = False
487 d = self.dirstate.normalize(f) + '/'
470 d = self.dirstate.normalize(f) + '/'
488 # Check for matched normal files
471 # Check for matched normal files
489 for mf in regulars:
472 for mf in regulars:
490 if self.dirstate.normalize(mf).startswith(d):
473 if self.dirstate.normalize(mf).startswith(d):
491 actualfiles.append(f)
474 actualfiles.append(f)
492 matcheddir = True
475 matcheddir = True
493 break
476 break
494 if not matcheddir:
477 if not matcheddir:
495 # If no normal match, manually append
478 # If no normal match, manually append
496 # any matching largefiles
479 # any matching largefiles
497 for lf in lfiles:
480 for lf in lfiles:
498 if self.dirstate.normalize(lf).startswith(d):
481 if self.dirstate.normalize(lf).startswith(d):
499 actualfiles.append(lf)
482 actualfiles.append(lf)
500 if not matcheddir:
483 if not matcheddir:
501 actualfiles.append(lfutil.standin(f))
484 actualfiles.append(lfutil.standin(f))
502 matcheddir = True
485 matcheddir = True
503 # Nothing in dir, so readd it
486 # Nothing in dir, so readd it
504 # and let commit reject it
487 # and let commit reject it
505 if not matcheddir:
488 if not matcheddir:
506 actualfiles.append(f)
489 actualfiles.append(f)
507
490
508 # Always add normal files
491 # Always add normal files
509 actualfiles += regulars
492 actualfiles += regulars
510 return actualfiles
493 return actualfiles
511
494
512 repo.__class__ = lfilesrepo
495 repo.__class__ = lfilesrepo
513
496
514 def checkrequireslfiles(ui, repo, **kwargs):
497 def checkrequireslfiles(ui, repo, **kwargs):
515 if 'largefiles' not in repo.requirements and util.any(
498 if 'largefiles' not in repo.requirements and util.any(
516 lfutil.shortname+'/' in f[0] for f in repo.store.datafiles()):
499 lfutil.shortname+'/' in f[0] for f in repo.store.datafiles()):
517 repo.requirements.add('largefiles')
500 repo.requirements.add('largefiles')
518 repo._writerequirements()
501 repo._writerequirements()
519
502
520 ui.setconfig('hooks', 'changegroup.lfiles', checkrequireslfiles)
503 ui.setconfig('hooks', 'changegroup.lfiles', checkrequireslfiles)
521 ui.setconfig('hooks', 'commit.lfiles', checkrequireslfiles)
504 ui.setconfig('hooks', 'commit.lfiles', checkrequireslfiles)
General Comments 0
You need to be logged in to leave comments. Login now