##// END OF EJS Templates
lfutil: avoid creating unnecessary copy of status tuple...
Martin von Zweigbergk -
r22912:3b8e6c09 default
parent child Browse files
Show More
@@ -1,416 +1,416 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''largefiles utility code: must not import other modules in this package.'''
9 '''largefiles utility code: must not import other modules in this package.'''
10
10
11 import os
11 import os
12 import platform
12 import platform
13 import shutil
13 import shutil
14 import stat
14 import stat
15
15
16 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
16 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
17 from mercurial.i18n import _
17 from mercurial.i18n import _
18 from mercurial import node
18 from mercurial import node
19
19
20 shortname = '.hglf'
20 shortname = '.hglf'
21 shortnameslash = shortname + '/'
21 shortnameslash = shortname + '/'
22 longname = 'largefiles'
22 longname = 'largefiles'
23
23
24
24
25 # -- Private worker functions ------------------------------------------
25 # -- Private worker functions ------------------------------------------
26
26
def getminsize(ui, assumelfiles, opt, default=10):
    '''Return the minimum size (as a float) for a file to be treated as
    a largefile, taken from opt or - when largefiles are assumed - from
    the [largefiles] minsize config.  Abort if no usable value exists.'''
    lfsize = opt
    if assumelfiles and not lfsize:
        lfsize = ui.config(longname, 'minsize', default=default)
    if lfsize:
        try:
            return float(lfsize)
        except ValueError:
            raise util.Abort(_('largefiles: size must be number (not %s)\n')
                             % lfsize)
    if lfsize is None:
        raise util.Abort(_('minimum size for largefiles must be specified'))
    return lfsize
40
40
def link(src, dest):
    '''Hardlink src to dest, creating the destination directory first.
    If hardlinking fails, fall back to an atomic copy and replicate the
    source's permission bits.'''
    util.makedirs(os.path.dirname(dest))
    try:
        util.oslink(src, dest)
    except OSError:
        # if hardlinks fail, fallback on atomic copy
        dst = util.atomictempfile(dest)
        fp = open(src, 'rb')
        try:
            for chunk in util.filechunkiter(fp):
                dst.write(chunk)
        finally:
            # previously the source file object leaked if writing raised
            fp.close()
        dst.close()
        os.chmod(dest, os.stat(src).st_mode)
52
52
def usercachepath(ui, hash):
    '''Return the path for the given hash in the per-user largefile cache.

    The cache root is the [largefiles] usercache config value when set;
    otherwise a platform default is used: LOCALAPPDATA/APPDATA on Windows,
    ~/Library/Caches on Mac OS X, and XDG_CACHE_HOME or ~/.cache on other
    POSIX systems.  Returns None when the relevant environment variables
    are unset; aborts on an unrecognized operating system.
    '''
    path = ui.configpath(longname, 'usercache', None)
    if path:
        path = os.path.join(path, hash)
    else:
        if os.name == 'nt':
            # prefer the local (non-roaming) application data directory
            appdata = os.getenv('LOCALAPPDATA', os.getenv('APPDATA'))
            if appdata:
                path = os.path.join(appdata, longname, hash)
        elif platform.system() == 'Darwin':
            home = os.getenv('HOME')
            if home:
                path = os.path.join(home, 'Library', 'Caches',
                                    longname, hash)
        elif os.name == 'posix':
            # follow the XDG base directory spec, with ~/.cache fallback
            path = os.getenv('XDG_CACHE_HOME')
            if path:
                path = os.path.join(path, longname, hash)
            else:
                home = os.getenv('HOME')
                if home:
                    path = os.path.join(home, '.cache', longname, hash)
        else:
            raise util.Abort(_('unknown operating system: %s\n') % os.name)
    return path
78
78
def inusercache(ui, hash):
    '''Report whether the given largefile hash exists in the user cache.'''
    cached = usercachepath(ui, hash)
    return cached and os.path.exists(cached)
82
82
def findfile(repo, hash):
    '''Locate the largefile with the given hash: return its store path,
    linking it in from the user cache first if needed, or None when it
    is in neither cache.'''
    if instore(repo, hash):
        repo.ui.note(_('found %s in store\n') % hash)
        return storepath(repo, hash)
    if inusercache(repo.ui, hash):
        repo.ui.note(_('found %s in system cache\n') % hash)
        storedpath = storepath(repo, hash)
        link(usercachepath(repo.ui, hash), storedpath)
        return storedpath
    return None
93
93
class largefilesdirstate(dirstate.dirstate):
    '''dirstate subclass used for the largefiles dirstate.

    Every path is normalized through unixpath() before delegating to the
    base class, so entries are keyed consistently regardless of the
    platform separators in the input.  Ignore handling is disabled:
    _ignore() always returns False.
    '''
    def __getitem__(self, key):
        return super(largefilesdirstate, self).__getitem__(unixpath(key))
    def normal(self, f):
        return super(largefilesdirstate, self).normal(unixpath(f))
    def remove(self, f):
        return super(largefilesdirstate, self).remove(unixpath(f))
    def add(self, f):
        return super(largefilesdirstate, self).add(unixpath(f))
    def drop(self, f):
        return super(largefilesdirstate, self).drop(unixpath(f))
    def forget(self, f):
        return super(largefilesdirstate, self).forget(unixpath(f))
    def normallookup(self, f):
        return super(largefilesdirstate, self).normallookup(unixpath(f))
    def _ignore(self, f):
        # no file tracked by this dirstate is ever considered ignored
        return False
111
111
def openlfdirstate(ui, repo, create=True):
    '''
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.

    When create is true and no dirstate file exists yet, populate it
    from the standins currently tracked by the repo dirstate.
    '''
    lfstoredir = repo.join(longname)
    opener = scmutil.opener(lfstoredir)
    lfdirstate = largefilesdirstate(opener, ui, repo.root,
                                    repo.dirstate._validate)

    # If the largefiles dirstate does not exist, populate and create
    # it. This ensures that we create it on the first meaningful
    # largefiles operation in a new clone.
    if create and not os.path.exists(os.path.join(lfstoredir, 'dirstate')):
        matcher = getstandinmatcher(repo)
        standins = repo.dirstate.walk(matcher, [], False, False)

        if len(standins) > 0:
            util.makedirs(lfstoredir)

        # record each largefile as "possibly dirty" so the next status
        # check hashes it rather than trusting stale state
        for standin in standins:
            lfile = splitstandin(standin)
            lfdirstate.normallookup(lfile)
    return lfdirstate
136
136
def lfdirstatestatus(lfdirstate, repo, rev):
    '''Return the lfdirstate status tuple against revision rev.

    Files the lfdirstate reports as "unsure" are resolved by comparing
    the standin hash at rev with the hash of the working-directory file:
    mismatches are appended to the modified list, matches to the clean
    list (and marked normal in lfdirstate).  The status tuple s is
    mutated in place and returned, avoiding a copy.
    '''
    match = match_.always(repo.root, repo.getcwd())
    unsure, s = lfdirstate.status(match, [], False, False, False)
    # only the modified and clean lists are amended below; the other
    # fields pass through untouched inside s
    modified, _added, _removed, _missing, _unknown, _ignored, clean = s
    for lfile in unsure:
        try:
            fctx = repo[rev][standin(lfile)]
        except LookupError:
            # standin does not exist at rev
            fctx = None
        if not fctx or fctx.data().strip() != hashfile(repo.wjoin(lfile)):
            modified.append(lfile)
        else:
            clean.append(lfile)
            lfdirstate.normal(lfile)
    return s
152
152
def listlfiles(repo, rev=None, matcher=None):
    '''return a list of largefiles in the working copy or the
    specified changeset'''
    if matcher is None:
        matcher = getstandinmatcher(repo)

    lfiles = []
    for fname in repo[rev].walk(matcher):
        # ignore unknown files in working directory
        if rev is not None or repo.dirstate[fname] != '?':
            lfiles.append(splitstandin(fname))
    return lfiles
164
164
def instore(repo, hash):
    '''Report whether a largefile with this hash exists in the repo store.'''
    path = storepath(repo, hash)
    return os.path.exists(path)
167
167
def storepath(repo, hash):
    '''Return the repo-internal path where the given hash is stored.'''
    relpath = os.path.join(longname, hash)
    return repo.join(relpath)
170
170
def copyfromcache(repo, hash, filename):
    '''Copy the specified largefile from the repo or system cache to
    filename in the repository. Return true on success or false if the
    file was not found in either cache (which should not happened:
    this is meant to be called only after ensuring that the needed
    largefile exists in the cache).'''
    cached = findfile(repo, hash)
    if cached is None:
        return False
    wpath = repo.wjoin(filename)
    util.makedirs(os.path.dirname(wpath))
    # The write may fail before the file is fully written, but we
    # don't use atomic writes in the working copy.
    shutil.copy(cached, wpath)
    return True
185
185
def copytostore(repo, rev, file, uploaded=False):
    '''Copy the largefile standing behind "file" at rev into the repo
    store, unless that hash is already present.  ("uploaded" is accepted
    for interface compatibility; it is not read here.)'''
    hash = readstandin(repo, file, rev)
    if not instore(repo, hash):
        copytostoreabsolute(repo, repo.wjoin(file), hash)
191
191
def copyalltostore(repo, node):
    '''Copy all largefiles in a given revision to the store'''
    ctx = repo[node]
    for filename in ctx.files():
        # only standins that are actually present in this revision's
        # manifest are copied
        if not (isstandin(filename) and filename in ctx.manifest()):
            continue
        realfile = splitstandin(filename)
        copytostore(repo, ctx.node(), realfile)
200
200
201
201
def copytostoreabsolute(repo, file, hash):
    '''Store the file at absolute path "file" under the given hash:
    hardlink from the user cache when the hash is already there,
    otherwise copy atomically into the store and link the result back
    into the user cache.  No-op while converting a repository.'''
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    elif not getattr(repo, "_isconverting", False):
        util.makedirs(os.path.dirname(storepath(repo, hash)))
        dst = util.atomictempfile(storepath(repo, hash),
                                  createmode=repo.store.createmode)
        fp = open(file, 'rb')
        try:
            for chunk in util.filechunkiter(fp):
                dst.write(chunk)
        finally:
            # previously the source file object leaked if writing raised
            fp.close()
        dst.close()
        linktousercache(repo, hash)
213
213
def linktousercache(repo, hash):
    '''Hardlink the store copy of hash into the per-user cache, when a
    user cache location is available.'''
    usercache = usercachepath(repo.ui, hash)
    if usercache:
        link(storepath(repo, hash), usercache)
218
218
def getstandinmatcher(repo, pats=None, opts=None):
    '''Return a match object that applies pats to the standin directory'''
    # None defaults replace the original mutable defaults ([], {});
    # behavior for callers passing nothing is unchanged
    standindir = repo.wjoin(shortname)
    if pats:
        pats = [os.path.join(standindir, pat) for pat in pats]
    else:
        # no patterns: relative to repo root
        pats = [standindir]
    # no warnings about missing files or directories
    match = scmutil.match(repo[None], pats, opts or {})
    match.bad = lambda f, msg: None
    return match
231
231
def composestandinmatcher(repo, rmatcher):
    '''Return a matcher that accepts standins corresponding to the
    files accepted by rmatcher. Pass the list of files in the matcher
    as the paths specified by the user.'''
    smatcher = getstandinmatcher(repo, rmatcher.files())
    # keep a reference to the original matchfn before replacing it
    # (local renamed so it no longer shadows the module-level isstandin)
    standinmatch = smatcher.matchfn
    def composedmatchfn(f):
        return standinmatch(f) and rmatcher.matchfn(splitstandin(f))
    smatcher.matchfn = composedmatchfn
    return smatcher
243
243
def standin(filename):
    '''Return the repo-relative path to the standin for the specified big
    file.'''
    # Standins live under the .hglf/ directory and always use '/'
    # separators, because that is what dirstate uses even on Windows;
    # any platform separators are converted first in case the name came
    # from an external source such as the command line.  Callers wanting
    # an absolute path must wrap the result in repo.wjoin() themselves.
    return shortnameslash + util.pconvert(filename)
255
255
def isstandin(filename):
    '''Return true if filename is a big file standin. filename must be
    in Mercurial's internal form (slash-separated).'''
    # standins are exactly the paths under the '.hglf/' prefix
    return filename.startswith(shortnameslash)
260
260
def splitstandin(filename):
    '''Return the largefile path for the given standin path, or None if
    filename is not a standin.'''
    # Split on '/' because that is what dirstate always uses, even on
    # Windows; convert local separators first in case the name came from
    # an external source (like the command line).
    parts = util.pconvert(filename).split('/', 1)
    if len(parts) == 2 and parts[0] == shortname:
        return parts[1]
    return None
270
270
def updatestandin(repo, standin):
    '''Rewrite the given standin from the current hash and executable
    bit of its largefile in the working directory, if that file exists.'''
    lfile = repo.wjoin(splitstandin(standin))
    if os.path.exists(lfile):
        writestandin(repo, standin, hashfile(lfile), getexecutable(lfile))
277
277
def readstandin(repo, filename, node=None):
    '''read hex hash from standin for filename at given node, or working
    directory if no node is given'''
    # the standin's content is the largefile's hash plus whitespace
    return repo[node][standin(filename)].data().strip()
282
282
def writestandin(repo, standin, hash, executable):
    '''write hash to <repo.root>/<standin>'''
    # standins contain just the hash and a newline; the executable bit
    # of the real largefile is mirrored on the standin file
    repo.wwrite(standin, hash + '\n', executable and 'x' or '')
286
286
def copyandhash(instream, outfile):
    '''Read bytes from instream (iterable) and write them to outfile,
    computing the SHA-1 hash of the data along the way. Return the hash.'''
    sha = util.sha1('')
    for chunk in instream:
        sha.update(chunk)
        outfile.write(chunk)
    return sha.hexdigest()
295
295
def hashrepofile(repo, file):
    '''Return the hash of the given repo-relative file in the working
    directory (empty string if it does not exist; see hashfile).'''
    return hashfile(repo.wjoin(file))
298
298
def hashfile(file):
    '''Return the hex SHA-1 hash of the given file's content, or the
    empty string when the file does not exist.'''
    if not os.path.exists(file):
        return ''
    hasher = util.sha1('')
    fd = open(file, 'rb')
    try:
        # hash in 128k chunks to bound memory use on large files
        for data in util.filechunkiter(fd, 128 * 1024):
            hasher.update(data)
    finally:
        # previously the descriptor leaked if reading raised
        fd.close()
    return hasher.hexdigest()
308
308
def getexecutable(filename):
    '''Report whether filename is executable by owner, group and other.'''
    st_mode = os.stat(filename).st_mode
    # truthy only when all three execute bits are set
    return ((st_mode & stat.S_IXOTH) and
            (st_mode & stat.S_IXGRP) and
            (st_mode & stat.S_IXUSR))
314
314
def urljoin(first, second, *arg):
    '''Join two or more URL components, ensuring exactly one slash
    between consecutive components.'''
    def _glue(left, right):
        # one slash between the parts: append to left, strip from right
        if not left.endswith('/'):
            left = left + '/'
        return left + (right[1:] if right.startswith('/') else right)

    url = first
    for piece in (second,) + arg:
        url = _glue(url, piece)
    return url
327
327
def hexsha1(data):
    """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
    object data"""
    sha = util.sha1()
    for chunk in util.filechunkiter(data):
        sha.update(chunk)
    return sha.hexdigest()
335
335
def httpsendfile(ui, filename):
    '''Return an httpsendfile object opened on filename in binary mode.'''
    return httpconnection.httpsendfile(ui, filename, 'rb')
338
338
def unixpath(path):
    '''Return a version of path normalized for use with the lfdirstate.'''
    normalized = os.path.normpath(path)
    return util.pconvert(normalized)
342
342
def islfilesrepo(repo):
    '''Report whether repo actually contains largefiles, either as
    standin data in its store or as entries in its largefiles dirstate.'''
    if 'largefiles' in repo.requirements:
        if util.any(shortnameslash in f[0] for f in repo.store.datafiles()):
            return True

    # no standins in the store: fall back to checking the lfdirstate
    return util.any(openlfdirstate(repo.ui, repo, False))
349
349
class storeprotonotcapable(Exception):
    '''Exception carrying the largefile store types involved in a failed
    store lookup; the list is exposed as the storetypes attribute.'''
    def __init__(self, storetypes):
        self.storetypes = storetypes
353
353
def getstandinsstate(repo):
    '''Return a list of (lfile, hash) pairs for every standin tracked by
    the repo dirstate; hash is None when the standin cannot be read.'''
    standins = []
    matcher = getstandinmatcher(repo)
    for standinfile in repo.dirstate.walk(matcher, [], False, False):
        lfile = splitstandin(standinfile)
        try:
            hash = readstandin(repo, lfile)
        except IOError:
            hash = None
        standins.append((lfile, hash))
    return standins
365
365
def synclfdirstate(repo, lfdirstate, lfile, normallookup):
    '''Synchronize the lfdirstate entry for lfile with the dirstate
    entry of its standin.

    The standin's state character ('n', 'm', 'r', 'a' or '?') is mapped
    onto the corresponding lfdirstate operation.  When normallookup is
    true, a "normal" standin is recorded as possibly-dirty (normallookup)
    instead of clean.
    '''
    lfstandin = standin(lfile)
    if lfstandin in repo.dirstate:
        # raw dirstate tuple: field 0 is the state, field 3 the mtime
        stat = repo.dirstate._map[lfstandin]
        state, mtime = stat[0], stat[3]
    else:
        # standin untracked: treat as unknown
        state, mtime = '?', -1
    if state == 'n':
        if normallookup or mtime < 0:
            # state 'n' doesn't ensure 'clean' in this case
            lfdirstate.normallookup(lfile)
        else:
            lfdirstate.normal(lfile)
    elif state == 'm':
        lfdirstate.normallookup(lfile)
    elif state == 'r':
        lfdirstate.remove(lfile)
    elif state == 'a':
        lfdirstate.add(lfile)
    elif state == '?':
        lfdirstate.drop(lfile)
387
387
def getlfilestoupdate(oldstandins, newstandins):
    '''Return the largefile names whose (name, hash) standin entries
    differ between the two lists, each name listed once in first-seen
    order.'''
    changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
    # track seen names in a set instead of testing list membership,
    # avoiding quadratic behavior on large standin lists
    seen = set()
    filelist = []
    for f in changedstandins:
        if f[0] not in seen:
            seen.add(f[0])
            filelist.append(f[0])
    return filelist
395
395
def getlfilestoupload(repo, missing, addfunc):
    '''For every standin present in the given missing revisions, call
    addfunc(standin_name, largefile_hash).'''
    for n in missing:
        parents = [p for p in repo.changelog.parents(n) if p != node.nullid]
        ctx = repo[n]
        files = set(ctx.files())
        if len(parents) == 2:
            # for a merge, also consider files that only exist in a
            # parent manifest or whose entry differs from either parent
            mc = ctx.manifest()
            mp1 = ctx.parents()[0].manifest()
            mp2 = ctx.parents()[1].manifest()
            for f in mp1:
                if f not in mc:
                    files.add(f)
            for f in mp2:
                if f not in mc:
                    files.add(f)
            for f in mc:
                if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                    files.add(f)
        for fn in files:
            # only standins actually present in this revision are reported
            if isstandin(fn) and fn in ctx:
                addfunc(fn, ctx[fn].data().strip())
General Comments 0
You need to be logged in to leave comments. Login now