##// END OF EJS Templates
largefiles: fix profile of unused largefilesdirstate._ignore
Mads Kiilerich -
r21085:66c6da0b default
parent child Browse files
Show More
@@ -1,390 +1,390 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''largefiles utility code: must not import other modules in this package.'''
9 '''largefiles utility code: must not import other modules in this package.'''
10
10
11 import os
11 import os
12 import platform
12 import platform
13 import shutil
13 import shutil
14 import stat
14 import stat
15
15
16 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
16 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
17 from mercurial.i18n import _
17 from mercurial.i18n import _
18 from mercurial import node
18 from mercurial import node
19
19
# Name of the directory (relative to the repo root) that holds largefile
# standins, plus a slash-terminated variant for cheap prefix matching.
shortname = '.hglf'
shortnameslash = shortname + '/'
# Name used both for the largefiles config section and for the store
# directory under .hg.
longname = 'largefiles'
23
23
24
24
25 # -- Private worker functions ------------------------------------------
25 # -- Private worker functions ------------------------------------------
26
26
def getminsize(ui, assumelfiles, opt, default=10):
    '''Return the minimum size (in MB) below which files are not treated
    as largefiles, taken from opt or, when largefiles are assumed, from
    the [largefiles] minsize configuration. Abort when no usable size
    can be determined.'''
    lfsize = opt
    if assumelfiles and not lfsize:
        lfsize = ui.config(longname, 'minsize', default=default)
    if lfsize:
        try:
            lfsize = float(lfsize)
        except ValueError:
            raise util.Abort(_('largefiles: size must be number (not %s)\n')
                             % lfsize)
    if lfsize is None:
        raise util.Abort(_('minimum size for largefiles must be specified'))
    return lfsize
40
40
def link(src, dest):
    '''Hardlink src to dest, creating any missing parent directories.
    When hardlinking fails, fall back to an atomic copy that preserves
    the source's permission bits.'''
    util.makedirs(os.path.dirname(dest))
    try:
        util.oslink(src, dest)
    except OSError:
        # if hardlinks fail, fallback on atomic copy
        dst = util.atomictempfile(dest)
        # close the source handle even if the copy fails partway
        fp = open(src, 'rb')
        try:
            for chunk in util.filechunkiter(fp):
                dst.write(chunk)
        finally:
            fp.close()
        dst.close()
        os.chmod(dest, os.stat(src).st_mode)
52
52
def usercachepath(ui, hash):
    '''Return the path of the largefile with the given hash inside the
    user-level cache: the configured [largefiles] usercache location if
    set, otherwise a per-platform default. May return a false value when
    no location can be determined; aborts on unknown operating systems.'''
    path = ui.configpath(longname, 'usercache', None)
    if path:
        return os.path.join(path, hash)
    # no explicit configuration: fall back to a platform-specific default
    if os.name == 'nt':
        appdata = os.getenv('LOCALAPPDATA', os.getenv('APPDATA'))
        if appdata:
            path = os.path.join(appdata, longname, hash)
    elif platform.system() == 'Darwin':
        home = os.getenv('HOME')
        if home:
            path = os.path.join(home, 'Library', 'Caches',
                                longname, hash)
    elif os.name == 'posix':
        path = os.getenv('XDG_CACHE_HOME')
        if path:
            path = os.path.join(path, longname, hash)
        else:
            home = os.getenv('HOME')
            if home:
                path = os.path.join(home, '.cache', longname, hash)
    else:
        raise util.Abort(_('unknown operating system: %s\n') % os.name)
    return path
78
78
def inusercache(ui, hash):
    '''Report whether the largefile with the given hash is present in
    the user-level cache.'''
    path = usercachepath(ui, hash)
    if not path:
        # no cache location could be determined; propagate the false value
        return path
    return os.path.exists(path)
82
82
def findfile(repo, hash):
    '''Locate the largefile with the given hash, preferring the per-repo
    store and falling back to the user cache (linking the cached copy
    into the store). Return its path, or None when not found in either.'''
    if instore(repo, hash):
        repo.ui.note(_('found %s in store\n') % hash)
        return storepath(repo, hash)
    if inusercache(repo.ui, hash):
        repo.ui.note(_('found %s in system cache\n') % hash)
        path = storepath(repo, hash)
        link(usercachepath(repo.ui, hash), path)
        return path
    return None
93
93
class largefilesdirstate(dirstate.dirstate):
    '''A dirstate that tracks largefiles.

    Every pathname is normalized with unixpath() before being handed to
    the base class, because the largefiles dirstate always stores
    slash-separated, normalized paths.
    '''
    def __getitem__(self, key):
        return super(largefilesdirstate, self).__getitem__(unixpath(key))
    def normal(self, f):
        return super(largefilesdirstate, self).normal(unixpath(f))
    def remove(self, f):
        return super(largefilesdirstate, self).remove(unixpath(f))
    def add(self, f):
        return super(largefilesdirstate, self).add(unixpath(f))
    def drop(self, f):
        return super(largefilesdirstate, self).drop(unixpath(f))
    def forget(self, f):
        return super(largefilesdirstate, self).forget(unixpath(f))
    def normallookup(self, f):
        return super(largefilesdirstate, self).normallookup(unixpath(f))
    def _ignore(self, f):
        # largefiles are never ignored
        return False
111
111
def openlfdirstate(ui, repo, create=True):
    '''
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.
    '''
    storedir = repo.join(longname)
    lfdirstate = largefilesdirstate(scmutil.opener(storedir), ui, repo.root,
                                    repo.dirstate._validate)

    if create and not os.path.exists(os.path.join(storedir, 'dirstate')):
        # The largefiles dirstate does not exist yet: populate and create
        # it from the standins the repo dirstate tracks, so it appears on
        # the first meaningful largefiles operation in a new clone.
        util.makedirs(storedir)
        matcher = getstandinmatcher(repo)
        for standinfile in repo.dirstate.walk(matcher, [], False, False):
            lfdirstate.normallookup(splitstandin(standinfile))
    return lfdirstate
132
132
def lfdirstatestatus(lfdirstate, repo, rev):
    '''Compute the status of tracked largefiles against rev. Entries the
    dirstate is unsure about are resolved by comparing the standin hash
    at rev with the working-copy contents; matching files are marked
    normal in lfdirstate. Return the usual 7-tuple of status lists.'''
    match = match_.always(repo.root, repo.getcwd())
    (unsure, modified, added, removed,
     missing, unknown, ignored, clean) = lfdirstate.status(
        match, [], False, False, False)
    for lfile in unsure:
        try:
            fctx = repo[rev][standin(lfile)]
        except LookupError:
            fctx = None
        if not fctx or fctx.data().strip() != hashfile(repo.wjoin(lfile)):
            modified.append(lfile)
        else:
            clean.append(lfile)
            lfdirstate.normal(lfile)
    return (modified, added, removed, missing, unknown, ignored, clean)
148
148
def listlfiles(repo, rev=None, matcher=None):
    '''return a list of largefiles in the working copy or the
    specified changeset'''
    if matcher is None:
        matcher = getstandinmatcher(repo)

    lfiles = []
    for f in repo[rev].walk(matcher):
        # ignore unknown files in working directory
        if rev is not None or repo.dirstate[f] != '?':
            lfiles.append(splitstandin(f))
    return lfiles
160
160
def instore(repo, hash):
    '''Report whether the largefile with the given hash exists in the
    per-repo store.'''
    path = storepath(repo, hash)
    return os.path.exists(path)
163
163
def storepath(repo, hash):
    '''Return the path of the largefile with the given hash inside the
    per-repo store (under .hg/largefiles).'''
    relpath = os.path.join(longname, hash)
    return repo.join(relpath)
166
166
def copyfromcache(repo, hash, filename):
    '''Copy the specified largefile from the repo or system cache to
    filename in the repository. Return true on success or false if the
    file was not found in either cache (which should not happened:
    this is meant to be called only after ensuring that the needed
    largefile exists in the cache).'''
    path = findfile(repo, hash)
    if path is None:
        return False
    wcpath = repo.wjoin(filename)
    util.makedirs(os.path.dirname(wcpath))
    # The write may fail before the file is fully written, but we
    # don't use atomic writes in the working copy.
    shutil.copy(path, wcpath)
    return True
181
181
def copytostore(repo, rev, file, uploaded=False):
    '''Copy the working-copy largefile for file into the per-repo store,
    keyed by the hash recorded in its standin at rev. No-op when the
    hash is already in the store. (uploaded is unused; kept for
    interface compatibility.)'''
    hash = readstandin(repo, file, rev)
    if not instore(repo, hash):
        copytostoreabsolute(repo, repo.wjoin(file), hash)
187
187
def copyalltostore(repo, node):
    '''Copy all largefiles in a given revision to the store'''
    ctx = repo[node]
    for filename in ctx.files():
        # only standins that still exist in the revision's manifest
        if isstandin(filename) and filename in ctx.manifest():
            copytostore(repo, ctx.node(), splitstandin(filename))
196
196
197
197
def copytostoreabsolute(repo, file, hash):
    '''Copy the largefile at absolute path file into the per-repo store
    under hash, hardlinking from the user cache when possible, and link
    the stored copy back into the user cache. Skipped while converting
    (repo._isconverting).'''
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    elif not getattr(repo, "_isconverting", False):
        util.makedirs(os.path.dirname(storepath(repo, hash)))
        dst = util.atomictempfile(storepath(repo, hash),
                                  createmode=repo.store.createmode)
        # close the source handle even if the copy fails partway
        fp = open(file, 'rb')
        try:
            for chunk in util.filechunkiter(fp):
                dst.write(chunk)
        finally:
            fp.close()
        dst.close()
        linktousercache(repo, hash)
209
209
def linktousercache(repo, hash):
    '''Hardlink (or copy) the stored largefile with the given hash into
    the user-level cache, when a cache location is available.'''
    path = usercachepath(repo.ui, hash)
    if path:
        link(storepath(repo, hash), path)
214
214
def getstandinmatcher(repo, pats=None, opts=None):
    '''Return a match object that applies pats to the standin directory'''
    # mutable defaults ([], {}) replaced with None sentinels; behavior for
    # all callers is unchanged
    standindir = repo.wjoin(shortname)
    if pats:
        pats = [os.path.join(standindir, pat) for pat in pats]
    else:
        # no patterns: relative to repo root
        pats = [standindir]
    # no warnings about missing files or directories
    match = scmutil.match(repo[None], pats, opts or {})
    match.bad = lambda f, msg: None
    return match
227
227
def composestandinmatcher(repo, rmatcher):
    '''Return a matcher that accepts standins corresponding to the
    files accepted by rmatcher. Pass the list of files in the matcher
    as the paths specified by the user.'''
    smatcher = getstandinmatcher(repo, rmatcher.files())
    standinmatch = smatcher.matchfn
    def composed(f):
        # f must be a standin whose largefile counterpart rmatcher accepts
        return standinmatch(f) and rmatcher.matchfn(splitstandin(f))
    smatcher.matchfn = composed

    return smatcher
239
239
def standin(filename):
    '''Return the repo-relative path to the standin for the specified big
    file.'''
    # Notes:
    # 1) Some callers want an absolute path, but for instance addlargefiles
    #    needs it repo-relative so it can be passed to repo[None].add().  So
    #    leave it up to the caller to use repo.wjoin() to get an absolute path.
    # 2) Join with '/' because that's what dirstate always uses, even on
    #    Windows.  Change existing separator to '/' first in case we are
    #    passed filenames from an external source (like the command line).
    normalized = util.pconvert(filename)
    return shortnameslash + normalized
251
251
def isstandin(filename):
    '''Return true if filename is a big file standin. filename must be
    in Mercurial's internal form (slash-separated).'''
    prefix = shortnameslash
    return filename.startswith(prefix)
256
256
def splitstandin(filename):
    '''Return the largefile path corresponding to the standin filename,
    or None when filename is not a standin.'''
    # Split on / because that's what dirstate always uses, even on Windows.
    # Change local separator to / first just in case we are passed filenames
    # from an external source (like the command line).
    bits = util.pconvert(filename).split('/', 1)
    if len(bits) != 2 or bits[0] != shortname:
        return None
    return bits[1]
266
266
def updatestandin(repo, standin):
    '''Rewrite standin from the current working-copy contents of its
    largefile (hash and executable bit); no-op when the largefile is
    absent from the working copy.'''
    file = repo.wjoin(splitstandin(standin))
    if not os.path.exists(file):
        return
    hash = hashfile(file)
    executable = getexecutable(file)
    writestandin(repo, standin, hash, executable)
273
273
def readstandin(repo, filename, node=None):
    '''read hex hash from standin for filename at given node, or working
    directory if no node is given'''
    fctx = repo[node][standin(filename)]
    return fctx.data().strip()
278
278
def writestandin(repo, standin, hash, executable):
    '''write hash to <repo.root>/<standin>'''
    flags = executable and 'x' or ''
    repo.wwrite(standin, hash + '\n', flags)
282
282
def copyandhash(instream, outfile):
    '''Read bytes from instream (iterable) and write them to outfile,
    computing the SHA-1 hash of the data along the way. Return the hash.'''
    hasher = util.sha1('')
    for chunk in instream:
        hasher.update(chunk)
        outfile.write(chunk)
    return hasher.hexdigest()
291
291
def hashrepofile(repo, file):
    '''Return the SHA-1 hash of the working-copy contents of the
    repo-relative file.'''
    abspath = repo.wjoin(file)
    return hashfile(abspath)
294
294
def hashfile(file):
    '''Return the hex SHA-1 hash of the contents of file, or the empty
    string when file does not exist.'''
    if not os.path.exists(file):
        return ''
    hasher = util.sha1('')
    fd = open(file, 'rb')
    try:
        # read in 128k chunks to bound memory use on big largefiles
        for data in util.filechunkiter(fd, 128 * 1024):
            hasher.update(data)
    finally:
        # close the handle even when reading raises
        fd.close()
    return hasher.hexdigest()
304
304
def getexecutable(filename):
    '''Return a true value when the user, group, and other execute bits
    are all set on filename, and a false value otherwise.'''
    mode = os.stat(filename).st_mode
    result = True
    for bit in (stat.S_IXUSR, stat.S_IXGRP, stat.S_IXOTH):
        # short-circuits to a false value as soon as one bit is missing
        result = result and (mode & bit)
    return result
310
310
def urljoin(first, second, *arg):
    '''Join two or more URL components, ensuring exactly one slash
    between each adjacent pair.'''
    def join(left, right):
        if not left.endswith('/'):
            left += '/'
        if right.startswith('/'):
            right = right[1:]
        return left + right

    url = first
    for piece in (second,) + arg:
        url = join(url, piece)
    return url
323
323
def hexsha1(data):
    """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
    object data"""
    hasher = util.sha1()
    for chunk in util.filechunkiter(data):
        hasher.update(chunk)
    return hasher.hexdigest()
331
331
def httpsendfile(ui, filename):
    '''Wrap filename in an httpsendfile object opened for binary
    reading, suitable for uploading over HTTP.'''
    return httpconnection.httpsendfile(ui, filename, 'rb')
334
334
def unixpath(path):
    '''Return a version of path normalized for use with the lfdirstate.'''
    normalized = os.path.normpath(path)
    return util.pconvert(normalized)
338
338
def islfilesrepo(repo):
    '''Report whether repo is a largefiles repo: either its store
    contains standin data files, or its largefiles dirstate is
    non-empty.'''
    if 'largefiles' in repo.requirements:
        if util.any(shortnameslash in f[0] for f in repo.store.datafiles()):
            return True

    return util.any(openlfdirstate(repo.ui, repo, False))
345
345
class storeprotonotcapable(Exception):
    '''Raised when no available largefile store supports any of the
    required store types (storetypes).'''
    def __init__(self, storetypes):
        self.storetypes = storetypes
349
349
def getstandinsstate(repo):
    '''Return a list of (lfile, hash) pairs, one per standin tracked by
    the repo dirstate; hash is None when the standin cannot be read.'''
    state = []
    matcher = getstandinmatcher(repo)
    for standinfile in repo.dirstate.walk(matcher, [], False, False):
        lfile = splitstandin(standinfile)
        try:
            hash = readstandin(repo, lfile)
        except IOError:
            # unreadable standin: record the file with no hash
            hash = None
        state.append((lfile, hash))
    return state
361
361
def getlfilestoupdate(oldstandins, newstandins):
    '''Given two lists of (lfile, hash) pairs, return the list of
    largefile names whose standin entries differ between them.'''
    changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
    filelist = []
    # track already-emitted names in a set: O(1) membership instead of
    # the O(n) list scan per entry
    seen = set()
    for lfile, hash in changedstandins:
        if lfile not in seen:
            seen.add(lfile)
            filelist.append(lfile)
    return filelist
369
369
def getlfilestoupload(repo, missing, addfunc):
    '''For each outgoing changeset node in missing, call
    addfunc(standin, hash) for every standin present in that changeset
    that may need its largefile uploaded.'''
    for n in missing:
        parents = [p for p in repo.changelog.parents(n) if p != node.nullid]
        ctx = repo[n]
        files = set(ctx.files())
        if len(parents) == 2:
            # for merges, also consider files missing from, or changed
            # against, either parent manifest
            mc = ctx.manifest()
            mp1 = ctx.parents()[0].manifest()
            mp2 = ctx.parents()[1].manifest()
            for parentmf in (mp1, mp2):
                for f in parentmf:
                    if f not in mc:
                        files.add(f)
            for f in mc:
                if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                    files.add(f)
        for fn in files:
            if isstandin(fn) and fn in ctx:
                addfunc(fn, ctx[fn].data().strip())
General Comments 0
You need to be logged in to leave comments. Login now