largefiles: centralize the logic to get outgoing largefiles...
FUJIWARA Katsunori
r21042:32b3331f default
@@ -1,367 +1,390 @@
# Copyright 2009-2010 Gregory P. Ward
# Copyright 2009-2010 Intelerad Medical Systems Incorporated
# Copyright 2010-2011 Fog Creek Software
# Copyright 2010-2011 Unity Technologies
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

'''largefiles utility code: must not import other modules in this package.'''

import os
import platform
import shutil
import stat

from mercurial import dirstate, httpconnection, match as match_, util, scmutil
from mercurial.i18n import _
+from mercurial import node

shortname = '.hglf'
shortnameslash = shortname + '/'
longname = 'largefiles'


# -- Private worker functions ------------------------------------------

def getminsize(ui, assumelfiles, opt, default=10):
    lfsize = opt
    if not lfsize and assumelfiles:
        lfsize = ui.config(longname, 'minsize', default=default)
    if lfsize:
        try:
            lfsize = float(lfsize)
        except ValueError:
            raise util.Abort(_('largefiles: size must be number (not %s)\n')
                             % lfsize)
    if lfsize is None:
        raise util.Abort(_('minimum size for largefiles must be specified'))
    return lfsize

def link(src, dest):
    util.makedirs(os.path.dirname(dest))
    try:
        util.oslink(src, dest)
    except OSError:
        # if hardlinks fail, fallback on atomic copy
        dst = util.atomictempfile(dest)
        for chunk in util.filechunkiter(open(src, 'rb')):
            dst.write(chunk)
        dst.close()
        os.chmod(dest, os.stat(src).st_mode)

def usercachepath(ui, hash):
    path = ui.configpath(longname, 'usercache', None)
    if path:
        path = os.path.join(path, hash)
    else:
        if os.name == 'nt':
            appdata = os.getenv('LOCALAPPDATA', os.getenv('APPDATA'))
            if appdata:
                path = os.path.join(appdata, longname, hash)
        elif platform.system() == 'Darwin':
            home = os.getenv('HOME')
            if home:
                path = os.path.join(home, 'Library', 'Caches',
                                    longname, hash)
        elif os.name == 'posix':
            path = os.getenv('XDG_CACHE_HOME')
            if path:
                path = os.path.join(path, longname, hash)
            else:
                home = os.getenv('HOME')
                if home:
                    path = os.path.join(home, '.cache', longname, hash)
        else:
            raise util.Abort(_('unknown operating system: %s\n') % os.name)
    return path

def inusercache(ui, hash):
    path = usercachepath(ui, hash)
    return path and os.path.exists(path)

def findfile(repo, hash):
    if instore(repo, hash):
        repo.ui.note(_('found %s in store\n') % hash)
        return storepath(repo, hash)
    elif inusercache(repo.ui, hash):
        repo.ui.note(_('found %s in system cache\n') % hash)
        path = storepath(repo, hash)
        link(usercachepath(repo.ui, hash), path)
        return path
    return None

class largefilesdirstate(dirstate.dirstate):
    def __getitem__(self, key):
        return super(largefilesdirstate, self).__getitem__(unixpath(key))
    def normal(self, f):
        return super(largefilesdirstate, self).normal(unixpath(f))
    def remove(self, f):
        return super(largefilesdirstate, self).remove(unixpath(f))
    def add(self, f):
        return super(largefilesdirstate, self).add(unixpath(f))
    def drop(self, f):
        return super(largefilesdirstate, self).drop(unixpath(f))
    def forget(self, f):
        return super(largefilesdirstate, self).forget(unixpath(f))
    def normallookup(self, f):
        return super(largefilesdirstate, self).normallookup(unixpath(f))
    def _ignore(self):
        return False

def openlfdirstate(ui, repo, create=True):
    '''
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.
    '''
    lfstoredir = repo.join(longname)
    opener = scmutil.opener(lfstoredir)
    lfdirstate = largefilesdirstate(opener, ui, repo.root,
                                    repo.dirstate._validate)

    # If the largefiles dirstate does not exist, populate and create
    # it. This ensures that we create it on the first meaningful
    # largefiles operation in a new clone.
    if create and not os.path.exists(os.path.join(lfstoredir, 'dirstate')):
        util.makedirs(lfstoredir)
        matcher = getstandinmatcher(repo)
        for standin in repo.dirstate.walk(matcher, [], False, False):
            lfile = splitstandin(standin)
            lfdirstate.normallookup(lfile)
    return lfdirstate

def lfdirstatestatus(lfdirstate, repo, rev):
    match = match_.always(repo.root, repo.getcwd())
    s = lfdirstate.status(match, [], False, False, False)
    unsure, modified, added, removed, missing, unknown, ignored, clean = s
    for lfile in unsure:
        try:
            fctx = repo[rev][standin(lfile)]
        except LookupError:
            fctx = None
        if not fctx or fctx.data().strip() != hashfile(repo.wjoin(lfile)):
            modified.append(lfile)
        else:
            clean.append(lfile)
            lfdirstate.normal(lfile)
    return (modified, added, removed, missing, unknown, ignored, clean)

def listlfiles(repo, rev=None, matcher=None):
    '''return a list of largefiles in the working copy or the
    specified changeset'''

    if matcher is None:
        matcher = getstandinmatcher(repo)

    # ignore unknown files in working directory
    return [splitstandin(f)
            for f in repo[rev].walk(matcher)
            if rev is not None or repo.dirstate[f] != '?']

def instore(repo, hash):
    return os.path.exists(storepath(repo, hash))

def storepath(repo, hash):
    return repo.join(os.path.join(longname, hash))

def copyfromcache(repo, hash, filename):
    '''Copy the specified largefile from the repo or system cache to
    filename in the repository. Return true on success or false if the
    file was not found in either cache (which should not happened:
    this is meant to be called only after ensuring that the needed
    largefile exists in the cache).'''
    path = findfile(repo, hash)
    if path is None:
        return False
    util.makedirs(os.path.dirname(repo.wjoin(filename)))
    # The write may fail before the file is fully written, but we
    # don't use atomic writes in the working copy.
    shutil.copy(path, repo.wjoin(filename))
    return True

def copytostore(repo, rev, file, uploaded=False):
    hash = readstandin(repo, file, rev)
    if instore(repo, hash):
        return
    copytostoreabsolute(repo, repo.wjoin(file), hash)

def copyalltostore(repo, node):
    '''Copy all largefiles in a given revision to the store'''

    ctx = repo[node]
    for filename in ctx.files():
        if isstandin(filename) and filename in ctx.manifest():
            realfile = splitstandin(filename)
            copytostore(repo, ctx.node(), realfile)


def copytostoreabsolute(repo, file, hash):
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    elif not getattr(repo, "_isconverting", False):
        util.makedirs(os.path.dirname(storepath(repo, hash)))
        dst = util.atomictempfile(storepath(repo, hash),
                                  createmode=repo.store.createmode)
        for chunk in util.filechunkiter(open(file, 'rb')):
            dst.write(chunk)
        dst.close()
        linktousercache(repo, hash)

def linktousercache(repo, hash):
    path = usercachepath(repo.ui, hash)
    if path:
        link(storepath(repo, hash), path)

def getstandinmatcher(repo, pats=[], opts={}):
    '''Return a match object that applies pats to the standin directory'''
    standindir = repo.wjoin(shortname)
    if pats:
        pats = [os.path.join(standindir, pat) for pat in pats]
    else:
        # no patterns: relative to repo root
        pats = [standindir]
    # no warnings about missing files or directories
    match = scmutil.match(repo[None], pats, opts)
    match.bad = lambda f, msg: None
    return match

def composestandinmatcher(repo, rmatcher):
    '''Return a matcher that accepts standins corresponding to the
    files accepted by rmatcher. Pass the list of files in the matcher
    as the paths specified by the user.'''
    smatcher = getstandinmatcher(repo, rmatcher.files())
    isstandin = smatcher.matchfn
    def composedmatchfn(f):
        return isstandin(f) and rmatcher.matchfn(splitstandin(f))
    smatcher.matchfn = composedmatchfn

    return smatcher

def standin(filename):
    '''Return the repo-relative path to the standin for the specified big
    file.'''
    # Notes:
    # 1) Some callers want an absolute path, but for instance addlargefiles
    #    needs it repo-relative so it can be passed to repo[None].add(). So
    #    leave it up to the caller to use repo.wjoin() to get an absolute path.
    # 2) Join with '/' because that's what dirstate always uses, even on
    #    Windows. Change existing separator to '/' first in case we are
    #    passed filenames from an external source (like the command line).
    return shortnameslash + util.pconvert(filename)

def isstandin(filename):
    '''Return true if filename is a big file standin. filename must be
    in Mercurial's internal form (slash-separated).'''
    return filename.startswith(shortnameslash)

def splitstandin(filename):
    # Split on / because that's what dirstate always uses, even on Windows.
    # Change local separator to / first just in case we are passed filenames
    # from an external source (like the command line).
    bits = util.pconvert(filename).split('/', 1)
    if len(bits) == 2 and bits[0] == shortname:
        return bits[1]
    else:
        return None

def updatestandin(repo, standin):
    file = repo.wjoin(splitstandin(standin))
    if os.path.exists(file):
        hash = hashfile(file)
        executable = getexecutable(file)
        writestandin(repo, standin, hash, executable)

def readstandin(repo, filename, node=None):
    '''read hex hash from standin for filename at given node, or working
    directory if no node is given'''
    return repo[node][standin(filename)].data().strip()

def writestandin(repo, standin, hash, executable):
    '''write hash to <repo.root>/<standin>'''
    repo.wwrite(standin, hash + '\n', executable and 'x' or '')

def copyandhash(instream, outfile):
    '''Read bytes from instream (iterable) and write them to outfile,
    computing the SHA-1 hash of the data along the way. Return the hash.'''
    hasher = util.sha1('')
    for data in instream:
        hasher.update(data)
        outfile.write(data)
    return hasher.hexdigest()

def hashrepofile(repo, file):
    return hashfile(repo.wjoin(file))

def hashfile(file):
    if not os.path.exists(file):
        return ''
    hasher = util.sha1('')
    fd = open(file, 'rb')
    for data in util.filechunkiter(fd, 128 * 1024):
        hasher.update(data)
    fd.close()
    return hasher.hexdigest()

def getexecutable(filename):
    mode = os.stat(filename).st_mode
    return ((mode & stat.S_IXUSR) and
            (mode & stat.S_IXGRP) and
            (mode & stat.S_IXOTH))

def urljoin(first, second, *arg):
    def join(left, right):
        if not left.endswith('/'):
            left += '/'
        if right.startswith('/'):
            right = right[1:]
        return left + right

    url = join(first, second)
    for a in arg:
        url = join(url, a)
    return url

def hexsha1(data):
    """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
    object data"""
    h = util.sha1()
    for chunk in util.filechunkiter(data):
        h.update(chunk)
    return h.hexdigest()

def httpsendfile(ui, filename):
    return httpconnection.httpsendfile(ui, filename, 'rb')

def unixpath(path):
    '''Return a version of path normalized for use with the lfdirstate.'''
    return util.pconvert(os.path.normpath(path))

def islfilesrepo(repo):
    if ('largefiles' in repo.requirements and
        util.any(shortnameslash in f[0] for f in repo.store.datafiles())):
        return True

    return util.any(openlfdirstate(repo.ui, repo, False))

class storeprotonotcapable(Exception):
    def __init__(self, storetypes):
        self.storetypes = storetypes

def getstandinsstate(repo):
    standins = []
    matcher = getstandinmatcher(repo)
    for standin in repo.dirstate.walk(matcher, [], False, False):
        lfile = splitstandin(standin)
        try:
            hash = readstandin(repo, lfile)
        except IOError:
            hash = None
        standins.append((lfile, hash))
    return standins

def getlfilestoupdate(oldstandins, newstandins):
    changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
    filelist = []
    for f in changedstandins:
        if f[0] not in filelist:
            filelist.append(f[0])
    return filelist
+
+def getlfilestoupload(repo, missing, addfunc):
+    for n in missing:
+        parents = [p for p in repo.changelog.parents(n) if p != node.nullid]
+        ctx = repo[n]
+        files = set(ctx.files())
+        if len(parents) == 2:
+            mc = ctx.manifest()
+            mp1 = ctx.parents()[0].manifest()
+            mp2 = ctx.parents()[1].manifest()
+            for f in mp1:
+                if f not in mc:
+                    files.add(f)
+            for f in mp2:
+                if f not in mc:
+                    files.add(f)
+            for f in mc:
+                if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
+                    files.add(f)
+        for fn in files:
+            if isstandin(fn) and fn in ctx:
+                addfunc(fn, ctx[fn].data().strip())
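
The added getlfilestoupload helper walks each outgoing changeset (including files only touched by a merge) and reports every standin it finds through the addfunc callback, which receives the standin path and the hash stored in it. The following is a rough sketch, not part of this changeset, of how a caller on the push path might collect the largefile hashes to upload; the outgoinglfiles wrapper and the toupload set are illustrative names only:

    # Hypothetical caller, for illustration only: collect the hashes of all
    # largefiles referenced by changesets that are about to be pushed.
    import lfutil

    def outgoinglfiles(repo, missingnodes):
        toupload = set()
        def addfunc(fn, lfhash):
            # fn is a standin path (.hglf/...), lfhash is the largefile
            # hash recorded in that standin
            toupload.add(lfhash)
        lfutil.getlfilestoupload(repo, missingnodes, addfunc)
        return toupload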
@@ -1,1196 +1,1178 @@
# Copyright 2009-2010 Gregory P. Ward
# Copyright 2009-2010 Intelerad Medical Systems Incorporated
# Copyright 2010-2011 Fog Creek Software
# Copyright 2010-2011 Unity Technologies
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

'''Overridden Mercurial commands and functions for the largefiles extension'''

import os
import copy

from mercurial import hg, commands, util, cmdutil, scmutil, match as match_, \
-    node, archival, error, merge, discovery, pathutil, revset
+    archival, error, merge, discovery, pathutil, revset
from mercurial.i18n import _
from mercurial.node import hex
from hgext import rebase

import lfutil
import lfcommands
import basestore

# -- Utility functions: commonly/repeatedly needed functionality ---------------

def installnormalfilesmatchfn(manifest):
    '''overrides scmutil.match so that the matcher it returns will ignore all
    largefiles'''
    oldmatch = None # for the closure
    def overridematch(ctx, pats=[], opts={}, globbed=False,
            default='relpath'):
        match = oldmatch(ctx, pats, opts, globbed, default)
        m = copy.copy(match)
        notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
                manifest)
        m._files = filter(notlfile, m._files)
        m._fmap = set(m._files)
        m._always = False
        origmatchfn = m.matchfn
        m.matchfn = lambda f: notlfile(f) and origmatchfn(f) or None
        return m
    oldmatch = installmatchfn(overridematch)

def installmatchfn(f):
    oldmatch = scmutil.match
    setattr(f, 'oldmatch', oldmatch)
    scmutil.match = f
    return oldmatch

def restorematchfn():
    '''restores scmutil.match to what it was before installnormalfilesmatchfn
    was called. no-op if scmutil.match is its original function.

    Note that n calls to installnormalfilesmatchfn will require n calls to
    restore matchfn to reverse'''
    scmutil.match = getattr(scmutil.match, 'oldmatch', scmutil.match)

def addlargefiles(ui, repo, *pats, **opts):
    large = opts.pop('large', None)
    lfsize = lfutil.getminsize(
        ui, lfutil.islfilesrepo(repo), opts.pop('lfsize', None))

    lfmatcher = None
    if lfutil.islfilesrepo(repo):
        lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
        if lfpats:
            lfmatcher = match_.match(repo.root, '', list(lfpats))

    lfnames = []
    m = scmutil.match(repo[None], pats, opts)
    m.bad = lambda x, y: None
    wctx = repo[None]
    for f in repo.walk(m):
        exact = m.exact(f)
        lfile = lfutil.standin(f) in wctx
        nfile = f in wctx
        exists = lfile or nfile

        # Don't warn the user when they attempt to add a normal tracked file.
        # The normal add code will do that for us.
        if exact and exists:
            if lfile:
                ui.warn(_('%s already a largefile\n') % f)
            continue

        if (exact or not exists) and not lfutil.isstandin(f):
            wfile = repo.wjoin(f)

            # In case the file was removed previously, but not committed
            # (issue3507)
            if not os.path.exists(wfile):
                continue

            abovemin = (lfsize and
                        os.lstat(wfile).st_size >= lfsize * 1024 * 1024)
            if large or abovemin or (lfmatcher and lfmatcher(f)):
                lfnames.append(f)
                if ui.verbose or not exact:
                    ui.status(_('adding %s as a largefile\n') % m.rel(f))

    bad = []
    standins = []

    # Need to lock, otherwise there could be a race condition between
    # when standins are created and added to the repo.
    wlock = repo.wlock()
    try:
        if not opts.get('dry_run'):
            lfdirstate = lfutil.openlfdirstate(ui, repo)
            for f in lfnames:
                standinname = lfutil.standin(f)
                lfutil.writestandin(repo, standinname, hash='',
                    executable=lfutil.getexecutable(repo.wjoin(f)))
                standins.append(standinname)
                if lfdirstate[f] == 'r':
                    lfdirstate.normallookup(f)
                else:
                    lfdirstate.add(f)
            lfdirstate.write()
            bad += [lfutil.splitstandin(f)
                    for f in repo[None].add(standins)
                    if f in m.files()]
    finally:
        wlock.release()
    return bad

def removelargefiles(ui, repo, *pats, **opts):
    after = opts.get('after')
    if not pats and not after:
        raise util.Abort(_('no files specified'))
    m = scmutil.match(repo[None], pats, opts)
    try:
        repo.lfstatus = True
        s = repo.status(match=m, clean=True)
    finally:
        repo.lfstatus = False
    manifest = repo[None].manifest()
    modified, added, deleted, clean = [[f for f in list
                                        if lfutil.standin(f) in manifest]
                                       for list in [s[0], s[1], s[3], s[6]]]

    def warn(files, msg):
        for f in files:
            ui.warn(msg % m.rel(f))
        return int(len(files) > 0)

    result = 0

    if after:
        remove, forget = deleted, []
        result = warn(modified + added + clean,
                      _('not removing %s: file still exists\n'))
    else:
        remove, forget = deleted + clean, []
        result = warn(modified, _('not removing %s: file is modified (use -f'
                                  ' to force removal)\n'))
        result = warn(added, _('not removing %s: file has been marked for add'
                               ' (use forget to undo)\n')) or result

    for f in sorted(remove + forget):
        if ui.verbose or not m.exact(f):
            ui.status(_('removing %s\n') % m.rel(f))

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race in-between.
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in remove:
            if not after:
                # If this is being called by addremove, notify the user that we
                # are removing the file.
                if getattr(repo, "_isaddremove", False):
                    ui.status(_('removing %s\n') % f)
                util.unlinkpath(repo.wjoin(f), ignoremissing=True)
            lfdirstate.remove(f)
        lfdirstate.write()
        forget = [lfutil.standin(f) for f in forget]
        remove = [lfutil.standin(f) for f in remove]
        repo[None].forget(forget)
        # If this is being called by addremove, let the original addremove
        # function handle this.
        if not getattr(repo, "_isaddremove", False):
            for f in remove:
                util.unlinkpath(repo.wjoin(f), ignoremissing=True)
        repo[None].forget(remove)
    finally:
        wlock.release()

    return result

# For overriding mercurial.hgweb.webcommands so that largefiles will
# appear at their right place in the manifests.
def decodepath(orig, path):
    return lfutil.splitstandin(path) or path

# -- Wrappers: modify existing commands --------------------------------

# Add works by going through the files that the user wanted to add and
# checking if they should be added as largefiles. Then it makes a new
# matcher which matches only the normal files and runs the original
# version of add.
def overrideadd(orig, ui, repo, *pats, **opts):
    normal = opts.pop('normal')
    if normal:
        if opts.get('large'):
            raise util.Abort(_('--normal cannot be used with --large'))
        return orig(ui, repo, *pats, **opts)
    bad = addlargefiles(ui, repo, *pats, **opts)
    installnormalfilesmatchfn(repo[None].manifest())
    result = orig(ui, repo, *pats, **opts)
    restorematchfn()

    return (result == 1 or bad) and 1 or 0

def overrideremove(orig, ui, repo, *pats, **opts):
    installnormalfilesmatchfn(repo[None].manifest())
    result = orig(ui, repo, *pats, **opts)
    restorematchfn()
    return removelargefiles(ui, repo, *pats, **opts) or result

def overridestatusfn(orig, repo, rev2, **opts):
    try:
        repo._repo.lfstatus = True
        return orig(repo, rev2, **opts)
    finally:
        repo._repo.lfstatus = False

def overridestatus(orig, ui, repo, *pats, **opts):
    try:
        repo.lfstatus = True
        return orig(ui, repo, *pats, **opts)
    finally:
        repo.lfstatus = False

def overridedirty(orig, repo, ignoreupdate=False):
    try:
        repo._repo.lfstatus = True
        return orig(repo, ignoreupdate)
    finally:
        repo._repo.lfstatus = False

def overridelog(orig, ui, repo, *pats, **opts):
    def overridematch(ctx, pats=[], opts={}, globbed=False,
            default='relpath'):
        """Matcher that merges root directory with .hglf, suitable for log.
        It is still possible to match .hglf directly.
        For any listed files run log on the standin too.
        matchfn tries both the given filename and with .hglf stripped.
        """
        match = oldmatch(ctx, pats, opts, globbed, default)
        m = copy.copy(match)
        for i in range(0, len(m._files)):
            standin = lfutil.standin(m._files[i])
            if standin in repo[ctx.node()]:
                m._files[i] = standin
        m._fmap = set(m._files)
        m._always = False
        origmatchfn = m.matchfn
        def lfmatchfn(f):
            lf = lfutil.splitstandin(f)
            if lf is not None and origmatchfn(lf):
                return True
            r = origmatchfn(f)
            return r
        m.matchfn = lfmatchfn
        return m
    oldmatch = installmatchfn(overridematch)
    try:
        repo.lfstatus = True
        return orig(ui, repo, *pats, **opts)
    finally:
        repo.lfstatus = False
        restorematchfn()

def overrideverify(orig, ui, repo, *pats, **opts):
    large = opts.pop('large', False)
    all = opts.pop('lfa', False)
    contents = opts.pop('lfc', False)

    result = orig(ui, repo, *pats, **opts)
    if large or all or contents:
        result = result or lfcommands.verifylfiles(ui, repo, all, contents)
    return result

def overridedebugstate(orig, ui, repo, *pats, **opts):
    large = opts.pop('large', False)
    if large:
        lfcommands.debugdirstate(ui, repo)
    else:
        orig(ui, repo, *pats, **opts)

# Override needs to refresh standins so that update's normal merge
# will go through properly. Then the other update hook (overriding repo.update)
# will get the new files. Filemerge is also overridden so that the merge
# will merge standins correctly.
def overrideupdate(orig, ui, repo, *pats, **opts):
    lfdirstate = lfutil.openlfdirstate(ui, repo)
    s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
        False, False)
    (unsure, modified, added, removed, missing, unknown, ignored, clean) = s

    # Need to lock between the standins getting updated and their
    # largefiles getting updated
    wlock = repo.wlock()
    try:
        if opts['check']:
            mod = len(modified) > 0
            for lfile in unsure:
                standin = lfutil.standin(lfile)
                if repo['.'][standin].data().strip() != \
                        lfutil.hashfile(repo.wjoin(lfile)):
                    mod = True
                else:
                    lfdirstate.normal(lfile)
            lfdirstate.write()
            if mod:
                raise util.Abort(_('uncommitted changes'))
        # XXX handle removed differently
        if not opts['clean']:
            for lfile in unsure + modified + added:
                lfutil.updatestandin(repo, lfutil.standin(lfile))
    finally:
        wlock.release()
    return orig(ui, repo, *pats, **opts)

# Before starting the manifest merge, merge.updates will call
# _checkunknown to check if there are any files in the merged-in
# changeset that collide with unknown files in the working copy.
#
# The largefiles are seen as unknown, so this prevents us from merging
# in a file 'foo' if we already have a largefile with the same name.
#
# The overridden function filters the unknown files by removing any
# largefiles. This makes the merge proceed and we can then handle this
# case further in the overridden manifestmerge function below.
def overridecheckunknownfile(origfn, repo, wctx, mctx, f):
    if lfutil.standin(repo.dirstate.normalize(f)) in wctx:
        return False
    return origfn(repo, wctx, mctx, f)

# The manifest merge handles conflicts on the manifest level. We want
# to handle changes in largefile-ness of files at this level too.
#
# The strategy is to run the original manifestmerge and then process
# the action list it outputs. There are two cases we need to deal with:
#
# 1. Normal file in p1, largefile in p2. Here the largefile is
#    detected via its standin file, which will enter the working copy
#    with a "get" action. It is not "merge" since the standin is all
#    Mercurial is concerned with at this level -- the link to the
#    existing normal file is not relevant here.
#
# 2. Largefile in p1, normal file in p2. Here we get a "merge" action
#    since the largefile will be present in the working copy and
#    different from the normal file in p2. Mercurial therefore
#    triggers a merge action.
#
# In both cases, we prompt the user and emit new actions to either
# remove the standin (if the normal file was kept) or to remove the
# normal file and get the standin (if the largefile was kept). The
# default prompt answer is to use the largefile version since it was
# presumably changed on purpose.
#
# Finally, the merge.applyupdates function will then take care of
# writing the files into the working copy and lfcommands.updatelfiles
# will update the largefiles.
def overridecalculateupdates(origfn, repo, p1, p2, pa, branchmerge, force,
                             partial, acceptremote=False):
    overwrite = force and not branchmerge
    actions = origfn(repo, p1, p2, pa, branchmerge, force, partial,
                     acceptremote)

    if overwrite:
        return actions

    removes = set(a[0] for a in actions if a[1] == 'r')
    processed = []

    for action in actions:
        f, m, args, msg = action

        splitstandin = f and lfutil.splitstandin(f)
        if (m == "g" and splitstandin is not None and
            splitstandin in p1 and splitstandin not in removes):
            # Case 1: normal file in the working copy, largefile in
            # the second parent
            lfile = splitstandin
            standin = f
            msg = _('remote turned local normal file %s into a largefile\n'
                    'use (l)argefile or keep (n)ormal file?'
                    '$$ &Largefile $$ &Normal file') % lfile
            if repo.ui.promptchoice(msg, 0) == 0:
                processed.append((lfile, "r", None, msg))
                processed.append((standin, "g", (p2.flags(standin),), msg))
            else:
                processed.append((standin, "r", None, msg))
        elif (m == "g" and
              lfutil.standin(f) in p1 and lfutil.standin(f) not in removes):
            # Case 2: largefile in the working copy, normal file in
            # the second parent
            standin = lfutil.standin(f)
            lfile = f
            msg = _('remote turned local largefile %s into a normal file\n'
                    'keep (l)argefile or use (n)ormal file?'
                    '$$ &Largefile $$ &Normal file') % lfile
            if repo.ui.promptchoice(msg, 0) == 0:
                processed.append((lfile, "r", None, msg))
            else:
                processed.append((standin, "r", None, msg))
                processed.append((lfile, "g", (p2.flags(lfile),), msg))
        else:
            processed.append(action)

    return processed

# Override filemerge to prompt the user about how they wish to merge
# largefiles. This will handle identical edits without prompting the user.
def overridefilemerge(origfn, repo, mynode, orig, fcd, fco, fca):
    if not lfutil.isstandin(orig):
        return origfn(repo, mynode, orig, fcd, fco, fca)

    ahash = fca.data().strip().lower()
    dhash = fcd.data().strip().lower()
    ohash = fco.data().strip().lower()
    if (ohash != ahash and
        ohash != dhash and
        (dhash == ahash or
         repo.ui.promptchoice(
             _('largefile %s has a merge conflict\nancestor was %s\n'
               'keep (l)ocal %s or\ntake (o)ther %s?'
               '$$ &Local $$ &Other') %
               (lfutil.splitstandin(orig), ahash, dhash, ohash),
             0) == 1)):
        repo.wwrite(fcd.path(), fco.data(), fco.flags())
    return 0

# Copy first changes the matchers to match standins instead of
# largefiles. Then it overrides util.copyfile in that function it
# checks if the destination largefile already exists. It also keeps a
# list of copied files so that the largefiles can be copied and the
# dirstate updated.
def overridecopy(orig, ui, repo, pats, opts, rename=False):
    # doesn't remove largefile on rename
    if len(pats) < 2:
        # this isn't legal, let the original function deal with it
        return orig(ui, repo, pats, opts, rename)

    def makestandin(relpath):
        path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
        return os.path.join(repo.wjoin(lfutil.standin(path)))

    fullpats = scmutil.expandpats(pats)
    dest = fullpats[-1]

    if os.path.isdir(dest):
        if not os.path.isdir(makestandin(dest)):
            os.makedirs(makestandin(dest))
    # This could copy both lfiles and normal files in one command,
    # but we don't want to do that. First replace their matcher to
    # only match normal files and run it, then replace it to just
    # match largefiles and run it again.
    nonormalfiles = False
    nolfiles = False
    try:
        try:
            installnormalfilesmatchfn(repo[None].manifest())
            result = orig(ui, repo, pats, opts, rename)
        except util.Abort, e:
            if str(e) != _('no files to copy'):
                raise e
            else:
                nonormalfiles = True
            result = 0
    finally:
        restorematchfn()

    # The first rename can cause our current working directory to be removed.
    # In that case there is nothing left to copy/rename so just quit.
    try:
        repo.getcwd()
    except OSError:
        return result

    try:
        try:
            # When we call orig below it creates the standins but we don't add
            # them to the dir state until later so lock during that time.
            wlock = repo.wlock()

            manifest = repo[None].manifest()
            oldmatch = None # for the closure
492 oldmatch = None # for the closure
493 def overridematch(ctx, pats=[], opts={}, globbed=False,
493 def overridematch(ctx, pats=[], opts={}, globbed=False,
494 default='relpath'):
494 default='relpath'):
495 newpats = []
495 newpats = []
496 # The patterns were previously mangled to add the standin
496 # The patterns were previously mangled to add the standin
497 # directory; we need to remove that now
497 # directory; we need to remove that now
498 for pat in pats:
498 for pat in pats:
499 if match_.patkind(pat) is None and lfutil.shortname in pat:
499 if match_.patkind(pat) is None and lfutil.shortname in pat:
500 newpats.append(pat.replace(lfutil.shortname, ''))
500 newpats.append(pat.replace(lfutil.shortname, ''))
501 else:
501 else:
502 newpats.append(pat)
502 newpats.append(pat)
503 match = oldmatch(ctx, newpats, opts, globbed, default)
503 match = oldmatch(ctx, newpats, opts, globbed, default)
504 m = copy.copy(match)
504 m = copy.copy(match)
505 lfile = lambda f: lfutil.standin(f) in manifest
505 lfile = lambda f: lfutil.standin(f) in manifest
506 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
506 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
507 m._fmap = set(m._files)
507 m._fmap = set(m._files)
508 m._always = False
508 m._always = False
509 origmatchfn = m.matchfn
509 origmatchfn = m.matchfn
510 m.matchfn = lambda f: (lfutil.isstandin(f) and
510 m.matchfn = lambda f: (lfutil.isstandin(f) and
511 (f in manifest) and
511 (f in manifest) and
512 origmatchfn(lfutil.splitstandin(f)) or
512 origmatchfn(lfutil.splitstandin(f)) or
513 None)
513 None)
514 return m
514 return m
515 oldmatch = installmatchfn(overridematch)
515 oldmatch = installmatchfn(overridematch)
516 listpats = []
516 listpats = []
517 for pat in pats:
517 for pat in pats:
518 if match_.patkind(pat) is not None:
518 if match_.patkind(pat) is not None:
519 listpats.append(pat)
519 listpats.append(pat)
520 else:
520 else:
521 listpats.append(makestandin(pat))
521 listpats.append(makestandin(pat))
522
522
523 try:
523 try:
524 origcopyfile = util.copyfile
524 origcopyfile = util.copyfile
525 copiedfiles = []
525 copiedfiles = []
526 def overridecopyfile(src, dest):
526 def overridecopyfile(src, dest):
527 if (lfutil.shortname in src and
527 if (lfutil.shortname in src and
528 dest.startswith(repo.wjoin(lfutil.shortname))):
528 dest.startswith(repo.wjoin(lfutil.shortname))):
529 destlfile = dest.replace(lfutil.shortname, '')
529 destlfile = dest.replace(lfutil.shortname, '')
530 if not opts['force'] and os.path.exists(destlfile):
530 if not opts['force'] and os.path.exists(destlfile):
531 raise IOError('',
531 raise IOError('',
532 _('destination largefile already exists'))
532 _('destination largefile already exists'))
533 copiedfiles.append((src, dest))
533 copiedfiles.append((src, dest))
534 origcopyfile(src, dest)
534 origcopyfile(src, dest)
535
535
536 util.copyfile = overridecopyfile
536 util.copyfile = overridecopyfile
537 result += orig(ui, repo, listpats, opts, rename)
537 result += orig(ui, repo, listpats, opts, rename)
538 finally:
538 finally:
539 util.copyfile = origcopyfile
539 util.copyfile = origcopyfile
540
540
541 lfdirstate = lfutil.openlfdirstate(ui, repo)
541 lfdirstate = lfutil.openlfdirstate(ui, repo)
542 for (src, dest) in copiedfiles:
542 for (src, dest) in copiedfiles:
543 if (lfutil.shortname in src and
543 if (lfutil.shortname in src and
544 dest.startswith(repo.wjoin(lfutil.shortname))):
544 dest.startswith(repo.wjoin(lfutil.shortname))):
545 srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
545 srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
546 destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
546 destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
547 destlfiledir = os.path.dirname(repo.wjoin(destlfile)) or '.'
547 destlfiledir = os.path.dirname(repo.wjoin(destlfile)) or '.'
548 if not os.path.isdir(destlfiledir):
548 if not os.path.isdir(destlfiledir):
549 os.makedirs(destlfiledir)
549 os.makedirs(destlfiledir)
550 if rename:
550 if rename:
551 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
551 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
552 lfdirstate.remove(srclfile)
552 lfdirstate.remove(srclfile)
553 else:
553 else:
554 util.copyfile(repo.wjoin(srclfile),
554 util.copyfile(repo.wjoin(srclfile),
555 repo.wjoin(destlfile))
555 repo.wjoin(destlfile))
556
556
557 lfdirstate.add(destlfile)
557 lfdirstate.add(destlfile)
558 lfdirstate.write()
558 lfdirstate.write()
559 except util.Abort, e:
559 except util.Abort, e:
560 if str(e) != _('no files to copy'):
560 if str(e) != _('no files to copy'):
561 raise e
561 raise e
562 else:
562 else:
563 nolfiles = True
563 nolfiles = True
564 finally:
564 finally:
565 restorematchfn()
565 restorematchfn()
566 wlock.release()
566 wlock.release()
567
567
568 if nolfiles and nonormalfiles:
568 if nolfiles and nonormalfiles:
569 raise util.Abort(_('no files to copy'))
569 raise util.Abort(_('no files to copy'))
570
570
571 return result
571 return result
572
572
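overridecopy works by temporarily swapping out module-level functions (the match factory and util.copyfile) and restoring them in finally blocks. The same save/override/restore dance, written as a generic context manager, looks roughly like this (a sketch only; the extension uses explicit try/finally plus its own installmatchfn/restorematchfn helpers):

    import contextlib

    @contextlib.contextmanager
    def patched(namespace, name, replacement):
        # Temporarily replace namespace.<name>, restoring the original on exit.
        original = getattr(namespace, name)
        setattr(namespace, name, replacement)
        try:
            yield original
        finally:
            setattr(namespace, name, original)

    # usage sketch (names as used above):
    #   with patched(util, 'copyfile', overridecopyfile):
    #       result += orig(ui, repo, listpats, opts, rename)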
573 # When the user calls revert, we have to be careful to not revert any
573 # When the user calls revert, we have to be careful to not revert any
574 # changes to other largefiles accidentally. This means we have to keep
574 # changes to other largefiles accidentally. This means we have to keep
575 # track of the largefiles that are being reverted so we only pull down
575 # track of the largefiles that are being reverted so we only pull down
576 # the necessary largefiles.
576 # the necessary largefiles.
577 #
577 #
578 # Standins are only updated (to match the hash of largefiles) before
578 # Standins are only updated (to match the hash of largefiles) before
579 # commits. Update the standins then run the original revert, changing
579 # commits. Update the standins then run the original revert, changing
580 # the matcher to hit standins instead of largefiles. Based on the
580 # the matcher to hit standins instead of largefiles. Based on the
581 # resulting standins update the largefiles. Then return the standins
581 # resulting standins update the largefiles. Then return the standins
582 # to their proper state
582 # to their proper state
583 def overriderevert(orig, ui, repo, *pats, **opts):
583 def overriderevert(orig, ui, repo, *pats, **opts):
584 # Because we put the standins in a bad state (by updating them)
584 # Because we put the standins in a bad state (by updating them)
585 # and then return them to a correct state we need to lock to
585 # and then return them to a correct state we need to lock to
586 # prevent others from changing them in their incorrect state.
586 # prevent others from changing them in their incorrect state.
587 wlock = repo.wlock()
587 wlock = repo.wlock()
588 try:
588 try:
589 lfdirstate = lfutil.openlfdirstate(ui, repo)
589 lfdirstate = lfutil.openlfdirstate(ui, repo)
590 (modified, added, removed, missing, unknown, ignored, clean) = \
590 (modified, added, removed, missing, unknown, ignored, clean) = \
591 lfutil.lfdirstatestatus(lfdirstate, repo, repo['.'].rev())
591 lfutil.lfdirstatestatus(lfdirstate, repo, repo['.'].rev())
592 lfdirstate.write()
592 lfdirstate.write()
593 for lfile in modified:
593 for lfile in modified:
594 lfutil.updatestandin(repo, lfutil.standin(lfile))
594 lfutil.updatestandin(repo, lfutil.standin(lfile))
595 for lfile in missing:
595 for lfile in missing:
596 if (os.path.exists(repo.wjoin(lfutil.standin(lfile)))):
596 if (os.path.exists(repo.wjoin(lfutil.standin(lfile)))):
597 os.unlink(repo.wjoin(lfutil.standin(lfile)))
597 os.unlink(repo.wjoin(lfutil.standin(lfile)))
598
598
599 try:
599 try:
600 ctx = scmutil.revsingle(repo, opts.get('rev'))
600 ctx = scmutil.revsingle(repo, opts.get('rev'))
601 oldmatch = None # for the closure
601 oldmatch = None # for the closure
602 def overridematch(ctx, pats=[], opts={}, globbed=False,
602 def overridematch(ctx, pats=[], opts={}, globbed=False,
603 default='relpath'):
603 default='relpath'):
604 match = oldmatch(ctx, pats, opts, globbed, default)
604 match = oldmatch(ctx, pats, opts, globbed, default)
605 m = copy.copy(match)
605 m = copy.copy(match)
606 def tostandin(f):
606 def tostandin(f):
607 if lfutil.standin(f) in ctx:
607 if lfutil.standin(f) in ctx:
608 return lfutil.standin(f)
608 return lfutil.standin(f)
609 elif lfutil.standin(f) in repo[None]:
609 elif lfutil.standin(f) in repo[None]:
610 return None
610 return None
611 return f
611 return f
612 m._files = [tostandin(f) for f in m._files]
612 m._files = [tostandin(f) for f in m._files]
613 m._files = [f for f in m._files if f is not None]
613 m._files = [f for f in m._files if f is not None]
614 m._fmap = set(m._files)
614 m._fmap = set(m._files)
615 m._always = False
615 m._always = False
616 origmatchfn = m.matchfn
616 origmatchfn = m.matchfn
617 def matchfn(f):
617 def matchfn(f):
618 if lfutil.isstandin(f):
618 if lfutil.isstandin(f):
619 # We need to keep track of what largefiles are being
619 # We need to keep track of what largefiles are being
620 # matched so we know which ones to update later --
620 # matched so we know which ones to update later --
621 # otherwise we accidentally revert changes to other
621 # otherwise we accidentally revert changes to other
622 # largefiles. This is repo-specific, so duckpunch the
622 # largefiles. This is repo-specific, so duckpunch the
623 # repo object to keep the list of largefiles for us
623 # repo object to keep the list of largefiles for us
624 # later.
624 # later.
625 if origmatchfn(lfutil.splitstandin(f)) and \
625 if origmatchfn(lfutil.splitstandin(f)) and \
626 (f in repo[None] or f in ctx):
626 (f in repo[None] or f in ctx):
627 lfileslist = getattr(repo, '_lfilestoupdate', [])
627 lfileslist = getattr(repo, '_lfilestoupdate', [])
628 lfileslist.append(lfutil.splitstandin(f))
628 lfileslist.append(lfutil.splitstandin(f))
629 repo._lfilestoupdate = lfileslist
629 repo._lfilestoupdate = lfileslist
630 return True
630 return True
631 else:
631 else:
632 return False
632 return False
633 return origmatchfn(f)
633 return origmatchfn(f)
634 m.matchfn = matchfn
634 m.matchfn = matchfn
635 return m
635 return m
636 oldmatch = installmatchfn(overridematch)
636 oldmatch = installmatchfn(overridematch)
637 scmutil.match
637 scmutil.match
638 matches = overridematch(repo[None], pats, opts)
638 matches = overridematch(repo[None], pats, opts)
639 orig(ui, repo, *pats, **opts)
639 orig(ui, repo, *pats, **opts)
640 finally:
640 finally:
641 restorematchfn()
641 restorematchfn()
642 lfileslist = getattr(repo, '_lfilestoupdate', [])
642 lfileslist = getattr(repo, '_lfilestoupdate', [])
643 lfcommands.updatelfiles(ui, repo, filelist=lfileslist,
643 lfcommands.updatelfiles(ui, repo, filelist=lfileslist,
644 printmessage=False)
644 printmessage=False)
645
645
646 # empty out the largefiles list so we start fresh next time
646 # empty out the largefiles list so we start fresh next time
647 repo._lfilestoupdate = []
647 repo._lfilestoupdate = []
648 for lfile in modified:
648 for lfile in modified:
649 if lfile in lfileslist:
649 if lfile in lfileslist:
650 if os.path.exists(repo.wjoin(lfutil.standin(lfile))) and lfile\
650 if os.path.exists(repo.wjoin(lfutil.standin(lfile))) and lfile\
651 in repo['.']:
651 in repo['.']:
652 lfutil.writestandin(repo, lfutil.standin(lfile),
652 lfutil.writestandin(repo, lfutil.standin(lfile),
653 repo['.'][lfile].data().strip(),
653 repo['.'][lfile].data().strip(),
654 'x' in repo['.'][lfile].flags())
654 'x' in repo['.'][lfile].flags())
655 lfdirstate = lfutil.openlfdirstate(ui, repo)
655 lfdirstate = lfutil.openlfdirstate(ui, repo)
656 for lfile in added:
656 for lfile in added:
657 standin = lfutil.standin(lfile)
657 standin = lfutil.standin(lfile)
658 if standin not in ctx and (standin in matches or opts.get('all')):
658 if standin not in ctx and (standin in matches or opts.get('all')):
659 if lfile in lfdirstate:
659 if lfile in lfdirstate:
660 lfdirstate.drop(lfile)
660 lfdirstate.drop(lfile)
661 util.unlinkpath(repo.wjoin(standin))
661 util.unlinkpath(repo.wjoin(standin))
662 lfdirstate.write()
662 lfdirstate.write()
663 finally:
663 finally:
664 wlock.release()
664 wlock.release()
665
665
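The revert override relies on duck-punching: the matcher records every largefile it accepts in a list hung directly off the repo object, created on demand via getattr's default. The bookkeeping in isolation (hypothetical helper, shown only to make the pattern explicit):

    def remember(obj, attr, value):
        # Append value to a list stashed on obj under attr, creating it if needed.
        bucket = getattr(obj, attr, [])
        bucket.append(value)
        setattr(obj, attr, bucket)
        return bucket

    class _Repo(object):
        pass

    repo = _Repo()
    remember(repo, '_lfilestoupdate', 'big.bin')
    remember(repo, '_lfilestoupdate', 'other.bin')
    assert repo._lfilestoupdate == ['big.bin', 'other.bin']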
666 def hgupdaterepo(orig, repo, node, overwrite):
666 def hgupdaterepo(orig, repo, node, overwrite):
667 if not overwrite:
667 if not overwrite:
668 # Only call updatelfiles on the standins that have changed to save time
668 # Only call updatelfiles on the standins that have changed to save time
669 oldstandins = lfutil.getstandinsstate(repo)
669 oldstandins = lfutil.getstandinsstate(repo)
670
670
671 result = orig(repo, node, overwrite)
671 result = orig(repo, node, overwrite)
672
672
673 filelist = None
673 filelist = None
674 if not overwrite:
674 if not overwrite:
675 newstandins = lfutil.getstandinsstate(repo)
675 newstandins = lfutil.getstandinsstate(repo)
676 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
676 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
677 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist)
677 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist)
678 return result
678 return result
679
679
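hgupdaterepo narrows the post-update work by snapshotting the standins before and after the operation and only refreshing largefiles whose entries changed. A sketch of that comparison, assuming each snapshot is an iterable of (filename, hash) pairs (the real lfutil helpers may record more detail):

    def changedfiles(oldstate, newstate):
        # Files present in either snapshot whose recorded hash differs.
        old, new = dict(oldstate), dict(newstate)
        return sorted(f for f in set(old) | set(new) if old.get(f) != new.get(f))

    assert changedfiles([('a.bin', 'h1'), ('b.bin', 'h2')],
                        [('a.bin', 'h1'), ('b.bin', 'h3'), ('c.bin', 'h4')]) == \
        ['b.bin', 'c.bin']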
680 def hgmerge(orig, repo, node, force=None, remind=True):
680 def hgmerge(orig, repo, node, force=None, remind=True):
681 result = orig(repo, node, force, remind)
681 result = orig(repo, node, force, remind)
682 lfcommands.updatelfiles(repo.ui, repo)
682 lfcommands.updatelfiles(repo.ui, repo)
683 return result
683 return result
684
684
685 # When we rebase a repository with remotely changed largefiles, we need to
685 # When we rebase a repository with remotely changed largefiles, we need to
686 # take some extra care so that the largefiles are correctly updated in the
686 # take some extra care so that the largefiles are correctly updated in the
687 # working copy
687 # working copy
688 def overridepull(orig, ui, repo, source=None, **opts):
688 def overridepull(orig, ui, repo, source=None, **opts):
689 revsprepull = len(repo)
689 revsprepull = len(repo)
690 if not source:
690 if not source:
691 source = 'default'
691 source = 'default'
692 repo.lfpullsource = source
692 repo.lfpullsource = source
693 if opts.get('rebase', False):
693 if opts.get('rebase', False):
694 repo._isrebasing = True
694 repo._isrebasing = True
695 try:
695 try:
696 if opts.get('update'):
696 if opts.get('update'):
697 del opts['update']
697 del opts['update']
698 ui.debug('--update and --rebase are not compatible, ignoring '
698 ui.debug('--update and --rebase are not compatible, ignoring '
699 'the update flag\n')
699 'the update flag\n')
700 del opts['rebase']
700 del opts['rebase']
701 origpostincoming = commands.postincoming
701 origpostincoming = commands.postincoming
702 def _dummy(*args, **kwargs):
702 def _dummy(*args, **kwargs):
703 pass
703 pass
704 commands.postincoming = _dummy
704 commands.postincoming = _dummy
705 try:
705 try:
706 result = commands.pull(ui, repo, source, **opts)
706 result = commands.pull(ui, repo, source, **opts)
707 finally:
707 finally:
708 commands.postincoming = origpostincoming
708 commands.postincoming = origpostincoming
709 revspostpull = len(repo)
709 revspostpull = len(repo)
710 if revspostpull > revsprepull:
710 if revspostpull > revsprepull:
711 result = result or rebase.rebase(ui, repo)
711 result = result or rebase.rebase(ui, repo)
712 finally:
712 finally:
713 repo._isrebasing = False
713 repo._isrebasing = False
714 else:
714 else:
715 result = orig(ui, repo, source, **opts)
715 result = orig(ui, repo, source, **opts)
716 revspostpull = len(repo)
716 revspostpull = len(repo)
717 lfrevs = opts.get('lfrev', [])
717 lfrevs = opts.get('lfrev', [])
718 if opts.get('all_largefiles'):
718 if opts.get('all_largefiles'):
719 lfrevs.append('pulled()')
719 lfrevs.append('pulled()')
720 if lfrevs and revspostpull > revsprepull:
720 if lfrevs and revspostpull > revsprepull:
721 numcached = 0
721 numcached = 0
722 repo.firstpulled = revsprepull # for pulled() revset expression
722 repo.firstpulled = revsprepull # for pulled() revset expression
723 try:
723 try:
724 for rev in scmutil.revrange(repo, lfrevs):
724 for rev in scmutil.revrange(repo, lfrevs):
725 ui.note(_('pulling largefiles for revision %s\n') % rev)
725 ui.note(_('pulling largefiles for revision %s\n') % rev)
726 (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
726 (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
727 numcached += len(cached)
727 numcached += len(cached)
728 finally:
728 finally:
729 del repo.firstpulled
729 del repo.firstpulled
730 ui.status(_("%d largefiles cached\n") % numcached)
730 ui.status(_("%d largefiles cached\n") % numcached)
731 return result
731 return result
732
732
733 def pulledrevsetsymbol(repo, subset, x):
733 def pulledrevsetsymbol(repo, subset, x):
734 """``pulled()``
734 """``pulled()``
735 Changesets that have just been pulled.
735 Changesets that have just been pulled.
736
736
737 Only available with largefiles from pull --lfrev expressions.
737 Only available with largefiles from pull --lfrev expressions.
738
738
739 .. container:: verbose
739 .. container:: verbose
740
740
741 Some examples:
741 Some examples:
742
742
743 - pull largefiles for all new changesets::
743 - pull largefiles for all new changesets::
744
744
745 hg pull --lfrev "pulled()"
745 hg pull --lfrev "pulled()"
746
746
747 - pull largefiles for all new branch heads::
747 - pull largefiles for all new branch heads::
748
748
749 hg pull --lfrev "head(pulled()) and not closed()"
749 hg pull --lfrev "head(pulled()) and not closed()"
750
750
751 """
751 """
752
752
753 try:
753 try:
754 firstpulled = repo.firstpulled
754 firstpulled = repo.firstpulled
755 except AttributeError:
755 except AttributeError:
756 raise util.Abort(_("pulled() only available in --lfrev"))
756 raise util.Abort(_("pulled() only available in --lfrev"))
757 return revset.baseset([r for r in subset if r >= firstpulled])
757 return revset.baseset([r for r in subset if r >= firstpulled])
758
758
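The pulled() revset leans on the fact that local revision numbers are assigned sequentially: everything at or beyond the pre-pull repository length is new. Reduced to plain Python (a sketch of the idea, not the revset machinery):

    def pulled_revs(all_revs, firstpulled):
        # Revisions added by the last pull, given sequential rev numbering.
        return [r for r in all_revs if r >= firstpulled]

    # a repo with 5 revisions before the pull and 8 afterwards:
    assert pulled_revs(range(8), 5) == [5, 6, 7]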
759 def overrideclone(orig, ui, source, dest=None, **opts):
759 def overrideclone(orig, ui, source, dest=None, **opts):
760 d = dest
760 d = dest
761 if d is None:
761 if d is None:
762 d = hg.defaultdest(source)
762 d = hg.defaultdest(source)
763 if opts.get('all_largefiles') and not hg.islocal(d):
763 if opts.get('all_largefiles') and not hg.islocal(d):
764 raise util.Abort(_(
764 raise util.Abort(_(
765 '--all-largefiles is incompatible with non-local destination %s') %
765 '--all-largefiles is incompatible with non-local destination %s') %
766 d)
766 d)
767
767
768 return orig(ui, source, dest, **opts)
768 return orig(ui, source, dest, **opts)
769
769
770 def hgclone(orig, ui, opts, *args, **kwargs):
770 def hgclone(orig, ui, opts, *args, **kwargs):
771 result = orig(ui, opts, *args, **kwargs)
771 result = orig(ui, opts, *args, **kwargs)
772
772
773 if result is not None:
773 if result is not None:
774 sourcerepo, destrepo = result
774 sourcerepo, destrepo = result
775 repo = destrepo.local()
775 repo = destrepo.local()
776
776
777 # Caching is implicitly limited to 'rev' option, since the dest repo was
777 # Caching is implicitly limited to 'rev' option, since the dest repo was
778 # truncated at that point. The user may expect a download count with
778 # truncated at that point. The user may expect a download count with
779 # this option, so attempt the download whether or not this is a largefile repo.
779 # this option, so attempt the download whether or not this is a largefile repo.
780 if opts.get('all_largefiles'):
780 if opts.get('all_largefiles'):
781 success, missing = lfcommands.downloadlfiles(ui, repo, None)
781 success, missing = lfcommands.downloadlfiles(ui, repo, None)
782
782
783 if missing != 0:
783 if missing != 0:
784 return None
784 return None
785
785
786 return result
786 return result
787
787
788 def overriderebase(orig, ui, repo, **opts):
788 def overriderebase(orig, ui, repo, **opts):
789 repo._isrebasing = True
789 repo._isrebasing = True
790 try:
790 try:
791 return orig(ui, repo, **opts)
791 return orig(ui, repo, **opts)
792 finally:
792 finally:
793 repo._isrebasing = False
793 repo._isrebasing = False
794
794
795 def overridearchive(orig, repo, dest, node, kind, decode=True, matchfn=None,
795 def overridearchive(orig, repo, dest, node, kind, decode=True, matchfn=None,
796 prefix=None, mtime=None, subrepos=None):
796 prefix=None, mtime=None, subrepos=None):
797 # No need to lock because we are only reading history and
797 # No need to lock because we are only reading history and
798 # largefile caches, neither of which are modified.
798 # largefile caches, neither of which are modified.
799 lfcommands.cachelfiles(repo.ui, repo, node)
799 lfcommands.cachelfiles(repo.ui, repo, node)
800
800
801 if kind not in archival.archivers:
801 if kind not in archival.archivers:
802 raise util.Abort(_("unknown archive type '%s'") % kind)
802 raise util.Abort(_("unknown archive type '%s'") % kind)
803
803
804 ctx = repo[node]
804 ctx = repo[node]
805
805
806 if kind == 'files':
806 if kind == 'files':
807 if prefix:
807 if prefix:
808 raise util.Abort(
808 raise util.Abort(
809 _('cannot give prefix when archiving to files'))
809 _('cannot give prefix when archiving to files'))
810 else:
810 else:
811 prefix = archival.tidyprefix(dest, kind, prefix)
811 prefix = archival.tidyprefix(dest, kind, prefix)
812
812
813 def write(name, mode, islink, getdata):
813 def write(name, mode, islink, getdata):
814 if matchfn and not matchfn(name):
814 if matchfn and not matchfn(name):
815 return
815 return
816 data = getdata()
816 data = getdata()
817 if decode:
817 if decode:
818 data = repo.wwritedata(name, data)
818 data = repo.wwritedata(name, data)
819 archiver.addfile(prefix + name, mode, islink, data)
819 archiver.addfile(prefix + name, mode, islink, data)
820
820
821 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
821 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
822
822
823 if repo.ui.configbool("ui", "archivemeta", True):
823 if repo.ui.configbool("ui", "archivemeta", True):
824 def metadata():
824 def metadata():
825 base = 'repo: %s\nnode: %s\nbranch: %s\n' % (
825 base = 'repo: %s\nnode: %s\nbranch: %s\n' % (
826 hex(repo.changelog.node(0)), hex(node), ctx.branch())
826 hex(repo.changelog.node(0)), hex(node), ctx.branch())
827
827
828 tags = ''.join('tag: %s\n' % t for t in ctx.tags()
828 tags = ''.join('tag: %s\n' % t for t in ctx.tags()
829 if repo.tagtype(t) == 'global')
829 if repo.tagtype(t) == 'global')
830 if not tags:
830 if not tags:
831 repo.ui.pushbuffer()
831 repo.ui.pushbuffer()
832 opts = {'template': '{latesttag}\n{latesttagdistance}',
832 opts = {'template': '{latesttag}\n{latesttagdistance}',
833 'style': '', 'patch': None, 'git': None}
833 'style': '', 'patch': None, 'git': None}
834 cmdutil.show_changeset(repo.ui, repo, opts).show(ctx)
834 cmdutil.show_changeset(repo.ui, repo, opts).show(ctx)
835 ltags, dist = repo.ui.popbuffer().split('\n')
835 ltags, dist = repo.ui.popbuffer().split('\n')
836 tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':'))
836 tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':'))
837 tags += 'latesttagdistance: %s\n' % dist
837 tags += 'latesttagdistance: %s\n' % dist
838
838
839 return base + tags
839 return base + tags
840
840
841 write('.hg_archival.txt', 0644, False, metadata)
841 write('.hg_archival.txt', 0644, False, metadata)
842
842
843 for f in ctx:
843 for f in ctx:
844 ff = ctx.flags(f)
844 ff = ctx.flags(f)
845 getdata = ctx[f].data
845 getdata = ctx[f].data
846 if lfutil.isstandin(f):
846 if lfutil.isstandin(f):
847 path = lfutil.findfile(repo, getdata().strip())
847 path = lfutil.findfile(repo, getdata().strip())
848 if path is None:
848 if path is None:
849 raise util.Abort(
849 raise util.Abort(
850 _('largefile %s not found in repo store or system cache')
850 _('largefile %s not found in repo store or system cache')
851 % lfutil.splitstandin(f))
851 % lfutil.splitstandin(f))
852 f = lfutil.splitstandin(f)
852 f = lfutil.splitstandin(f)
853
853
854 def getdatafn():
854 def getdatafn():
855 fd = None
855 fd = None
856 try:
856 try:
857 fd = open(path, 'rb')
857 fd = open(path, 'rb')
858 return fd.read()
858 return fd.read()
859 finally:
859 finally:
860 if fd:
860 if fd:
861 fd.close()
861 fd.close()
862
862
863 getdata = getdatafn
863 getdata = getdatafn
864 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
864 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
865
865
866 if subrepos:
866 if subrepos:
867 for subpath in sorted(ctx.substate):
867 for subpath in sorted(ctx.substate):
868 sub = ctx.sub(subpath)
868 sub = ctx.sub(subpath)
869 submatch = match_.narrowmatcher(subpath, matchfn)
869 submatch = match_.narrowmatcher(subpath, matchfn)
870 sub.archive(repo.ui, archiver, prefix, submatch)
870 sub.archive(repo.ui, archiver, prefix, submatch)
871
871
872 archiver.done()
872 archiver.done()
873
873
874 def hgsubrepoarchive(orig, repo, ui, archiver, prefix, match=None):
874 def hgsubrepoarchive(orig, repo, ui, archiver, prefix, match=None):
875 repo._get(repo._state + ('hg',))
875 repo._get(repo._state + ('hg',))
876 rev = repo._state[1]
876 rev = repo._state[1]
877 ctx = repo._repo[rev]
877 ctx = repo._repo[rev]
878
878
879 lfcommands.cachelfiles(ui, repo._repo, ctx.node())
879 lfcommands.cachelfiles(ui, repo._repo, ctx.node())
880
880
881 def write(name, mode, islink, getdata):
881 def write(name, mode, islink, getdata):
882 # At this point, the standin has been replaced with the largefile name,
882 # At this point, the standin has been replaced with the largefile name,
883 # so the normal matcher works here without the lfutil variants.
883 # so the normal matcher works here without the lfutil variants.
884 if match and not match(f):
884 if match and not match(f):
885 return
885 return
886 data = getdata()
886 data = getdata()
887
887
888 archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)
888 archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)
889
889
890 for f in ctx:
890 for f in ctx:
891 ff = ctx.flags(f)
891 ff = ctx.flags(f)
892 getdata = ctx[f].data
892 getdata = ctx[f].data
893 if lfutil.isstandin(f):
893 if lfutil.isstandin(f):
894 path = lfutil.findfile(repo._repo, getdata().strip())
894 path = lfutil.findfile(repo._repo, getdata().strip())
895 if path is None:
895 if path is None:
896 raise util.Abort(
896 raise util.Abort(
897 _('largefile %s not found in repo store or system cache')
897 _('largefile %s not found in repo store or system cache')
898 % lfutil.splitstandin(f))
898 % lfutil.splitstandin(f))
899 f = lfutil.splitstandin(f)
899 f = lfutil.splitstandin(f)
900
900
901 def getdatafn():
901 def getdatafn():
902 fd = None
902 fd = None
903 try:
903 try:
904 fd = open(os.path.join(prefix, path), 'rb')
904 fd = open(os.path.join(prefix, path), 'rb')
905 return fd.read()
905 return fd.read()
906 finally:
906 finally:
907 if fd:
907 if fd:
908 fd.close()
908 fd.close()
909
909
910 getdata = getdatafn
910 getdata = getdatafn
911
911
912 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
912 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
913
913
914 for subpath in sorted(ctx.substate):
914 for subpath in sorted(ctx.substate):
915 sub = ctx.sub(subpath)
915 sub = ctx.sub(subpath)
916 submatch = match_.narrowmatcher(subpath, match)
916 submatch = match_.narrowmatcher(subpath, match)
917 sub.archive(ui, archiver, os.path.join(prefix, repo._path) + '/',
917 sub.archive(ui, archiver, os.path.join(prefix, repo._path) + '/',
918 submatch)
918 submatch)
919
919
920 # If a largefile is modified, the change is not reflected in its
920 # If a largefile is modified, the change is not reflected in its
921 # standin until a commit. cmdutil.bailifchanged() raises an exception
921 # standin until a commit. cmdutil.bailifchanged() raises an exception
922 # if the repo has uncommitted changes. Wrap it to also check if
922 # if the repo has uncommitted changes. Wrap it to also check if
923 # largefiles were changed. This is used by bisect and backout.
923 # largefiles were changed. This is used by bisect and backout.
924 def overridebailifchanged(orig, repo):
924 def overridebailifchanged(orig, repo):
925 orig(repo)
925 orig(repo)
926 repo.lfstatus = True
926 repo.lfstatus = True
927 modified, added, removed, deleted = repo.status()[:4]
927 modified, added, removed, deleted = repo.status()[:4]
928 repo.lfstatus = False
928 repo.lfstatus = False
929 if modified or added or removed or deleted:
929 if modified or added or removed or deleted:
930 raise util.Abort(_('uncommitted changes'))
930 raise util.Abort(_('uncommitted changes'))
931
931
932 # Fetch doesn't use cmdutil.bailifchanged so override it to add the check
932 # Fetch doesn't use cmdutil.bailifchanged so override it to add the check
933 def overridefetch(orig, ui, repo, *pats, **opts):
933 def overridefetch(orig, ui, repo, *pats, **opts):
934 repo.lfstatus = True
934 repo.lfstatus = True
935 modified, added, removed, deleted = repo.status()[:4]
935 modified, added, removed, deleted = repo.status()[:4]
936 repo.lfstatus = False
936 repo.lfstatus = False
937 if modified or added or removed or deleted:
937 if modified or added or removed or deleted:
938 raise util.Abort(_('uncommitted changes'))
938 raise util.Abort(_('uncommitted changes'))
939 return orig(ui, repo, *pats, **opts)
939 return orig(ui, repo, *pats, **opts)
940
940
941 def overrideforget(orig, ui, repo, *pats, **opts):
941 def overrideforget(orig, ui, repo, *pats, **opts):
942 installnormalfilesmatchfn(repo[None].manifest())
942 installnormalfilesmatchfn(repo[None].manifest())
943 result = orig(ui, repo, *pats, **opts)
943 result = orig(ui, repo, *pats, **opts)
944 restorematchfn()
944 restorematchfn()
945 m = scmutil.match(repo[None], pats, opts)
945 m = scmutil.match(repo[None], pats, opts)
946
946
947 try:
947 try:
948 repo.lfstatus = True
948 repo.lfstatus = True
949 s = repo.status(match=m, clean=True)
949 s = repo.status(match=m, clean=True)
950 finally:
950 finally:
951 repo.lfstatus = False
951 repo.lfstatus = False
952 forget = sorted(s[0] + s[1] + s[3] + s[6])
952 forget = sorted(s[0] + s[1] + s[3] + s[6])
953 forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]
953 forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]
954
954
955 for f in forget:
955 for f in forget:
956 if lfutil.standin(f) not in repo.dirstate and not \
956 if lfutil.standin(f) not in repo.dirstate and not \
957 os.path.isdir(m.rel(lfutil.standin(f))):
957 os.path.isdir(m.rel(lfutil.standin(f))):
958 ui.warn(_('not removing %s: file is already untracked\n')
958 ui.warn(_('not removing %s: file is already untracked\n')
959 % m.rel(f))
959 % m.rel(f))
960 result = 1
960 result = 1
961
961
962 for f in forget:
962 for f in forget:
963 if ui.verbose or not m.exact(f):
963 if ui.verbose or not m.exact(f):
964 ui.status(_('removing %s\n') % m.rel(f))
964 ui.status(_('removing %s\n') % m.rel(f))
965
965
966 # Need to lock because standin files are deleted then removed from the
966 # Need to lock because standin files are deleted then removed from the
967 # repository and we could race in-between.
967 # repository and we could race in-between.
968 wlock = repo.wlock()
968 wlock = repo.wlock()
969 try:
969 try:
970 lfdirstate = lfutil.openlfdirstate(ui, repo)
970 lfdirstate = lfutil.openlfdirstate(ui, repo)
971 for f in forget:
971 for f in forget:
972 if lfdirstate[f] == 'a':
972 if lfdirstate[f] == 'a':
973 lfdirstate.drop(f)
973 lfdirstate.drop(f)
974 else:
974 else:
975 lfdirstate.remove(f)
975 lfdirstate.remove(f)
976 lfdirstate.write()
976 lfdirstate.write()
977 standins = [lfutil.standin(f) for f in forget]
977 standins = [lfutil.standin(f) for f in forget]
978 for f in standins:
978 for f in standins:
979 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
979 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
980 repo[None].forget(standins)
980 repo[None].forget(standins)
981 finally:
981 finally:
982 wlock.release()
982 wlock.release()
983
983
984 return result
984 return result
985
985
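The index arithmetic above (s[0] + s[1] + s[3] + s[6]) picks particular buckets out of the seven-element status tuple. Spelled out with names (ordering per Mercurial's classic status tuple; sketch only):

    MODIFIED, ADDED, REMOVED, DELETED, UNKNOWN, IGNORED, CLEAN = range(7)

    def forgetcandidates(status):
        # Tracked files hg forget should consider: modified, added, deleted, clean.
        return sorted(status[MODIFIED] + status[ADDED] +
                      status[DELETED] + status[CLEAN])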
986 def getoutgoinglfiles(ui, repo, dest=None, **opts):
986 def getoutgoinglfiles(ui, repo, dest=None, **opts):
987 dest = ui.expandpath(dest or 'default-push', dest or 'default')
987 dest = ui.expandpath(dest or 'default-push', dest or 'default')
988 dest, branches = hg.parseurl(dest, opts.get('branch'))
988 dest, branches = hg.parseurl(dest, opts.get('branch'))
989 revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev'))
989 revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev'))
990 if revs:
990 if revs:
991 revs = [repo.lookup(rev) for rev in scmutil.revrange(repo, revs)]
991 revs = [repo.lookup(rev) for rev in scmutil.revrange(repo, revs)]
992
992
993 try:
993 try:
994 remote = hg.peer(repo, opts, dest)
994 remote = hg.peer(repo, opts, dest)
995 except error.RepoError:
995 except error.RepoError:
996 return None
996 return None
997 outgoing = discovery.findcommonoutgoing(repo, remote.peer(), force=False)
997 outgoing = discovery.findcommonoutgoing(repo, remote.peer(), force=False)
998 if not outgoing.missing:
998 if not outgoing.missing:
999 return outgoing.missing
999 return outgoing.missing
1000 o = repo.changelog.nodesbetween(outgoing.missing, revs)[0]
1000 o = repo.changelog.nodesbetween(outgoing.missing, revs)[0]
1001 if opts.get('newest_first'):
1001 if opts.get('newest_first'):
1002 o.reverse()
1002 o.reverse()
1003
1003
1004 toupload = set()
1004 toupload = set()
1005 for n in o:
1005 lfutil.getlfilestoupload(repo, o, lambda fn, lfhash: toupload.add(fn))
1006 parents = [p for p in repo.changelog.parents(n) if p != node.nullid]
1007 ctx = repo[n]
1008 files = set(ctx.files())
1009 if len(parents) == 2:
1010 mc = ctx.manifest()
1011 mp1 = ctx.parents()[0].manifest()
1012 mp2 = ctx.parents()[1].manifest()
1013 for f in mp1:
1014 if f not in mc:
1015 files.add(f)
1016 for f in mp2:
1017 if f not in mc:
1018 files.add(f)
1019 for f in mc:
1020 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
1021 files.add(f)
1022 toupload = toupload.union(
1023 set([f for f in files if lfutil.isstandin(f) and f in ctx]))
1024 return sorted(toupload)
1006 return sorted(toupload)
1025
1007
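This is the change the patch is about: instead of re-deriving a merge-aware file list per changeset here, the code now hands lfutil.getlfilestoupload a callback that simply records each outgoing standin. The collect-via-callback shape, sketched with fake data (the real helper walks manifests; the pair format below is an assumption for illustration):

    def collect_outgoing(changesets, addfunc):
        # Invoke addfunc for every (filename, hash) pair found in the outgoing set.
        for files in changesets:
            for fn, lfhash in files:
                addfunc(fn, lfhash)

    toupload = set()
    collect_outgoing([[('a.bin', 'x1')], [('b.bin', 'x2'), ('a.bin', 'x1')]],
                     lambda fn, lfhash: toupload.add(fn))
    assert sorted(toupload) == ['a.bin', 'b.bin']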
1026 def overrideoutgoing(orig, ui, repo, dest=None, **opts):
1008 def overrideoutgoing(orig, ui, repo, dest=None, **opts):
1027 result = orig(ui, repo, dest, **opts)
1009 result = orig(ui, repo, dest, **opts)
1028
1010
1029 if opts.pop('large', None):
1011 if opts.pop('large', None):
1030 toupload = getoutgoinglfiles(ui, repo, dest, **opts)
1012 toupload = getoutgoinglfiles(ui, repo, dest, **opts)
1031 if toupload is None:
1013 if toupload is None:
1032 ui.status(_('largefiles: No remote repo\n'))
1014 ui.status(_('largefiles: No remote repo\n'))
1033 elif not toupload:
1015 elif not toupload:
1034 ui.status(_('largefiles: no files to upload\n'))
1016 ui.status(_('largefiles: no files to upload\n'))
1035 else:
1017 else:
1036 ui.status(_('largefiles to upload:\n'))
1018 ui.status(_('largefiles to upload:\n'))
1037 for file in toupload:
1019 for file in toupload:
1038 ui.status(lfutil.splitstandin(file) + '\n')
1020 ui.status(lfutil.splitstandin(file) + '\n')
1039 ui.status('\n')
1021 ui.status('\n')
1040
1022
1041 return result
1023 return result
1042
1024
1043 def overridesummary(orig, ui, repo, *pats, **opts):
1025 def overridesummary(orig, ui, repo, *pats, **opts):
1044 try:
1026 try:
1045 repo.lfstatus = True
1027 repo.lfstatus = True
1046 orig(ui, repo, *pats, **opts)
1028 orig(ui, repo, *pats, **opts)
1047 finally:
1029 finally:
1048 repo.lfstatus = False
1030 repo.lfstatus = False
1049
1031
1050 if opts.pop('large', None):
1032 if opts.pop('large', None):
1051 toupload = getoutgoinglfiles(ui, repo, None, **opts)
1033 toupload = getoutgoinglfiles(ui, repo, None, **opts)
1052 if toupload is None:
1034 if toupload is None:
1053 # i18n: column positioning for "hg summary"
1035 # i18n: column positioning for "hg summary"
1054 ui.status(_('largefiles: (no remote repo)\n'))
1036 ui.status(_('largefiles: (no remote repo)\n'))
1055 elif not toupload:
1037 elif not toupload:
1056 # i18n: column positioning for "hg summary"
1038 # i18n: column positioning for "hg summary"
1057 ui.status(_('largefiles: (no files to upload)\n'))
1039 ui.status(_('largefiles: (no files to upload)\n'))
1058 else:
1040 else:
1059 # i18n: column positioning for "hg summary"
1041 # i18n: column positioning for "hg summary"
1060 ui.status(_('largefiles: %d to upload\n') % len(toupload))
1042 ui.status(_('largefiles: %d to upload\n') % len(toupload))
1061
1043
1062 def scmutiladdremove(orig, repo, pats=[], opts={}, dry_run=None,
1044 def scmutiladdremove(orig, repo, pats=[], opts={}, dry_run=None,
1063 similarity=None):
1045 similarity=None):
1064 if not lfutil.islfilesrepo(repo):
1046 if not lfutil.islfilesrepo(repo):
1065 return orig(repo, pats, opts, dry_run, similarity)
1047 return orig(repo, pats, opts, dry_run, similarity)
1066 # Get the list of missing largefiles so we can remove them
1048 # Get the list of missing largefiles so we can remove them
1067 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1049 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1068 s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
1050 s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
1069 False, False)
1051 False, False)
1070 (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
1052 (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
1071
1053
1072 # Call into the normal remove code, but leave the removal of the standin to
1054 # Call into the normal remove code, but leave the removal of the standin to
1073 # the original addremove. Monkey patching here makes sure
1055 # the original addremove. Monkey patching here makes sure
1074 # we don't remove the standin in the largefiles code, preventing a very
1056 # we don't remove the standin in the largefiles code, preventing a very
1075 # confused state later.
1057 # confused state later.
1076 if missing:
1058 if missing:
1077 m = [repo.wjoin(f) for f in missing]
1059 m = [repo.wjoin(f) for f in missing]
1078 repo._isaddremove = True
1060 repo._isaddremove = True
1079 removelargefiles(repo.ui, repo, *m, **opts)
1061 removelargefiles(repo.ui, repo, *m, **opts)
1080 repo._isaddremove = False
1062 repo._isaddremove = False
1081 # Call into the normal add code, and any files that *should* be added as
1063 # Call into the normal add code, and any files that *should* be added as
1082 # largefiles will be
1064 # largefiles will be
1083 addlargefiles(repo.ui, repo, *pats, **opts)
1065 addlargefiles(repo.ui, repo, *pats, **opts)
1084 # Now that we've handled largefiles, hand off to the original addremove
1066 # Now that we've handled largefiles, hand off to the original addremove
1085 # function to take care of the rest. Make sure it doesn't do anything with
1067 # function to take care of the rest. Make sure it doesn't do anything with
1086 # largefiles by installing a matcher that will ignore them.
1068 # largefiles by installing a matcher that will ignore them.
1087 installnormalfilesmatchfn(repo[None].manifest())
1069 installnormalfilesmatchfn(repo[None].manifest())
1088 result = orig(repo, pats, opts, dry_run, similarity)
1070 result = orig(repo, pats, opts, dry_run, similarity)
1089 restorematchfn()
1071 restorematchfn()
1090 return result
1072 return result
1091
1073
1092 # Calling purge with --all will cause the largefiles to be deleted.
1074 # Calling purge with --all will cause the largefiles to be deleted.
1093 # Override repo.status to prevent this from happening.
1075 # Override repo.status to prevent this from happening.
1094 def overridepurge(orig, ui, repo, *dirs, **opts):
1076 def overridepurge(orig, ui, repo, *dirs, **opts):
1095 # XXX large file status is buggy when used on repo proxy.
1077 # XXX large file status is buggy when used on repo proxy.
1096 # XXX this needs to be investigated.
1078 # XXX this needs to be investigated.
1097 repo = repo.unfiltered()
1079 repo = repo.unfiltered()
1098 oldstatus = repo.status
1080 oldstatus = repo.status
1099 def overridestatus(node1='.', node2=None, match=None, ignored=False,
1081 def overridestatus(node1='.', node2=None, match=None, ignored=False,
1100 clean=False, unknown=False, listsubrepos=False):
1082 clean=False, unknown=False, listsubrepos=False):
1101 r = oldstatus(node1, node2, match, ignored, clean, unknown,
1083 r = oldstatus(node1, node2, match, ignored, clean, unknown,
1102 listsubrepos)
1084 listsubrepos)
1103 lfdirstate = lfutil.openlfdirstate(ui, repo)
1085 lfdirstate = lfutil.openlfdirstate(ui, repo)
1104 modified, added, removed, deleted, unknown, ignored, clean = r
1086 modified, added, removed, deleted, unknown, ignored, clean = r
1105 unknown = [f for f in unknown if lfdirstate[f] == '?']
1087 unknown = [f for f in unknown if lfdirstate[f] == '?']
1106 ignored = [f for f in ignored if lfdirstate[f] == '?']
1088 ignored = [f for f in ignored if lfdirstate[f] == '?']
1107 return modified, added, removed, deleted, unknown, ignored, clean
1089 return modified, added, removed, deleted, unknown, ignored, clean
1108 repo.status = overridestatus
1090 repo.status = overridestatus
1109 orig(ui, repo, *dirs, **opts)
1091 orig(ui, repo, *dirs, **opts)
1110 repo.status = oldstatus
1092 repo.status = oldstatus
1111
1093
1112 def overriderollback(orig, ui, repo, **opts):
1094 def overriderollback(orig, ui, repo, **opts):
1113 result = orig(ui, repo, **opts)
1095 result = orig(ui, repo, **opts)
1114 merge.update(repo, node=None, branchmerge=False, force=True,
1096 merge.update(repo, node=None, branchmerge=False, force=True,
1115 partial=lfutil.isstandin)
1097 partial=lfutil.isstandin)
1116 wlock = repo.wlock()
1098 wlock = repo.wlock()
1117 try:
1099 try:
1118 lfdirstate = lfutil.openlfdirstate(ui, repo)
1100 lfdirstate = lfutil.openlfdirstate(ui, repo)
1119 lfiles = lfutil.listlfiles(repo)
1101 lfiles = lfutil.listlfiles(repo)
1120 oldlfiles = lfutil.listlfiles(repo, repo[None].parents()[0].rev())
1102 oldlfiles = lfutil.listlfiles(repo, repo[None].parents()[0].rev())
1121 for file in lfiles:
1103 for file in lfiles:
1122 if file in oldlfiles:
1104 if file in oldlfiles:
1123 lfdirstate.normallookup(file)
1105 lfdirstate.normallookup(file)
1124 else:
1106 else:
1125 lfdirstate.add(file)
1107 lfdirstate.add(file)
1126 lfdirstate.write()
1108 lfdirstate.write()
1127 finally:
1109 finally:
1128 wlock.release()
1110 wlock.release()
1129 return result
1111 return result
1130
1112
1131 def overridetransplant(orig, ui, repo, *revs, **opts):
1113 def overridetransplant(orig, ui, repo, *revs, **opts):
1132 try:
1114 try:
1133 oldstandins = lfutil.getstandinsstate(repo)
1115 oldstandins = lfutil.getstandinsstate(repo)
1134 repo._istransplanting = True
1116 repo._istransplanting = True
1135 result = orig(ui, repo, *revs, **opts)
1117 result = orig(ui, repo, *revs, **opts)
1136 newstandins = lfutil.getstandinsstate(repo)
1118 newstandins = lfutil.getstandinsstate(repo)
1137 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
1119 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
1138 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1120 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1139 printmessage=True)
1121 printmessage=True)
1140 finally:
1122 finally:
1141 repo._istransplanting = False
1123 repo._istransplanting = False
1142 return result
1124 return result
1143
1125
1144 def overridecat(orig, ui, repo, file1, *pats, **opts):
1126 def overridecat(orig, ui, repo, file1, *pats, **opts):
1145 ctx = scmutil.revsingle(repo, opts.get('rev'))
1127 ctx = scmutil.revsingle(repo, opts.get('rev'))
1146 err = 1
1128 err = 1
1147 notbad = set()
1129 notbad = set()
1148 m = scmutil.match(ctx, (file1,) + pats, opts)
1130 m = scmutil.match(ctx, (file1,) + pats, opts)
1149 origmatchfn = m.matchfn
1131 origmatchfn = m.matchfn
1150 def lfmatchfn(f):
1132 def lfmatchfn(f):
1151 lf = lfutil.splitstandin(f)
1133 lf = lfutil.splitstandin(f)
1152 if lf is None:
1134 if lf is None:
1153 return origmatchfn(f)
1135 return origmatchfn(f)
1154 notbad.add(lf)
1136 notbad.add(lf)
1155 return origmatchfn(lf)
1137 return origmatchfn(lf)
1156 m.matchfn = lfmatchfn
1138 m.matchfn = lfmatchfn
1157 origbadfn = m.bad
1139 origbadfn = m.bad
1158 def lfbadfn(f, msg):
1140 def lfbadfn(f, msg):
1159 if not f in notbad:
1141 if not f in notbad:
1160 return origbadfn(f, msg)
1142 return origbadfn(f, msg)
1161 m.bad = lfbadfn
1143 m.bad = lfbadfn
1162 for f in ctx.walk(m):
1144 for f in ctx.walk(m):
1163 fp = cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
1145 fp = cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
1164 pathname=f)
1146 pathname=f)
1165 lf = lfutil.splitstandin(f)
1147 lf = lfutil.splitstandin(f)
1166 if lf is None:
1148 if lf is None:
1167 # duplicating unreachable code from commands.cat
1149 # duplicating unreachable code from commands.cat
1168 data = ctx[f].data()
1150 data = ctx[f].data()
1169 if opts.get('decode'):
1151 if opts.get('decode'):
1170 data = repo.wwritedata(f, data)
1152 data = repo.wwritedata(f, data)
1171 fp.write(data)
1153 fp.write(data)
1172 else:
1154 else:
1173 hash = lfutil.readstandin(repo, lf, ctx.rev())
1155 hash = lfutil.readstandin(repo, lf, ctx.rev())
1174 if not lfutil.inusercache(repo.ui, hash):
1156 if not lfutil.inusercache(repo.ui, hash):
1175 store = basestore._openstore(repo)
1157 store = basestore._openstore(repo)
1176 success, missing = store.get([(lf, hash)])
1158 success, missing = store.get([(lf, hash)])
1177 if len(success) != 1:
1159 if len(success) != 1:
1178 raise util.Abort(
1160 raise util.Abort(
1179 _('largefile %s is not in cache and could not be '
1161 _('largefile %s is not in cache and could not be '
1180 'downloaded') % lf)
1162 'downloaded') % lf)
1181 path = lfutil.usercachepath(repo.ui, hash)
1163 path = lfutil.usercachepath(repo.ui, hash)
1182 fpin = open(path, "rb")
1164 fpin = open(path, "rb")
1183 for chunk in util.filechunkiter(fpin, 128 * 1024):
1165 for chunk in util.filechunkiter(fpin, 128 * 1024):
1184 fp.write(chunk)
1166 fp.write(chunk)
1185 fpin.close()
1167 fpin.close()
1186 fp.close()
1168 fp.close()
1187 err = 0
1169 err = 0
1188 return err
1170 return err
1189
1171
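When a largefile has to be streamed out of the user cache, the cat override reads it in fixed-size chunks rather than slurping it whole. The generator behind that idea looks roughly like this (a sketch of the chunking pattern, not util.filechunkiter itself):

    def filechunks(fileobj, size=128 * 1024):
        # Yield the file's contents in chunks of at most `size` bytes.
        while True:
            chunk = fileobj.read(size)
            if not chunk:
                break
            yield chunk

    # usage sketch:
    #   with open(path, 'rb') as fpin:
    #       for chunk in filechunks(fpin):
    #           fp.write(chunk)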
1190 def mercurialsinkbefore(orig, sink):
1172 def mercurialsinkbefore(orig, sink):
1191 sink.repo._isconverting = True
1173 sink.repo._isconverting = True
1192 orig(sink)
1174 orig(sink)
1193
1175
1194 def mercurialsinkafter(orig, sink):
1176 def mercurialsinkafter(orig, sink):
1195 sink.repo._isconverting = False
1177 sink.repo._isconverting = False
1196 orig(sink)
1178 orig(sink)
@@ -1,514 +1,491 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''setup for largefiles repositories: reposetup'''
9 '''setup for largefiles repositories: reposetup'''
10 import copy
10 import copy
11 import os
11 import os
12
12
13 from mercurial import error, manifest, match as match_, util, discovery
13 from mercurial import error, manifest, match as match_, util, discovery
14 from mercurial import node as node_
15 from mercurial.i18n import _
14 from mercurial.i18n import _
16 from mercurial import localrepo
15 from mercurial import localrepo
17
16
18 import lfcommands
17 import lfcommands
19 import lfutil
18 import lfutil
20
19
21 def reposetup(ui, repo):
20 def reposetup(ui, repo):
22 # wire repositories should be given new wireproto functions
21 # wire repositories should be given new wireproto functions
23 # by "proto.wirereposetup()" via "hg.wirepeersetupfuncs"
22 # by "proto.wirereposetup()" via "hg.wirepeersetupfuncs"
24 if not repo.local():
23 if not repo.local():
25 return
24 return
26
25
27 class lfilesrepo(repo.__class__):
26 class lfilesrepo(repo.__class__):
28 lfstatus = False
27 lfstatus = False
29 def status_nolfiles(self, *args, **kwargs):
28 def status_nolfiles(self, *args, **kwargs):
30 return super(lfilesrepo, self).status(*args, **kwargs)
29 return super(lfilesrepo, self).status(*args, **kwargs)
31
30
32 # When lfstatus is set, return a context that gives the names
31 # When lfstatus is set, return a context that gives the names
33 # of largefiles instead of their corresponding standins and
32 # of largefiles instead of their corresponding standins and
34 # identifies the largefiles as always binary, regardless of
33 # identifies the largefiles as always binary, regardless of
35 # their actual contents.
34 # their actual contents.
36 def __getitem__(self, changeid):
35 def __getitem__(self, changeid):
37 ctx = super(lfilesrepo, self).__getitem__(changeid)
36 ctx = super(lfilesrepo, self).__getitem__(changeid)
38 if self.lfstatus:
37 if self.lfstatus:
39 class lfilesmanifestdict(manifest.manifestdict):
38 class lfilesmanifestdict(manifest.manifestdict):
40 def __contains__(self, filename):
39 def __contains__(self, filename):
41 if super(lfilesmanifestdict,
40 if super(lfilesmanifestdict,
42 self).__contains__(filename):
41 self).__contains__(filename):
43 return True
42 return True
44 return super(lfilesmanifestdict,
43 return super(lfilesmanifestdict,
45 self).__contains__(lfutil.standin(filename))
44 self).__contains__(lfutil.standin(filename))
46 class lfilesctx(ctx.__class__):
45 class lfilesctx(ctx.__class__):
47 def files(self):
46 def files(self):
48 filenames = super(lfilesctx, self).files()
47 filenames = super(lfilesctx, self).files()
49 return [lfutil.splitstandin(f) or f for f in filenames]
48 return [lfutil.splitstandin(f) or f for f in filenames]
50 def manifest(self):
49 def manifest(self):
51 man1 = super(lfilesctx, self).manifest()
50 man1 = super(lfilesctx, self).manifest()
52 man1.__class__ = lfilesmanifestdict
51 man1.__class__ = lfilesmanifestdict
53 return man1
52 return man1
54 def filectx(self, path, fileid=None, filelog=None):
53 def filectx(self, path, fileid=None, filelog=None):
55 try:
54 try:
56 if filelog is not None:
55 if filelog is not None:
57 result = super(lfilesctx, self).filectx(
56 result = super(lfilesctx, self).filectx(
58 path, fileid, filelog)
57 path, fileid, filelog)
59 else:
58 else:
60 result = super(lfilesctx, self).filectx(
59 result = super(lfilesctx, self).filectx(
61 path, fileid)
60 path, fileid)
62 except error.LookupError:
61 except error.LookupError:
63 # Adding a null character will cause Mercurial to
62 # Adding a null character will cause Mercurial to
64 # identify this as a binary file.
63 # identify this as a binary file.
65 if filelog is not None:
64 if filelog is not None:
66 result = super(lfilesctx, self).filectx(
65 result = super(lfilesctx, self).filectx(
67 lfutil.standin(path), fileid, filelog)
66 lfutil.standin(path), fileid, filelog)
68 else:
67 else:
69 result = super(lfilesctx, self).filectx(
68 result = super(lfilesctx, self).filectx(
70 lfutil.standin(path), fileid)
69 lfutil.standin(path), fileid)
71 olddata = result.data
70 olddata = result.data
72 result.data = lambda: olddata() + '\0'
71 result.data = lambda: olddata() + '\0'
73 return result
72 return result
74 ctx.__class__ = lfilesctx
73 ctx.__class__ = lfilesctx
75 return ctx
74 return ctx
76
75
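The trailing '\0' appended to the standin data works because Mercurial's binary-file heuristic treats any content containing a NUL byte as binary. The heuristic in miniature (a sketch; the real check lives in mercurial.util):

    def looksbinary(data):
        # Mercurial treats content containing a NUL byte as binary.
        return bool(data) and b'\0' in data

    assert not looksbinary(b'abc123\n')
    assert looksbinary(b'abc123\n' + b'\0')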
        # Figure out the status of big files and insert them into the
        # appropriate list in the result. Also removes standin files
        # from the listing. Revert to the original status if
        # self.lfstatus is False.
        # XXX large file status is buggy when used on repo proxy.
        # XXX this needs to be investigated.
        @localrepo.unfilteredmethod
        def status(self, node1='.', node2=None, match=None, ignored=False,
                clean=False, unknown=False, listsubrepos=False):
            listignored, listclean, listunknown = ignored, clean, unknown
            if not self.lfstatus:
                return super(lfilesrepo, self).status(node1, node2, match,
                    listignored, listclean, listunknown, listsubrepos)
            else:
                # some calls in this function rely on the old version of status
                self.lfstatus = False
                ctx1 = self[node1]
                ctx2 = self[node2]
                working = ctx2.rev() is None
                parentworking = working and ctx1 == self['.']

                def inctx(file, ctx):
                    try:
                        if ctx.rev() is None:
                            return file in ctx.manifest()
                        ctx[file]
                        return True
                    except KeyError:
                        return False

                if match is None:
                    match = match_.always(self.root, self.getcwd())

                wlock = None
                try:
                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                    except error.LockError:
                        pass

                    # First check if there were files specified on the
                    # command line. If there were, and none of them were
                    # largefiles, we should just bail here and let super
                    # handle it -- thus gaining a big performance boost.
                    lfdirstate = lfutil.openlfdirstate(ui, self)
                    if match.files() and not match.anypats():
                        for f in lfdirstate:
                            if match(f):
                                break
                        else:
                            return super(lfilesrepo, self).status(node1, node2,
                                    match, listignored, listclean,
                                    listunknown, listsubrepos)

                    # Create a copy of match that matches standins instead
                    # of largefiles.
                    def tostandins(files):
                        if not working:
                            return files
                        newfiles = []
                        dirstate = self.dirstate
                        for f in files:
                            sf = lfutil.standin(f)
                            if sf in dirstate:
                                newfiles.append(sf)
                            elif sf in dirstate.dirs():
                                # Directory entries could be regular or
                                # standin, check both
                                newfiles.extend((f, sf))
                            else:
                                newfiles.append(f)
                        return newfiles

                    m = copy.copy(match)
                    m._files = tostandins(m._files)

                    result = super(lfilesrepo, self).status(node1, node2, m,
                        ignored, clean, unknown, listsubrepos)
                    if working:

                        def sfindirstate(f):
                            sf = lfutil.standin(f)
                            dirstate = self.dirstate
                            return sf in dirstate or sf in dirstate.dirs()

                        match._files = [f for f in match._files
                                        if sfindirstate(f)]
                        # Don't waste time getting the ignored and unknown
                        # files from lfdirstate
                        s = lfdirstate.status(match, [], False,
                                listclean, False)
                        (unsure, modified, added, removed, missing, _unknown,
                                _ignored, clean) = s
                        if parentworking:
                            for lfile in unsure:
                                standin = lfutil.standin(lfile)
                                if standin not in ctx1:
                                    # from second parent
                                    modified.append(lfile)
                                elif ctx1[standin].data().strip() \
                                        != lfutil.hashfile(self.wjoin(lfile)):
                                    modified.append(lfile)
                                else:
                                    clean.append(lfile)
                                    lfdirstate.normal(lfile)
                        else:
                            tocheck = unsure + modified + added + clean
                            modified, added, clean = [], [], []

                            for lfile in tocheck:
                                standin = lfutil.standin(lfile)
                                if inctx(standin, ctx1):
                                    if ctx1[standin].data().strip() != \
                                            lfutil.hashfile(self.wjoin(lfile)):
                                        modified.append(lfile)
                                    else:
                                        clean.append(lfile)
                                else:
                                    added.append(lfile)

                        # Standins no longer found in lfdirstate have been
                        # removed
                        for standin in ctx1.manifest():
                            if not lfutil.isstandin(standin):
                                continue
                            lfile = lfutil.splitstandin(standin)
                            if not match(lfile):
                                continue
                            if lfile not in lfdirstate:
                                removed.append(lfile)

                        # Filter result lists
                        result = list(result)

                        # Largefiles are not really removed when they're
                        # still in the normal dirstate. Likewise, normal
                        # files are not really removed if they are still in
                        # lfdirstate. This happens in merges where files
                        # change type.
                        removed = [f for f in removed
                                   if f not in self.dirstate]
                        result[2] = [f for f in result[2]
                                     if f not in lfdirstate]

                        lfiles = set(lfdirstate._map)
                        # Unknown files
                        result[4] = set(result[4]).difference(lfiles)
                        # Ignored files
                        result[5] = set(result[5]).difference(lfiles)
                        # combine normal files and largefiles
                        normals = [[fn for fn in filelist
                                    if not lfutil.isstandin(fn)]
                                   for filelist in result]
                        lfiles = (modified, added, removed, missing, [], [],
                                  clean)
                        result = [sorted(list1 + list2)
                                  for (list1, list2) in zip(normals, lfiles)]
                    else:
                        def toname(f):
                            if lfutil.isstandin(f):
                                return lfutil.splitstandin(f)
                            return f
                        result = [[toname(f) for f in items]
                                  for items in result]

                    if wlock:
                        lfdirstate.write()

                finally:
                    if wlock:
                        wlock.release()

                if not listunknown:
                    result[4] = []
                if not listignored:
                    result[5] = []
                if not listclean:
                    result[6] = []
                self.lfstatus = True
                return result

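A rough driver sketch for the wrapped status() (assumed usage, not part of this change; it presumes the largefiles extension is enabled so reposetup() has run): callers flip lfstatus around the call so largefile names, not standins, are reported, and the result keeps Mercurial's usual tuple order.

    from mercurial import hg, ui as uimod

    def lfaware_status(path='.'):
        repo = hg.repository(uimod.ui(), path)   # hypothetical caller
        repo.lfstatus = True                     # route through the override above
        try:
            # order: modified, added, removed, deleted, unknown, ignored, clean
            return repo.status(clean=True)
        finally:
            repo.lfstatus = False
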
        # As part of committing, copy all of the largefiles into the
        # cache.
        def commitctx(self, *args, **kwargs):
            node = super(lfilesrepo, self).commitctx(*args, **kwargs)
            lfutil.copyalltostore(self, node)
            return node

        # Before commit, largefile standins have not had their
        # contents updated to reflect the hash of their largefile.
        # Do that here.
        def commit(self, text="", user=None, date=None, match=None,
                force=False, editor=False, extra={}):
            orig = super(lfilesrepo, self).commit

            wlock = self.wlock()
            try:
                # Case 0: Rebase or Transplant
                # We have to take the time to pull down the new largefiles now.
                # Otherwise, any largefiles that were modified in the
                # destination changesets get overwritten, either by the rebase
                # or in the first commit after the rebase or transplant.
                # updatelfiles will update the dirstate to mark any pulled
                # largefiles as modified
                if getattr(self, "_isrebasing", False) or \
                        getattr(self, "_istransplanting", False):
                    lfcommands.updatelfiles(self.ui, self, filelist=None,
                                            printmessage=False)
                    result = orig(text=text, user=user, date=date, match=match,
                                  force=force, editor=editor, extra=extra)
                    return result
                # Case 1: user calls commit with no specific files or
                # include/exclude patterns: refresh and commit all files that
                # are "dirty".
                if ((match is None) or
                        (not match.anypats() and not match.files())):
                    # Spend a bit of time here to get a list of files we know
                    # are modified so we can compare only against those.
                    # It can cost a lot of time (several seconds)
                    # otherwise to update all standins if the largefiles are
                    # large.
                    lfdirstate = lfutil.openlfdirstate(ui, self)
                    dirtymatch = match_.always(self.root, self.getcwd())
                    s = lfdirstate.status(dirtymatch, [], False, False, False)
                    (unsure, modified, added, removed, _missing, _unknown,
                            _ignored, _clean) = s
                    modifiedfiles = unsure + modified + added + removed
                    lfiles = lfutil.listlfiles(self)
                    # this only loops through largefiles that exist (not
                    # removed/renamed)
                    for lfile in lfiles:
                        if lfile in modifiedfiles:
                            if os.path.exists(
                                    self.wjoin(lfutil.standin(lfile))):
                                # this handles the case where a rebase is being
                                # performed and the working copy is not updated
                                # yet.
                                if os.path.exists(self.wjoin(lfile)):
                                    lfutil.updatestandin(self,
                                        lfutil.standin(lfile))
                                lfdirstate.normal(lfile)

                    result = orig(text=text, user=user, date=date, match=match,
                                  force=force, editor=editor, extra=extra)

                    if result is not None:
                        for lfile in lfdirstate:
                            if lfile in modifiedfiles:
                                if (not os.path.exists(self.wjoin(
                                        lfutil.standin(lfile)))) or \
                                        (not os.path.exists(self.wjoin(lfile))):
                                    lfdirstate.drop(lfile)

                    # This needs to be after commit; otherwise precommit hooks
                    # get the wrong status
                    lfdirstate.write()
                    return result

                lfiles = lfutil.listlfiles(self)
                match._files = self._subdirlfs(match.files(), lfiles)

                # Case 2: user calls commit with specified patterns: refresh
                # any matching big files.
                smatcher = lfutil.composestandinmatcher(self, match)
                standins = self.dirstate.walk(smatcher, [], False, False)

                # No matching big files: get out of the way and pass control to
                # the usual commit() method.
                if not standins:
                    return orig(text=text, user=user, date=date, match=match,
                                force=force, editor=editor, extra=extra)

                # Refresh all matching big files. It's possible that the
                # commit will end up failing, in which case the big files will
                # stay refreshed. No harm done: the user modified them and
                # asked to commit them, so sooner or later we're going to
                # refresh the standins. Might as well leave them refreshed.
                lfdirstate = lfutil.openlfdirstate(ui, self)
                for standin in standins:
                    lfile = lfutil.splitstandin(standin)
                    if lfdirstate[lfile] != 'r':
                        lfutil.updatestandin(self, standin)
                        lfdirstate.normal(lfile)
                    else:
                        lfdirstate.drop(lfile)

                # Cook up a new matcher that only matches regular files or
                # standins corresponding to the big files requested by the
                # user. Have to modify _files to prevent commit() from
                # complaining "not tracked" for big files.
                match = copy.copy(match)
                origmatchfn = match.matchfn

                # Check both the list of largefiles and the list of
                # standins because if a largefile was removed, it
                # won't be in the list of largefiles at this point
                match._files += sorted(standins)

                actualfiles = []
                for f in match._files:
                    fstandin = lfutil.standin(f)

                    # ignore known largefiles and standins
                    if f in lfiles or fstandin in standins:
                        continue

                    # append directory separator to avoid collisions
                    if not fstandin.endswith(os.sep):
                        fstandin += os.sep

                    actualfiles.append(f)
                match._files = actualfiles

                def matchfn(f):
                    if origmatchfn(f):
                        return f not in lfiles
                    else:
                        return f in standins

                match.matchfn = matchfn
                result = orig(text=text, user=user, date=date, match=match,
                              force=force, editor=editor, extra=extra)
                # This needs to be after commit; otherwise precommit hooks
                # get the wrong status
                lfdirstate.write()
                return result
            finally:
                wlock.release()

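For orientation, the standin bookkeeping that the commit paths above rely on can be modelled without any Mercurial plumbing (a toy sketch; the real helpers live in lfutil and also handle platform path separators):

    import posixpath

    SHORTNAME = '.hglf'               # the extension's standin directory

    def standin(filename):
        # repo-relative path of the small tracking file for a largefile
        return posixpath.join(SHORTNAME, filename)

    def splitstandin(filename):
        # inverse mapping; None when the path is not a standin
        if filename.startswith(SHORTNAME + '/'):
            return filename[len(SHORTNAME) + 1:]
        return None

    assert standin('data/big.bin') == '.hglf/data/big.bin'
    assert splitstandin('.hglf/data/big.bin') == 'data/big.bin'
    assert splitstandin('data/big.bin') is None
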
        def push(self, remote, force=False, revs=None, newbranch=False):
            if remote.local():
                missing = set(self.requirements) - remote.local().supported
                if missing:
                    msg = _("required features are not"
                            " supported in the destination:"
                            " %s") % (', '.join(sorted(missing)))
                    raise util.Abort(msg)

            outgoing = discovery.findcommonoutgoing(repo, remote.peer(),
                                                    force=force)
            if outgoing.missing:
                toupload = set()
                o = self.changelog.nodesbetween(outgoing.missing, revs)[0]
-               for n in o:
-                   parents = [p for p in self.changelog.parents(n)
-                              if p != node_.nullid]
-                   ctx = self[n]
-                   files = set(ctx.files())
-                   if len(parents) == 2:
-                       mc = ctx.manifest()
-                       mp1 = ctx.parents()[0].manifest()
-                       mp2 = ctx.parents()[1].manifest()
-                       for f in mp1:
-                           if f not in mc:
-                               files.add(f)
-                       for f in mp2:
-                           if f not in mc:
-                               files.add(f)
-                       for f in mc:
-                           if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f,
-                                   None):
-                               files.add(f)
-
-                   toupload = toupload.union(
-                       set([ctx[f].data().strip()
-                            for f in files
-                            if lfutil.isstandin(f) and f in ctx]))
+               addfunc = lambda fn, lfhash: toupload.add(lfhash)
+               lfutil.getlfilestoupload(self, o, addfunc)
                lfcommands.uploadlfiles(ui, self, remote, toupload)
            return super(lfilesrepo, self).push(remote, force=force, revs=revs,
                newbranch=newbranch)

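The collection step introduced above can be read in isolation as follows (stand-alone sketch; the (name, hash) pairs are hypothetical stand-ins for what lfutil.getlfilestoupload feeds the callback):

    def collect_upload_hashes(standin_entries):
        # standin_entries: iterable of (largefile name, largefile hash) pairs,
        # one per standin touched by an outgoing changeset
        toupload = set()
        addfunc = lambda fn, lfhash: toupload.add(lfhash)   # same shape as above
        for fn, lfhash in standin_entries:
            addfunc(fn, lfhash)
        return toupload

    # duplicates collapse, so each largefile is queued for upload only once
    print(collect_upload_hashes([('a.bin', 'f' * 40), ('b.bin', 'f' * 40)]))
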
        def _subdirlfs(self, files, lfiles):
            '''
            Adjust matched file list
            If we pass a directory to commit whose only committable files
            are largefiles, the core commit code aborts before finding
            the largefiles.
            So we do the following:
            For directories that only have largefiles as matches,
            we explicitly add the largefiles to the match list and remove
            the directory.
            In other cases, we leave the match list unmodified.
            '''
            actualfiles = []
            dirs = []
            regulars = []

            for f in files:
                if lfutil.isstandin(f + '/'):
                    raise util.Abort(
                        _('file "%s" is a largefile standin') % f,
                        hint=('commit the largefile itself instead'))
                # Scan directories
                if os.path.isdir(self.wjoin(f)):
                    dirs.append(f)
                else:
                    regulars.append(f)

            for f in dirs:
                matcheddir = False
                d = self.dirstate.normalize(f) + '/'
                # Check for matched normal files
                for mf in regulars:
                    if self.dirstate.normalize(mf).startswith(d):
                        actualfiles.append(f)
                        matcheddir = True
                        break
                if not matcheddir:
                    # If no normal match, manually append
                    # any matching largefiles
                    for lf in lfiles:
                        if self.dirstate.normalize(lf).startswith(d):
                            actualfiles.append(lf)
                            if not matcheddir:
                                actualfiles.append(lfutil.standin(f))
                                matcheddir = True
                # Nothing in dir, so readd it
                # and let commit reject it
                if not matcheddir:
                    actualfiles.append(f)

            # Always add normal files
            actualfiles += regulars
            return actualfiles

    repo.__class__ = lfilesrepo

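The directory adjustment above, reduced to plain data (a simplified model only; the real code normalizes paths through the dirstate and also re-adds the directory's standin as a guard):

    def expand_largefile_only_dirs(requested, largefiles, is_dir):
        # requested: paths handed to commit; largefiles: tracked largefile names;
        # is_dir: predicate for 'path is a directory in the working copy'
        out = []
        for path in requested:
            if not is_dir(path):
                out.append(path)
                continue
            matches = [lf for lf in largefiles if lf.startswith(path + '/')]
            # replace a largefile-only directory by its largefiles
            out.extend(matches if matches else [path])
        return out

    print(expand_largefile_only_dirs(['media'], ['media/video.iso'],
                                     lambda p: p == 'media'))   # ['media/video.iso']
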
    def checkrequireslfiles(ui, repo, **kwargs):
        if 'largefiles' not in repo.requirements and util.any(
                lfutil.shortname+'/' in f[0] for f in repo.store.datafiles()):
            repo.requirements.add('largefiles')
            repo._writerequirements()

    ui.setconfig('hooks', 'changegroup.lfiles', checkrequireslfiles,
        'largefiles')
    ui.setconfig('hooks', 'commit.lfiles', checkrequireslfiles, 'largefiles')
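A reduced sketch of the requirement check registered by the hooks above (toy input; the real check iterates repo.store.datafiles()):

    def needs_largefiles_requirement(requirements, stored_paths):
        # true as soon as any store path lives under the standin directory
        return ('largefiles' not in requirements and
                any('.hglf/' in path for path in stored_paths))

    assert needs_largefiles_requirement(set(), ['data/.hglf/big.bin.i'])
    assert not needs_largefiles_requirement({'largefiles'},
                                            ['data/.hglf/big.bin.i'])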