made repo locks recursive and deprecate refcounting based lock releasing...
Ronny Pfannschmidt
r8108:a26d3374 default
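
This changeset makes repo.lock() and repo.wlock() re-entrant: a lock that is still held is re-acquired (bumping its recursion count) instead of being handed back bare through the weakref, and explicit release() supersedes the old drop-the-last-reference style. A minimal sketch of the behaviour the new code relies on, assuming a lock object that keeps a held count the way Mercurial's lock.lock does (the class below is illustrative, not the real implementation):

# Illustrative recursive lock with a held counter: lock() bumps the
# count when already held; release() only drops the real lock at zero.
class recursivelock(object):
    def __init__(self, acquirefn, releasefn):
        self.held = 0
        self._acquirefn = acquirefn
        self._releasefn = releasefn

    def lock(self):
        if self.held:
            self.held += 1      # nested acquire: just count it
        else:
            self._acquirefn()   # first acquire: take the real lock
            self.held = 1

    def release(self):
        assert self.held > 0
        self.held -= 1
        if not self.held:
            self._releasefn()   # outermost release drops the real lock
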
@@ -1,2168 +1,2172 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms
# of the GNU General Public License, incorporated herein by reference.

from node import bin, hex, nullid, nullrev, short
from i18n import _
import repo, changegroup
import changelog, dirstate, filelog, manifest, context, weakref
import lock, transaction, stat, errno, ui, store, encoding
import os, time, util, extensions, hook, inspect, error
import match as match_
import merge as merge_

class localrepository(repo.repository):
    capabilities = util.set(('lookup', 'changegroupsubset'))
    supported = ('revlogv1', 'store', 'fncache')

    def __init__(self, parentui, path=None, create=0):
        repo.repository.__init__(self)
        self.root = os.path.realpath(path)
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                requirements = ["revlogv1"]
                if parentui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    if parentui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                    # create an invalid changelog
                    self.opener("00changelog.i", "a").write(
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                reqfile = self.opener("requires", "w")
                for r in requirements:
                    reqfile.write("%s\n" % r)
                reqfile.close()
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            requirements = []
            try:
                requirements = self.opener("requires").read().splitlines()
                for r in requirements:
                    if r not in self.supported:
                        raise error.RepoError(_("requirement '%s' not supported") % r)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise

        self.store = store.store(requirements, self.path, util.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode

        self.ui = ui.ui(parentui=parentui)
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        self.tagscache = None
        self._tagstypecache = None
        self.branchcache = None
        self._ubranchcache = None # UTF-8 version of branchcache
        self._branchcachetip = None
        self.nodetagscache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

    def __getattr__(self, name):
        if name == 'changelog':
            self.changelog = changelog.changelog(self.sopener)
            if 'HG_PENDING' in os.environ:
                p = os.environ['HG_PENDING']
                if p.startswith(self.root):
                    self.changelog.readpending('00changelog.i.a')
            self.sopener.defversion = self.changelog.version
            return self.changelog
        if name == 'manifest':
            self.changelog
            self.manifest = manifest.manifest(self.sopener)
            return self.manifest
        if name == 'dirstate':
            self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
            return self.dirstate
        else:
            raise AttributeError(name)

    def __getitem__(self, changeid):
        if changeid == None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        for i in xrange(len(self)):
            yield i

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    tag_disallowed = ':\r\n'

    def _tag(self, names, node, message, local, user, date, parent=None,
             extra={}):
        use_dirstate = parent is None

        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if self._tagstypecache and name in self._tagstypecache:
                    old = self.tagscache.get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        if use_dirstate:
            try:
                fp = self.wfile('.hgtags', 'rb+')
            except IOError:
                fp = self.wfile('.hgtags', 'ab')
            else:
                prevtags = fp.read()
        else:
            try:
                prevtags = self.filectx('.hgtags', parent).data()
            except error.LookupError:
                pass
            fp = self.wfile('.hgtags', 'wb')
            if prevtags:
                fp.write(prevtags)

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        if use_dirstate and '.hgtags' not in self.dirstate:
            self.add(['.hgtags'])

        tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
                              extra=extra)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)

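    # A hypothetical caller of tag() (sketch only; 'repo' and 'node' are
    # assumed to exist, and short() comes from the node import above):
    #
    #     repo.tag('v1.2', node, 'Added tag v1.2 for changeset %s' % short(node),
    #              local=False, user=None, date=None)
    #
    # With local=True the name would go to .hg/localtags instead of
    # committing a new changeset that updates .hgtags.
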
    def tags(self):
        '''return a mapping of tag to node'''
        if self.tagscache:
            return self.tagscache

        globaltags = {}
        tagtypes = {}

        def readtags(lines, fn, tagtype):
            filetags = {}
            count = 0

            def warn(msg):
                self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))

            for l in lines:
                count += 1
                if not l:
                    continue
                s = l.split(" ", 1)
                if len(s) != 2:
                    warn(_("cannot parse entry"))
                    continue
                node, key = s
                key = encoding.tolocal(key.strip()) # stored in UTF-8
                try:
                    bin_n = bin(node)
                except TypeError:
                    warn(_("node '%s' is not well formed") % node)
                    continue
                if bin_n not in self.changelog.nodemap:
                    warn(_("tag '%s' refers to unknown node") % key)
                    continue

                h = []
                if key in filetags:
                    n, h = filetags[key]
                    h.append(n)
                filetags[key] = (bin_n, h)

            for k, nh in filetags.iteritems():
                if k not in globaltags:
                    globaltags[k] = nh
                    tagtypes[k] = tagtype
                    continue

                # we prefer the global tag if:
                #  it supersedes us OR
                #  mutual supersedes and it has a higher rank
                # otherwise we win because we're tip-most
                an, ah = nh
                bn, bh = globaltags[k]
                if (bn != an and an in bh and
                    (bn not in ah or len(bh) > len(ah))):
                    an = bn
                ah.extend([n for n in bh if n not in ah])
                globaltags[k] = an, ah
                tagtypes[k] = tagtype

        # read the tags file from each head, ending with the tip
        f = None
        for rev, node, fnode in self._hgtagsnodes():
            f = (f and f.filectx(fnode) or
                 self.filectx('.hgtags', fileid=fnode))
            readtags(f.data().splitlines(), f, "global")

        try:
            data = encoding.fromlocal(self.opener("localtags").read())
            # localtags are stored in the local character set
            # while the internal tag table is stored in UTF-8
            readtags(data.splitlines(), "localtags", "local")
        except IOError:
            pass

        self.tagscache = {}
        self._tagstypecache = {}
        for k, nh in globaltags.iteritems():
            n = nh[0]
            if n != nullid:
                self.tagscache[k] = n
            self._tagstypecache[k] = tagtypes[k]
        self.tagscache['tip'] = self.changelog.tip()
        return self.tagscache

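    # The lines fed to readtags() above are the .hgtags / localtags format:
    # a 40-digit hex changeset node, one space, then the tag name.  For
    # example (dummy nodes, for illustration only):
    #
    #     0123456789abcdef0123456789abcdef01234567 v1.0
    #     89abcdef0123456789abcdef0123456789abcdef v1.1
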
    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        self.tags()

        return self._tagstypecache.get(tagname)

    def _hgtagsnodes(self):
        heads = self.heads()
        heads.reverse()
        last = {}
        ret = []
        for node in heads:
            c = self[node]
            rev = c.rev()
            try:
                fnode = c.filenode('.hgtags')
            except error.LookupError:
                continue
            ret.append((rev, node, fnode))
            if fnode in last:
                ret[last[fnode]] = None
            last[fnode] = len(ret) - 1
        return [item for item in ret if item]

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        l = []
        for t, n in self.tags().iteritems():
            try:
                r = self.changelog.rev(n)
            except:
                r = -2 # sort to the beginning of the list if unknown
            l.append((r, t, n))
        return [(t, n) for r, t, n in util.sort(l)]

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self.nodetagscache:
            self.nodetagscache = {}
            for t, n in self.tags().iteritems():
                self.nodetagscache.setdefault(n, []).append(t)
        return self.nodetagscache.get(node, [])

    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            self._updatebranchcache(partial, lrev+1, tiprev+1)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def _branchheads(self):
        tip = self.changelog.tip()
        if self.branchcache is not None and self._branchcachetip == tip:
            return self.branchcache

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if self.branchcache is None:
            self.branchcache = {} # avoid recursion in changectx
        else:
            self.branchcache.clear() # keep using the same dict
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._ubranchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just tips)
        self._ubranchcache = partial

        # the branch cache is stored on disk as UTF-8, but in the local
        # charset internally
        for k, v in partial.iteritems():
            self.branchcache[encoding.tolocal(k)] = v
        return self.branchcache


    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self._branchheads().iteritems():
            head = None
            for i in range(len(heads)-1, -1, -1):
                h = heads[i]
                if 'close' not in self.changelog.read(h)[5]:
                    head = h
                    break
            # no open heads were found
            if head is None:
                head = heads[-1]
            bt[bn] = head
        return bt


    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("branchheads.cache")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l: continue
                node, label = l.split(" ", 1)
                partial.setdefault(label.strip(), []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("branchheads.cache", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), label))
            f.rename()
        except (IOError, OSError):
            pass

    def _updatebranchcache(self, partial, start, end):
        for r in xrange(start, end):
            c = self[r]
            b = c.branch()
            bheads = partial.setdefault(b, [])
            bheads.append(c.node())
            for p in c.parents():
                pn = p.node()
                if pn in bheads:
                    bheads.remove(pn)

    def lookup(self, key):
        if isinstance(key, int):
            return self.changelog.node(key)
        elif key == '.':
            return self.dirstate.parents()[0]
        elif key == 'null':
            return nullid
        elif key == 'tip':
            return self.changelog.tip()
        n = self.changelog._match(key)
        if n:
            return n
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n
        try:
            if len(key) == 20:
                key = hex(key)
        except:
            pass
        raise error.RepoError(_("unknown revision '%s'") % key)

    def local(self):
        return True

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def rjoin(self, f):
        return os.path.join(self.root, util.pconvert(f))

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _filter(self, filter, filename, data):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l

        for mf, fn, cmd in self.filterpats[filter]:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

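    # The "encode"/"decode" filters consulted above come straight from hgrc
    # sections; a hypothetical configuration (the pipe: prefix names one of
    # the built-in filter helpers dispatched through util.filter):
    #
    #     [encode]
    #     *.gz = pipe: gunzip
    #
    #     [decode]
    #     *.gz = pipe: gzip
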
    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener(filename, 'r').read()
        return self._filter("encode", filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter("decode", filename, data)
        try:
            os.unlink(self.wjoin(filename))
        except OSError:
            pass
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener(filename, 'w').write(data)
            if 'x' in flags:
                util.set_flags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter("decode", filename, data)

    def transaction(self):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(_("journal already exists - run hg recover"))

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)
        self.opener("journal.branch", "w").write(self.dirstate.branch())

        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate")),
                   (self.join("journal.branch"), self.join("undo.branch"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr

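    # transaction() nests rather than failing when a transaction is already
    # running: tr.nest() hands back the same transaction object with an
    # internal count bumped, and close() only finalizes once the outermost
    # close happens.  Sketch of the calling pattern (hedged approximation):
    #
    #     tr = repo.transaction()        # outer: journal is created
    #     inner = repo.transaction()     # nested: same object via tr.nest()
    #     inner.close()                  # decrements the count, no effect yet
    #     tr.close()                     # count hits zero: journal -> undo
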
    def recover(self):
        l = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"))
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            del l

    def rollback(self):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                self.ui.status(_("rolling back last transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("undo"))
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                try:
                    branch = self.opener("undo.branch").read()
                    self.dirstate.setbranch(branch)
                except IOError:
                    self.ui.warn(_("Named branch could not be reset, "
                                   "current branch still is: %s\n")
                                 % encoding.tolocal(self.dirstate.branch()))
                self.invalidate()
                self.dirstate.invalidate()
            else:
                self.ui.warn(_("no rollback information available\n"))
        finally:
            del lock, wlock

    def invalidate(self):
        for a in "changelog manifest".split():
            if a in self.__dict__:
                delattr(self, a)
        self.tagscache = None
        self._tagstypecache = None
        self.nodetagscache = None
        self.branchcache = None
        self._ubranchcache = None
        self._branchcachetip = None

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def lock(self, wait=True):
-        if self._lockref and self._lockref():
-            return self._lockref()
+        l = self._lockref and self._lockref()
+        if l is not None and l.held:
+            l.lock()
+            return l

        l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
                       _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
-        if self._wlockref and self._wlockref():
-            return self._wlockref()
+        l = self._wlockref and self._wlockref()
+        if l is not None and l.held:
+            l.lock()
+            return l

        l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                       self.dirstate.invalidate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

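    # With the change above, re-entrant locking works roughly like this
    # (sketch; 'repo' is assumed, and release() is the non-deprecated way
    # to let go of a lock, per this changeset's description):
    #
    #     l1 = repo.lock()       # takes the store lock, held count is 1
    #     l2 = repo.lock()       # same object as l1, held count is now 2
    #     l2.release()           # count back to 1, lock file still present
    #     l1.release()           # count 0: the lock is actually released
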
    def filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fn = fctx.path()
        t = fctx.data()
        fl = self.file(fn)
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = fctx.renamed()
        if cp and cp[0] != fn:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cf = cp[0]
            cr = manifest1.get(cf)
            nfp = fp2

            if manifest2: # branch merge
                if fp2 == nullid or cr is None: # copied on remote side
                    if cf in manifest2:
                        cr = manifest2[cf]
                        nfp = fp1

            # find source in nearest ancestor if we've lost track
            if not cr:
                self.ui.debug(_(" %s: searching for copy revision for %s\n") %
                              (fn, cf))
                for a in self['.'].ancestors():
                    if cf in a:
                        cr = a[cf].filenode()
                        break

            self.ui.debug(_(" %s: copy %s:%s\n") % (fn, cf, hex(cr)))
            meta["copy"] = cf
            meta["copyrev"] = hex(cr)
            fp1, fp2 = nullid, nfp
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, tr, linkrev, fp1, fp2)

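    # For the foo -> bar rename in the comment above, filecommit() would
    # record copy metadata on bar's new filelog revision along these lines
    # (values are illustrative, not real output):
    #
    #     meta = {'copy': 'foo',       # source path of the rename
    #             'copyrev': hex(cr)}  # filelog node of foo at rev1
    #     fp1, fp2 = nullid, nfp       # nullid parent flags "copy data here"
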
775 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
779 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
776 if p1 is None:
780 if p1 is None:
777 p1, p2 = self.dirstate.parents()
781 p1, p2 = self.dirstate.parents()
778 return self.commit(files=files, text=text, user=user, date=date,
782 return self.commit(files=files, text=text, user=user, date=date,
779 p1=p1, p2=p2, extra=extra, empty_ok=True)
783 p1=p1, p2=p2, extra=extra, empty_ok=True)
780
784
781 def commit(self, files=None, text="", user=None, date=None,
785 def commit(self, files=None, text="", user=None, date=None,
782 match=None, force=False, force_editor=False,
786 match=None, force=False, force_editor=False,
783 p1=None, p2=None, extra={}, empty_ok=False):
787 p1=None, p2=None, extra={}, empty_ok=False):
784 wlock = lock = None
788 wlock = lock = None
785 if extra.get("close"):
789 if extra.get("close"):
786 force = True
790 force = True
787 if files:
791 if files:
788 files = util.unique(files)
792 files = util.unique(files)
789 try:
793 try:
790 wlock = self.wlock()
794 wlock = self.wlock()
791 lock = self.lock()
795 lock = self.lock()
792 use_dirstate = (p1 is None) # not rawcommit
796 use_dirstate = (p1 is None) # not rawcommit
793
797
794 if use_dirstate:
798 if use_dirstate:
795 p1, p2 = self.dirstate.parents()
799 p1, p2 = self.dirstate.parents()
796 update_dirstate = True
800 update_dirstate = True
797
801
798 if (not force and p2 != nullid and
802 if (not force and p2 != nullid and
799 (match and (match.files() or match.anypats()))):
803 (match and (match.files() or match.anypats()))):
800 raise util.Abort(_('cannot partially commit a merge '
804 raise util.Abort(_('cannot partially commit a merge '
801 '(do not specify files or patterns)'))
805 '(do not specify files or patterns)'))
802
806
803 if files:
807 if files:
804 modified, removed = [], []
808 modified, removed = [], []
805 for f in files:
809 for f in files:
806 s = self.dirstate[f]
810 s = self.dirstate[f]
807 if s in 'nma':
811 if s in 'nma':
808 modified.append(f)
812 modified.append(f)
809 elif s == 'r':
813 elif s == 'r':
810 removed.append(f)
814 removed.append(f)
811 else:
815 else:
812 self.ui.warn(_("%s not tracked!\n") % f)
816 self.ui.warn(_("%s not tracked!\n") % f)
813 changes = [modified, [], removed, [], []]
817 changes = [modified, [], removed, [], []]
814 else:
818 else:
815 changes = self.status(match=match)
819 changes = self.status(match=match)
816 else:
820 else:
817 p1, p2 = p1, p2 or nullid
821 p1, p2 = p1, p2 or nullid
818 update_dirstate = (self.dirstate.parents()[0] == p1)
822 update_dirstate = (self.dirstate.parents()[0] == p1)
819 changes = [files, [], [], [], []]
823 changes = [files, [], [], [], []]
820
824
821 ms = merge_.mergestate(self)
825 ms = merge_.mergestate(self)
822 for f in changes[0]:
826 for f in changes[0]:
823 if f in ms and ms[f] == 'u':
827 if f in ms and ms[f] == 'u':
824 raise util.Abort(_("unresolved merge conflicts "
828 raise util.Abort(_("unresolved merge conflicts "
825 "(see hg resolve)"))
829 "(see hg resolve)"))
826 wctx = context.workingctx(self, (p1, p2), text, user, date,
830 wctx = context.workingctx(self, (p1, p2), text, user, date,
827 extra, changes)
831 extra, changes)
828 r = self._commitctx(wctx, force, force_editor, empty_ok,
832 r = self._commitctx(wctx, force, force_editor, empty_ok,
829 use_dirstate, update_dirstate)
833 use_dirstate, update_dirstate)
830 ms.reset()
834 ms.reset()
831 return r
835 return r
832
836
833 finally:
837 finally:
834 del lock, wlock
838 del lock, wlock
835
839
836 def commitctx(self, ctx):
840 def commitctx(self, ctx):
837 """Add a new revision to current repository.
841 """Add a new revision to current repository.
838
842
839 Revision information is passed in the context.memctx argument.
843 Revision information is passed in the context.memctx argument.
840 commitctx() does not touch the working directory.
844 commitctx() does not touch the working directory.
841 """
845 """
842 wlock = lock = None
846 wlock = lock = None
843 try:
847 try:
844 wlock = self.wlock()
848 wlock = self.wlock()
845 lock = self.lock()
849 lock = self.lock()
846 return self._commitctx(ctx, force=True, force_editor=False,
850 return self._commitctx(ctx, force=True, force_editor=False,
847 empty_ok=True, use_dirstate=False,
851 empty_ok=True, use_dirstate=False,
848 update_dirstate=False)
852 update_dirstate=False)
849 finally:
853 finally:
850 del lock, wlock
854 del lock, wlock
851
855
852 def _commitctx(self, wctx, force=False, force_editor=False, empty_ok=False,
856 def _commitctx(self, wctx, force=False, force_editor=False, empty_ok=False,
853 use_dirstate=True, update_dirstate=True):
857 use_dirstate=True, update_dirstate=True):
854 tr = None
858 tr = None
855 valid = 0 # don't save the dirstate if this isn't set
859 valid = 0 # don't save the dirstate if this isn't set
856 try:
860 try:
857 commit = util.sort(wctx.modified() + wctx.added())
861 commit = util.sort(wctx.modified() + wctx.added())
858 remove = wctx.removed()
862 remove = wctx.removed()
859 extra = wctx.extra().copy()
863 extra = wctx.extra().copy()
860 branchname = extra['branch']
864 branchname = extra['branch']
861 user = wctx.user()
865 user = wctx.user()
862 text = wctx.description()
866 text = wctx.description()
863
867
864 p1, p2 = [p.node() for p in wctx.parents()]
868 p1, p2 = [p.node() for p in wctx.parents()]
865 c1 = self.changelog.read(p1)
869 c1 = self.changelog.read(p1)
866 c2 = self.changelog.read(p2)
870 c2 = self.changelog.read(p2)
867 m1 = self.manifest.read(c1[0]).copy()
871 m1 = self.manifest.read(c1[0]).copy()
868 m2 = self.manifest.read(c2[0])
872 m2 = self.manifest.read(c2[0])
869
873
870 if use_dirstate:
874 if use_dirstate:
871 oldname = c1[5].get("branch") # stored in UTF-8
875 oldname = c1[5].get("branch") # stored in UTF-8
872 if (not commit and not remove and not force and p2 == nullid
876 if (not commit and not remove and not force and p2 == nullid
873 and branchname == oldname):
877 and branchname == oldname):
874 self.ui.status(_("nothing changed\n"))
878 self.ui.status(_("nothing changed\n"))
875 return None
879 return None
876
880
877 xp1 = hex(p1)
881 xp1 = hex(p1)
878 if p2 == nullid: xp2 = ''
882 if p2 == nullid: xp2 = ''
879 else: xp2 = hex(p2)
883 else: xp2 = hex(p2)
880
884
881 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
885 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
882
886
883 tr = self.transaction()
887 tr = self.transaction()
884 trp = weakref.proxy(tr)
888 trp = weakref.proxy(tr)
885
889
886 # check in files
890 # check in files
887 new = {}
891 new = {}
888 changed = []
892 changed = []
889 linkrev = len(self)
893 linkrev = len(self)
890 for f in commit:
894 for f in commit:
891 self.ui.note(f + "\n")
895 self.ui.note(f + "\n")
892 try:
896 try:
893 fctx = wctx.filectx(f)
897 fctx = wctx.filectx(f)
894 newflags = fctx.flags()
898 newflags = fctx.flags()
895 new[f] = self.filecommit(fctx, m1, m2, linkrev, trp, changed)
899 new[f] = self.filecommit(fctx, m1, m2, linkrev, trp, changed)
896 if ((not changed or changed[-1] != f) and
900 if ((not changed or changed[-1] != f) and
897 m2.get(f) != new[f]):
901 m2.get(f) != new[f]):
898 # mention the file in the changelog if some
902 # mention the file in the changelog if some
899 # flag changed, even if there was no content
903 # flag changed, even if there was no content
900 # change.
904 # change.
901 if m1.flags(f) != newflags:
905 if m1.flags(f) != newflags:
902 changed.append(f)
906 changed.append(f)
903 m1.set(f, newflags)
907 m1.set(f, newflags)
904 if use_dirstate:
908 if use_dirstate:
905 self.dirstate.normal(f)
909 self.dirstate.normal(f)
906
910
907 except (OSError, IOError):
911 except (OSError, IOError):
908 if use_dirstate:
912 if use_dirstate:
909 self.ui.warn(_("trouble committing %s!\n") % f)
913 self.ui.warn(_("trouble committing %s!\n") % f)
910 raise
914 raise
911 else:
915 else:
912 remove.append(f)
916 remove.append(f)
913
917
914 updated, added = [], []
918 updated, added = [], []
915 for f in util.sort(changed):
919 for f in util.sort(changed):
916 if f in m1 or f in m2:
920 if f in m1 or f in m2:
917 updated.append(f)
921 updated.append(f)
918 else:
922 else:
919 added.append(f)
923 added.append(f)
920
924
921 # update manifest
925 # update manifest
922 m1.update(new)
            m1.update(new)
            removed = [f for f in util.sort(remove) if f in m1 or f in m2]
            removed1 = []

            for f in removed:
                if f in m1:
                    del m1[f]
                    removed1.append(f)
            mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
                                   (new, removed1))

            # add changeset
            if (not empty_ok and not text) or force_editor:
                edittext = []
                if text:
                    edittext.append(text)
                edittext.append("")
                edittext.append("") # Empty line between message and comments.
                edittext.append(_("HG: Enter commit message."
                                  " Lines beginning with 'HG:' are removed."))
                edittext.append("HG: --")
                edittext.append("HG: user: %s" % user)
                if p2 != nullid:
                    edittext.append("HG: branch merge")
                if branchname:
                    edittext.append("HG: branch '%s'"
                                    % encoding.tolocal(branchname))
                edittext.extend(["HG: added %s" % f for f in added])
                edittext.extend(["HG: changed %s" % f for f in updated])
                edittext.extend(["HG: removed %s" % f for f in removed])
                if not added and not updated and not removed:
                    edittext.append("HG: no files changed")
                edittext.append("")
                # run editor in the repository root
                olddir = os.getcwd()
                os.chdir(self.root)
                text = self.ui.edit("\n".join(edittext), user)
                os.chdir(olddir)

            lines = [line.rstrip() for line in text.rstrip().splitlines()]
            while lines and not lines[0]:
                del lines[0]
            if not lines and use_dirstate:
                raise util.Abort(_("empty commit message"))
            text = '\n'.join(lines)

            self.changelog.delayupdate()
            n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
                                   user, wctx.date(), extra)
            p = lambda: self.changelog.writepending() and self.root or ""
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            if self.branchcache:
                self.branchtags()

            if use_dirstate or update_dirstate:
                self.dirstate.setparents(n)
                if use_dirstate:
                    for f in removed:
                        self.dirstate.forget(f)
                valid = 1 # our dirstate updates are complete

            self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
            return n
        finally:
            if not valid: # don't save our updated dirstate
                self.dirstate.invalidate()
            del tr

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)
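
    # Illustrative sketch, not from the original source: walk() is how
    # commands enumerate tracked files. Assuming an already-opened repo
    # object, listing every file matched in the working directory might
    # look like:
    #
    #   m = match_.always(repo.root, repo.getcwd())
    #   for abs in repo.walk(m):
    #       repo.ui.write("%s\n" % abs)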

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or match_.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
                return False
            match.bad = bad

        if working: # we need to scan the working dir
            s = self.dirstate.status(match, listignored, listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in cmp:
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f].data())):
                        modified.append(f)
                    else:
                        fixup.append(f)

                if listclean:
                    clean += fixup

                # update dirstate for files that are actually clean
                if fixup:
                    wlock = None
                    try:
                        try:
                            wlock = self.wlock(False)
                            for f in fixup:
                                self.dirstate.normal(f)
                        except error.LockError:
                            pass
                    finally:
                        del wlock

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean
        [l.sort() for l in r]
        return r
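
    # Illustrative sketch, not from the original source: callers unpack the
    # seven sorted lists returned above, e.g.
    #
    #   modified, added, removed, deleted, unknown, ignored, clean = \
    #       repo.status(unknown=True)
    #   for f in modified:
    #       repo.ui.write("M %s\n" % f)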

    def add(self, list):
        wlock = self.wlock()
        try:
            rejected = []
            for f in list:
                p = self.wjoin(f)
                try:
                    st = os.lstat(p)
                except:
                    self.ui.warn(_("%s does not exist!\n") % f)
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    self.ui.warn(_("%s: files over 10MB may cause memory and"
                                   " performance problems\n"
                                   "(use 'hg revert %s' to unadd the file)\n")
                                 % (f, f))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    self.ui.warn(_("%s not added: only files and symlinks "
                                   "supported currently\n") % f)
                    rejected.append(p)
                elif self.dirstate[f] in 'amn':
                    self.ui.warn(_("%s already tracked!\n") % f)
                elif self.dirstate[f] == 'r':
                    self.dirstate.normallookup(f)
                else:
                    self.dirstate.add(f)
            return rejected
        finally:
            del wlock

    def forget(self, list):
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'a':
                    self.ui.warn(_("%s not added!\n") % f)
                else:
                    self.dirstate.forget(f)
        finally:
            del wlock

    def remove(self, list, unlink=False):
        wlock = None
        try:
            if unlink:
                for f in list:
                    try:
                        util.unlink(self.wjoin(f))
                    except OSError, inst:
                        if inst.errno != errno.ENOENT:
                            raise
            wlock = self.wlock()
            for f in list:
                if unlink and os.path.exists(self.wjoin(f)):
                    self.ui.warn(_("%s still exists!\n") % f)
                elif self.dirstate[f] == 'a':
                    self.dirstate.forget(f)
                elif f not in self.dirstate:
                    self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    self.dirstate.remove(f)
        finally:
            del wlock

    def undelete(self, list):
        wlock = None
        try:
            manifests = [self.manifest.read(self.changelog.read(p)[0])
                         for p in self.dirstate.parents() if p != nullid]
            wlock = self.wlock()
            for f in list:
                if self.dirstate[f] != 'r':
                    self.ui.warn(_("%s not removed!\n") % f)
                else:
                    m = f in manifests[0] and manifests[0] or manifests[1]
                    t = self.file(f).read(m[f])
                    self.wwrite(f, t, m.flags(f))
                    self.dirstate.normal(f)
        finally:
            del wlock

    def copy(self, source, dest):
        wlock = None
        try:
            p = self.wjoin(dest)
            if not (os.path.exists(p) or os.path.islink(p)):
                self.ui.warn(_("%s does not exist!\n") % dest)
            elif not (os.path.isfile(p) or os.path.islink(p)):
                self.ui.warn(_("copy failed: %s is not a file or a "
                               "symbolic link\n") % dest)
            else:
                wlock = self.wlock()
                if self.dirstate[dest] in '?r':
                    self.dirstate.add(dest)
                self.dirstate.copy(source, dest)
        finally:
            del wlock

    def heads(self, start=None, closed=True):
        heads = self.changelog.heads(start)
        def display(head):
            if closed:
                return True
            extras = self.changelog.read(head)[5]
            return ('close' not in extras)
        # sort the output in rev descending order
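        # (negating each revision number makes the ascending sort below
        # return the newest heads first)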
        heads = [(-self.changelog.rev(h), h) for h in heads if display(h)]
        return [n for (r, n) in util.sort(heads)]

    def branchheads(self, branch=None, start=None, closed=True):
        if branch is None:
            branch = self[None].branch()
        branches = self._branchheads()
        if branch not in branches:
            return []
        bheads = branches[branch]
        # the cache returns heads ordered lowest to highest
        bheads.reverse()
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            bheads = self.changelog.nodesbetween([start], bheads)[2]
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b
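
    # Note, not from the original source: each tuple appended above describes
    # a linear segment of history as (head, root, first parent, second
    # parent); the walk stops at the first merge or root changeset reached.
    # The branch discovery loop in findcommonincoming() below relies on
    # exactly this shape.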

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
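
    # Illustrative note, not from the original source: for each (top, bottom)
    # pair the loop above samples the first-parent chain at exponentially
    # growing distances 1, 2, 4, 8, ... below top. For a chain
    #
    #   top -> a -> b -> c -> d -> bottom
    #
    # it collects [a, b, d], which is the spacing the binary search in
    # findcommonincoming() narrows against.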

    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore, base will be updated to include those nodes that exist
        in both self and remote but whose children do not exist in both.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        return self.findcommonincoming(remote, base, heads, force)[1]

    def findcommonincoming(self, remote, base=None, heads=None, force=False):
        """Return a tuple (common, missing roots, heads) used to identify
        missing nodes from remote.

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore, base will be updated to include those nodes that exist
        in both self and remote but whose children do not exist in both.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        """
        m = self.changelog.nodemap
        search = []
        fetch = {}
        seen = {}
        seenbranch = {}
        if base is None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid], [nullid], list(heads)
            return [nullid], [], []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        heads = unknown
        if not unknown:
            return base.keys(), [], []

        req = dict.fromkeys(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n[0:2]) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req[p] = 1
                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            newsearch = []
            reqcnt += 1
            for n, l in zip(search, remote.between(search)):
                l.append(n[1])
                p = n[0]
                f = 1
                for i in l:
                    self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                    if i in m:
                        if f <= 2:
                            self.ui.debug(_("found new branch changeset %s\n") %
                                          short(p))
                            fetch[p] = 1
                            base[i] = 1
                        else:
                            self.ui.debug(_("narrowed branch search to %s:%s\n")
                                          % (short(p), short(i)))
                            newsearch.append((p, i))
                        break
                    p, f = i, f * 2
            search = newsearch

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise error.RepoError(_("already have changeset ")
                                      + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return base.keys(), fetch.keys(), heads
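
    # Illustrative note, not from the original source: for a simple pull
    # where the remote has one new linear run of changesets, the triple
    # returned above is roughly
    #
    #   (nodes both sides have, [root of the missing run], remote heads)
    #
    # and pull() below hands the second and third elements to the
    # changegroup requests.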

    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base is None:
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = {}
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads[p1] = True
                if p2 in heads:
                    updated_heads[p2] = True

        # this is the set of all roots we have to push
        if heads:
            return subset, updated_heads.keys()
        else:
            return subset

    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
                                                            force=force)
            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None and remote.capable('changegroupsubset'):
                heads = rheads

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if not remote.capable('changegroupsubset'):
                    raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            del lock

    def push(self, remote, force=False, revs=None):
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if remote.capable('unbundle'):
            return self.push_unbundle(remote, force, revs)
        return self.push_addchangegroup(remote, force, revs)

    def prepush(self, remote, force, revs):
        common = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, common, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, common, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.append(r)
                    else:
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote heads!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 0
        elif inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))

        if revs is None:
            # use the fast path, no race possible on push
            cg = self._changegroup(common.keys(), 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads

    def push_addchangegroup(self, remote, force, revs):
        lock = remote.lock()
        try:
            ret = self.prepush(remote, force, revs)
            if ret[0] is not None:
                cg, remote_heads = ret
                return remote.addchangegroup(cg, 'push', self.url())
            return ret[1]
        finally:
            del lock

    def push_unbundle(self, remote, force, revs):
        # local repo finds heads on server, finds out what revs it
        # must push. once revs transferred, if server finds it has
        # different heads (someone else won commit/push race), server
        # aborts.

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            if force:
                remote_heads = ['force']
            return remote.unbundle(cg, remote_heads, 'push')
        return ret[1]

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug(_("list of changesets:\n"))
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """This function generates a changegroup consisting of all the nodes
        that are descendants of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument. It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.
        """

        if extranodes is None:
            # can we go through the fast path ?
            heads.sort()
            allheads = self.heads()
            allheads.sort()
            if heads == allheads:
                common = []
                # parents of bases are known from both sides
                for n in bases:
                    for p in self.changelog.parents(n):
                        if p != nullid:
                            common.append(p)
                return self._changegroup(common, source)

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst, source)
        # Some bases may turn out to be superfluous, and some heads may be
        # too. nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known. The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function. Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history. Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev
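        # (note, not from the original source: the pruning helpers below use
        # this as haslst.sort(cmp_by_rev_func(revlog)), ordering nodes from
        # oldest to newest within that revlog)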
1698
1702
1699 # If we determine that a particular file or manifest node must be a
1703 # If we determine that a particular file or manifest node must be a
1700 # node that the recipient of the changegroup will already have, we can
1704 # node that the recipient of the changegroup will already have, we can
1701 # also assume the recipient will have all the parents. This function
1705 # also assume the recipient will have all the parents. This function
1702 # prunes them from the set of missing nodes.
1706 # prunes them from the set of missing nodes.
1703 def prune_parents(revlog, hasset, msngset):
1707 def prune_parents(revlog, hasset, msngset):
1704 haslst = hasset.keys()
1708 haslst = hasset.keys()
1705 haslst.sort(cmp_by_rev_func(revlog))
1709 haslst.sort(cmp_by_rev_func(revlog))
1706 for node in haslst:
1710 for node in haslst:
1707 parentlst = [p for p in revlog.parents(node) if p != nullid]
1711 parentlst = [p for p in revlog.parents(node) if p != nullid]
1708 while parentlst:
1712 while parentlst:
1709 n = parentlst.pop()
1713 n = parentlst.pop()
1710 if n not in hasset:
1714 if n not in hasset:
1711 hasset[n] = 1
1715 hasset[n] = 1
1712 p = [p for p in revlog.parents(n) if p != nullid]
1716 p = [p for p in revlog.parents(n) if p != nullid]
1713 parentlst.extend(p)
1717 parentlst.extend(p)
1714 for n in hasset:
1718 for n in hasset:
1715 msngset.pop(n, None)
1719 msngset.pop(n, None)
1716
1720
1717 # This is a function generating function used to set up an environment
1721 # This is a function generating function used to set up an environment
1718 # for the inner function to execute in.
1722 # for the inner function to execute in.
1719 def manifest_and_file_collector(changedfileset):
1723 def manifest_and_file_collector(changedfileset):
1720 # This is an information gathering function that gathers
1724 # This is an information gathering function that gathers
1721 # information from each changeset node that goes out as part of
1725 # information from each changeset node that goes out as part of
1722 # the changegroup. The information gathered is a list of which
1726 # the changegroup. The information gathered is a list of which
1723 # manifest nodes are potentially required (the recipient may
1727 # manifest nodes are potentially required (the recipient may
1724 # already have them) and total list of all files which were
1728 # already have them) and total list of all files which were
1725 # changed in any changeset in the changegroup.
1729 # changed in any changeset in the changegroup.
1726 #
1730 #
1727 # We also remember the first changenode we saw any manifest
1731 # We also remember the first changenode we saw any manifest
1728 # referenced by so we can later determine which changenode 'owns'
1732 # referenced by so we can later determine which changenode 'owns'
1729 # the manifest.
1733 # the manifest.
1730 def collect_manifests_and_files(clnode):
1734 def collect_manifests_and_files(clnode):
1731 c = cl.read(clnode)
1735 c = cl.read(clnode)
1732 for f in c[3]:
1736 for f in c[3]:
1733 # This is to make sure we only have one instance of each
1737 # This is to make sure we only have one instance of each
1734 # filename string for each filename.
1738 # filename string for each filename.
1735 changedfileset.setdefault(f, f)
1739 changedfileset.setdefault(f, f)
1736 msng_mnfst_set.setdefault(c[0], clnode)
1740 msng_mnfst_set.setdefault(c[0], clnode)
1737 return collect_manifests_and_files
1741 return collect_manifests_and_files
1738
1742
1739 # Figure out which manifest nodes (of the ones we think might be part
1743 # Figure out which manifest nodes (of the ones we think might be part
1740 # of the changegroup) the recipient must know about and remove them
1744 # of the changegroup) the recipient must know about and remove them
1741 # from the changegroup.
1745 # from the changegroup.
1742 def prune_manifests():
1746 def prune_manifests():
1743 has_mnfst_set = {}
1747 has_mnfst_set = {}
1744 for n in msng_mnfst_set:
1748 for n in msng_mnfst_set:
1745 # If a 'missing' manifest thinks it belongs to a changenode
1749 # If a 'missing' manifest thinks it belongs to a changenode
1746 # the recipient is assumed to have, obviously the recipient
1750 # the recipient is assumed to have, obviously the recipient
1747 # must have that manifest.
1751 # must have that manifest.
1748 linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
1752 linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
1749 if linknode in has_cl_set:
1753 if linknode in has_cl_set:
1750 has_mnfst_set[n] = 1
1754 has_mnfst_set[n] = 1
1751 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1755 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1752
1756
1753 # Use the information collected in collect_manifests_and_files to say
1757 # Use the information collected in collect_manifests_and_files to say
1754 # which changenode any manifestnode belongs to.
1758 # which changenode any manifestnode belongs to.
1755 def lookup_manifest_link(mnfstnode):
1759 def lookup_manifest_link(mnfstnode):
1756 return msng_mnfst_set[mnfstnode]
1760 return msng_mnfst_set[mnfstnode]
1757
1761
1758 # A function generating function that sets up the initial environment
1762 # A function generating function that sets up the initial environment
1759 # the inner function.
1763 # the inner function.
1760 def filenode_collector(changedfiles):
1764 def filenode_collector(changedfiles):
1761 next_rev = [0]
1765 next_rev = [0]
1762 # This gathers information from each manifestnode included in the
1766 # This gathers information from each manifestnode included in the
1763 # changegroup about which filenodes the manifest node references
1767 # changegroup about which filenodes the manifest node references
1764 # so we can include those in the changegroup too.
1768 # so we can include those in the changegroup too.
1765 #
1769 #
1766 # It also remembers which changenode each filenode belongs to. It
1770 # It also remembers which changenode each filenode belongs to. It
1767 # does this by assuming the a filenode belongs to the changenode
1771 # does this by assuming the a filenode belongs to the changenode
1768 # the first manifest that references it belongs to.
1772 # the first manifest that references it belongs to.
1769 def collect_msng_filenodes(mnfstnode):
1773 def collect_msng_filenodes(mnfstnode):
1770 r = mnfst.rev(mnfstnode)
1774 r = mnfst.rev(mnfstnode)
1771 if r == next_rev[0]:
1775 if r == next_rev[0]:
1772 # If the last rev we looked at was the one just previous,
1776 # If the last rev we looked at was the one just previous,
1773 # we only need to see a diff.
1777 # we only need to see a diff.
1774 deltamf = mnfst.readdelta(mnfstnode)
1778 deltamf = mnfst.readdelta(mnfstnode)
1775 # For each line in the delta
1779 # For each line in the delta
1776 for f, fnode in deltamf.iteritems():
1780 for f, fnode in deltamf.iteritems():
1777 f = changedfiles.get(f, None)
1781 f = changedfiles.get(f, None)
1778 # And if the file is in the list of files we care
1782 # And if the file is in the list of files we care
1779 # about.
1783 # about.
1780 if f is not None:
1784 if f is not None:
1781 # Get the changenode this manifest belongs to
1785 # Get the changenode this manifest belongs to
1782 clnode = msng_mnfst_set[mnfstnode]
1786 clnode = msng_mnfst_set[mnfstnode]
1783 # Create the set of filenodes for the file if
1787 # Create the set of filenodes for the file if
1784 # there isn't one already.
1788 # there isn't one already.
1785 ndset = msng_filenode_set.setdefault(f, {})
1789 ndset = msng_filenode_set.setdefault(f, {})
1786 # And set the filenode's changelog node to the
1790 # And set the filenode's changelog node to the
1787 # manifest's if it hasn't been set already.
1791 # manifest's if it hasn't been set already.
1788 ndset.setdefault(fnode, clnode)
1792 ndset.setdefault(fnode, clnode)
1789 else:
1793 else:
1790 # Otherwise we need a full manifest.
1794 # Otherwise we need a full manifest.
1791 m = mnfst.read(mnfstnode)
1795 m = mnfst.read(mnfstnode)
1792 # For every file in we care about.
1796 # For every file in we care about.
1793 for f in changedfiles:
1797 for f in changedfiles:
1794 fnode = m.get(f, None)
1798 fnode = m.get(f, None)
1795 # If it's in the manifest
1799 # If it's in the manifest
1796 if fnode is not None:
1800 if fnode is not None:
1797 # See comments above.
1801 # See comments above.
1798 clnode = msng_mnfst_set[mnfstnode]
1802 clnode = msng_mnfst_set[mnfstnode]
1799 ndset = msng_filenode_set.setdefault(f, {})
1803 ndset = msng_filenode_set.setdefault(f, {})
1800 ndset.setdefault(fnode, clnode)
1804 ndset.setdefault(fnode, clnode)
1801 # Remember the revision we hope to see next.
1805 # Remember the revision we hope to see next.
1802 next_rev[0] = r + 1
1806 next_rev[0] = r + 1
1803 return collect_msng_filenodes
1807 return collect_msng_filenodes
1804
1808
1805 # We have a list of filenodes we think we need for a file, lets remove
1809 # We have a list of filenodes we think we need for a file, lets remove
1806 # all those we now the recipient must have.
1810 # all those we now the recipient must have.
1807 def prune_filenodes(f, filerevlog):
1811 def prune_filenodes(f, filerevlog):
1808 msngset = msng_filenode_set[f]
1812 msngset = msng_filenode_set[f]
1809 hasset = {}
1813 hasset = {}
1810 # If a 'missing' filenode thinks it belongs to a changenode we
1814 # If a 'missing' filenode thinks it belongs to a changenode we
1811 # assume the recipient must have, then the recipient must have
1815 # assume the recipient must have, then the recipient must have
1812 # that filenode.
1816 # that filenode.
1813 for n in msngset:
1817 for n in msngset:
1814 clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
1818 clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
1815 if clnode in has_cl_set:
1819 if clnode in has_cl_set:
1816 hasset[n] = 1
1820 hasset[n] = 1
1817 prune_parents(filerevlog, hasset, msngset)
1821 prune_parents(filerevlog, hasset, msngset)
1818
1822
1819 # A function generator function that sets up the a context for the
1823 # A function generator function that sets up the a context for the
1820 # inner function.
1824 # inner function.
1821 def lookup_filenode_link_func(fname):
1825 def lookup_filenode_link_func(fname):
1822 msngset = msng_filenode_set[fname]
1826 msngset = msng_filenode_set[fname]
1823 # Lookup the changenode the filenode belongs to.
1827 # Lookup the changenode the filenode belongs to.
1824 def lookup_filenode_link(fnode):
1828 def lookup_filenode_link(fnode):
1825 return msngset[fnode]
1829 return msngset[fnode]
1826 return lookup_filenode_link
1830 return lookup_filenode_link
1827
1831
1828 # Add the nodes that were explicitly requested.
1832 # Add the nodes that were explicitly requested.
1829 def add_extra_nodes(name, nodes):
1833 def add_extra_nodes(name, nodes):
1830 if not extranodes or name not in extranodes:
1834 if not extranodes or name not in extranodes:
1831 return
1835 return
1832
1836
1833 for node, linknode in extranodes[name]:
1837 for node, linknode in extranodes[name]:
1834 if node not in nodes:
1838 if node not in nodes:
1835 nodes[node] = linknode
1839 nodes[node] = linknode
1836
1840
1837 # Now that we have all theses utility functions to help out and
1841 # Now that we have all theses utility functions to help out and
1838 # logically divide up the task, generate the group.
1842 # logically divide up the task, generate the group.
1839 def gengroup():
1843 def gengroup():
1840 # The set of changed files starts empty.
1844 # The set of changed files starts empty.
1841 changedfiles = {}
1845 changedfiles = {}
1842 # Create a changenode group generator that will call our functions
1846 # Create a changenode group generator that will call our functions
1843 # back to lookup the owning changenode and collect information.
1847 # back to lookup the owning changenode and collect information.
1844 group = cl.group(msng_cl_lst, identity,
1848 group = cl.group(msng_cl_lst, identity,
1845 manifest_and_file_collector(changedfiles))
1849 manifest_and_file_collector(changedfiles))
1846 for chnk in group:
1850 for chnk in group:
1847 yield chnk
1851 yield chnk
1848
1852
1849 # The list of manifests has been collected by the generator
1853 # The list of manifests has been collected by the generator
1850 # calling our functions back.
1854 # calling our functions back.
1851 prune_manifests()
1855 prune_manifests()
1852 add_extra_nodes(1, msng_mnfst_set)
1856 add_extra_nodes(1, msng_mnfst_set)
1853 msng_mnfst_lst = msng_mnfst_set.keys()
1857 msng_mnfst_lst = msng_mnfst_set.keys()
1854 # Sort the manifestnodes by revision number.
1858 # Sort the manifestnodes by revision number.
1855 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1859 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1856 # Create a generator for the manifestnodes that calls our lookup
1860 # Create a generator for the manifestnodes that calls our lookup
1857 # and data collection functions back.
1861 # and data collection functions back.
1858 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1862 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1859 filenode_collector(changedfiles))
1863 filenode_collector(changedfiles))
1860 for chnk in group:
1864 for chnk in group:
1861 yield chnk
1865 yield chnk
1862
1866
1863 # These are no longer needed; drop the references and free the
1867 # These are no longer needed; drop the references and free the
1864 # memory.
1868 # memory.
1865 msng_mnfst_lst = None
1869 msng_mnfst_lst = None
1866 msng_mnfst_set.clear()
1870 msng_mnfst_set.clear()
1867
1871
1868 if extranodes:
1872 if extranodes:
1869 for fname in extranodes:
1873 for fname in extranodes:
1870 if isinstance(fname, int):
1874 if isinstance(fname, int):
1871 continue
1875 continue
1872 msng_filenode_set.setdefault(fname, {})
1876 msng_filenode_set.setdefault(fname, {})
1873 changedfiles[fname] = 1
1877 changedfiles[fname] = 1
1874 # Go through all our files in order sorted by name.
1878 # Go through all our files in order sorted by name.
1875 for fname in util.sort(changedfiles):
1879 for fname in util.sort(changedfiles):
1876 filerevlog = self.file(fname)
1880 filerevlog = self.file(fname)
1877 if not len(filerevlog):
1881 if not len(filerevlog):
1878 raise util.Abort(_("empty or missing revlog for %s") % fname)
1882 raise util.Abort(_("empty or missing revlog for %s") % fname)
1879 # Toss out the filenodes that the recipient isn't really
1883 # Toss out the filenodes that the recipient isn't really
1880 # missing.
1884 # missing.
1881 if fname in msng_filenode_set:
1885 if fname in msng_filenode_set:
1882 prune_filenodes(fname, filerevlog)
1886 prune_filenodes(fname, filerevlog)
1883 add_extra_nodes(fname, msng_filenode_set[fname])
1887 add_extra_nodes(fname, msng_filenode_set[fname])
1884 msng_filenode_lst = msng_filenode_set[fname].keys()
1888 msng_filenode_lst = msng_filenode_set[fname].keys()
1885 else:
1889 else:
1886 msng_filenode_lst = []
1890 msng_filenode_lst = []
1887 # If any filenodes are left, generate the group for them,
1891 # If any filenodes are left, generate the group for them,
1888 # otherwise don't bother.
1892 # otherwise don't bother.
1889 if len(msng_filenode_lst) > 0:
1893 if len(msng_filenode_lst) > 0:
1890 yield changegroup.chunkheader(len(fname))
1894 yield changegroup.chunkheader(len(fname))
1891 yield fname
1895 yield fname
1892 # Sort the filenodes by their revision number.
1896 # Sort the filenodes by their revision number.
1893 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1897 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1894 # Create a group generator and only pass in a changenode
1898 # Create a group generator and only pass in a changenode
1895 # lookup function as we need to collect no information
1899 # lookup function as we need to collect no information
1896 # from filenodes.
1900 # from filenodes.
1897 group = filerevlog.group(msng_filenode_lst,
1901 group = filerevlog.group(msng_filenode_lst,
1898 lookup_filenode_link_func(fname))
1902 lookup_filenode_link_func(fname))
1899 for chnk in group:
1903 for chnk in group:
1900 yield chnk
1904 yield chnk
1901 if fname in msng_filenode_set:
1905 if fname in msng_filenode_set:
1902 # Don't need this anymore, toss it to free memory.
1906 # Don't need this anymore, toss it to free memory.
1903 del msng_filenode_set[fname]
1907 del msng_filenode_set[fname]
1904 # Signal that no more groups are left.
1908 # Signal that no more groups are left.
1905 yield changegroup.closechunk()
1909 yield changegroup.closechunk()
1906
1910
1907 if msng_cl_lst:
1911 if msng_cl_lst:
1908 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1912 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1909
1913
1910 return util.chunkbuffer(gengroup())
1914 return util.chunkbuffer(gengroup())
1911
1915
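The stream returned by gengroup() above is a flat sequence of length-prefixed chunks: the changelog group, then the manifest group, then one group per changed file, each preceded by a chunk carrying the filename, with an empty chunk closing every group and the stream as a whole. As a sketch of how a consumer walks that layout (modelled on the reader in addchangegroup() below; the handle_* callbacks are hypothetical stand-ins for real processing):

    def read_changegroup(source, handle_cl, handle_mf, handle_file):
        # changelog group: delta chunks until the empty close chunk
        for chunk in changegroup.chunkiter(source):
            handle_cl(chunk)
        # manifest group follows immediately
        for chunk in changegroup.chunkiter(source):
            handle_mf(chunk)
        # file groups: a filename chunk, then that file's delta group;
        # an empty filename chunk marks the end of the stream
        while 1:
            fname = changegroup.getchunk(source)
            if not fname:
                break
            for chunk in changegroup.chunkiter(source):
                handle_file(fname, chunk)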
1912 def changegroup(self, basenodes, source):
1916 def changegroup(self, basenodes, source):
1913 # to avoid a race we use changegroupsubset() (issue1320)
1917 # to avoid a race we use changegroupsubset() (issue1320)
1914 return self.changegroupsubset(basenodes, self.heads(), source)
1918 return self.changegroupsubset(basenodes, self.heads(), source)
1915
1919
1916 def _changegroup(self, common, source):
1920 def _changegroup(self, common, source):
1917 """Generate a changegroup of all nodes that we have that a recipient
1921 """Generate a changegroup of all nodes that we have that a recipient
1918 doesn't.
1922 doesn't.
1919
1923
1920 This is much easier than the previous function as we can assume that
1924 This is much easier than the previous function as we can assume that
1921 the recipient has any changenode we aren't sending them.
1925 the recipient has any changenode we aren't sending them.
1922
1926
1923 common is the set of common nodes between remote and self"""
1927 common is the set of common nodes between remote and self"""
1924
1928
1925 self.hook('preoutgoing', throw=True, source=source)
1929 self.hook('preoutgoing', throw=True, source=source)
1926
1930
1927 cl = self.changelog
1931 cl = self.changelog
1928 nodes = cl.findmissing(common)
1932 nodes = cl.findmissing(common)
1929 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1933 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1930 self.changegroupinfo(nodes, source)
1934 self.changegroupinfo(nodes, source)
1931
1935
1932 def identity(x):
1936 def identity(x):
1933 return x
1937 return x
1934
1938
1935 def gennodelst(log):
1939 def gennodelst(log):
1936 for r in log:
1940 for r in log:
1937 if log.linkrev(r) in revset:
1941 if log.linkrev(r) in revset:
1938 yield log.node(r)
1942 yield log.node(r)
1939
1943
1940 def changed_file_collector(changedfileset):
1944 def changed_file_collector(changedfileset):
1941 def collect_changed_files(clnode):
1945 def collect_changed_files(clnode):
1942 c = cl.read(clnode)
1946 c = cl.read(clnode)
1943 for fname in c[3]:
1947 for fname in c[3]:
1944 changedfileset[fname] = 1
1948 changedfileset[fname] = 1
1945 return collect_changed_files
1949 return collect_changed_files
1946
1950
1947 def lookuprevlink_func(revlog):
1951 def lookuprevlink_func(revlog):
1948 def lookuprevlink(n):
1952 def lookuprevlink(n):
1949 return cl.node(revlog.linkrev(revlog.rev(n)))
1953 return cl.node(revlog.linkrev(revlog.rev(n)))
1950 return lookuprevlink
1954 return lookuprevlink
1951
1955
1952 def gengroup():
1956 def gengroup():
1953 # construct a list of all changed files
1957 # construct a list of all changed files
1954 changedfiles = {}
1958 changedfiles = {}
1955
1959
1956 for chnk in cl.group(nodes, identity,
1960 for chnk in cl.group(nodes, identity,
1957 changed_file_collector(changedfiles)):
1961 changed_file_collector(changedfiles)):
1958 yield chnk
1962 yield chnk
1959
1963
1960 mnfst = self.manifest
1964 mnfst = self.manifest
1961 nodeiter = gennodelst(mnfst)
1965 nodeiter = gennodelst(mnfst)
1962 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1966 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1963 yield chnk
1967 yield chnk
1964
1968
1965 for fname in util.sort(changedfiles):
1969 for fname in util.sort(changedfiles):
1966 filerevlog = self.file(fname)
1970 filerevlog = self.file(fname)
1967 if not len(filerevlog):
1971 if not len(filerevlog):
1968 raise util.Abort(_("empty or missing revlog for %s") % fname)
1972 raise util.Abort(_("empty or missing revlog for %s") % fname)
1969 nodeiter = gennodelst(filerevlog)
1973 nodeiter = gennodelst(filerevlog)
1970 nodeiter = list(nodeiter)
1974 nodeiter = list(nodeiter)
1971 if nodeiter:
1975 if nodeiter:
1972 yield changegroup.chunkheader(len(fname))
1976 yield changegroup.chunkheader(len(fname))
1973 yield fname
1977 yield fname
1974 lookup = lookuprevlink_func(filerevlog)
1978 lookup = lookuprevlink_func(filerevlog)
1975 for chnk in filerevlog.group(nodeiter, lookup):
1979 for chnk in filerevlog.group(nodeiter, lookup):
1976 yield chnk
1980 yield chnk
1977
1981
1978 yield changegroup.closechunk()
1982 yield changegroup.closechunk()
1979
1983
1980 if nodes:
1984 if nodes:
1981 self.hook('outgoing', node=hex(nodes[0]), source=source)
1985 self.hook('outgoing', node=hex(nodes[0]), source=source)
1982
1986
1983 return util.chunkbuffer(gengroup())
1987 return util.chunkbuffer(gengroup())
1984
1988
1985 def addchangegroup(self, source, srctype, url, emptyok=False):
1989 def addchangegroup(self, source, srctype, url, emptyok=False):
1986 """add changegroup to repo.
1990 """add changegroup to repo.
1987
1991
1988 return values:
1992 return values:
1989 - nothing changed or no source: 0
1993 - nothing changed or no source: 0
1990 - more heads than before: 1+added heads (2..n)
1994 - more heads than before: 1+added heads (2..n)
1991 - less heads than before: -1-removed heads (-2..-n)
1995 - less heads than before: -1-removed heads (-2..-n)
1992 - number of heads stays the same: 1
1996 - number of heads stays the same: 1
1993 """
1997 """
1994 def csmap(x):
1998 def csmap(x):
1995 self.ui.debug(_("add changeset %s\n") % short(x))
1999 self.ui.debug(_("add changeset %s\n") % short(x))
1996 return len(cl)
2000 return len(cl)
1997
2001
1998 def revmap(x):
2002 def revmap(x):
1999 return cl.rev(x)
2003 return cl.rev(x)
2000
2004
2001 if not source:
2005 if not source:
2002 return 0
2006 return 0
2003
2007
2004 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2008 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2005
2009
2006 changesets = files = revisions = 0
2010 changesets = files = revisions = 0
2007
2011
2008 # write changelog data to temp files so concurrent readers will not see
2012 # write changelog data to temp files so concurrent readers will not see
2009 # an inconsistent view
2013 # an inconsistent view
2010 cl = self.changelog
2014 cl = self.changelog
2011 cl.delayupdate()
2015 cl.delayupdate()
2012 oldheads = len(cl.heads())
2016 oldheads = len(cl.heads())
2013
2017
2014 tr = self.transaction()
2018 tr = self.transaction()
2015 try:
2019 try:
2016 trp = weakref.proxy(tr)
2020 trp = weakref.proxy(tr)
2017 # pull off the changeset group
2021 # pull off the changeset group
2018 self.ui.status(_("adding changesets\n"))
2022 self.ui.status(_("adding changesets\n"))
2019 cor = len(cl) - 1
2023 cor = len(cl) - 1
2020 chunkiter = changegroup.chunkiter(source)
2024 chunkiter = changegroup.chunkiter(source)
2021 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
2025 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
2022 raise util.Abort(_("received changelog group is empty"))
2026 raise util.Abort(_("received changelog group is empty"))
2023 cnr = len(cl) - 1
2027 cnr = len(cl) - 1
2024 changesets = cnr - cor
2028 changesets = cnr - cor
2025
2029
2026 # pull off the manifest group
2030 # pull off the manifest group
2027 self.ui.status(_("adding manifests\n"))
2031 self.ui.status(_("adding manifests\n"))
2028 chunkiter = changegroup.chunkiter(source)
2032 chunkiter = changegroup.chunkiter(source)
2029 # no need to check for empty manifest group here:
2033 # no need to check for empty manifest group here:
2030 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2034 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2031 # no new manifest will be created and the manifest group will
2035 # no new manifest will be created and the manifest group will
2032 # be empty during the pull
2036 # be empty during the pull
2033 self.manifest.addgroup(chunkiter, revmap, trp)
2037 self.manifest.addgroup(chunkiter, revmap, trp)
2034
2038
2035 # process the files
2039 # process the files
2036 self.ui.status(_("adding file changes\n"))
2040 self.ui.status(_("adding file changes\n"))
2037 while 1:
2041 while 1:
2038 f = changegroup.getchunk(source)
2042 f = changegroup.getchunk(source)
2039 if not f:
2043 if not f:
2040 break
2044 break
2041 self.ui.debug(_("adding %s revisions\n") % f)
2045 self.ui.debug(_("adding %s revisions\n") % f)
2042 fl = self.file(f)
2046 fl = self.file(f)
2043 o = len(fl)
2047 o = len(fl)
2044 chunkiter = changegroup.chunkiter(source)
2048 chunkiter = changegroup.chunkiter(source)
2045 if fl.addgroup(chunkiter, revmap, trp) is None:
2049 if fl.addgroup(chunkiter, revmap, trp) is None:
2046 raise util.Abort(_("received file revlog group is empty"))
2050 raise util.Abort(_("received file revlog group is empty"))
2047 revisions += len(fl) - o
2051 revisions += len(fl) - o
2048 files += 1
2052 files += 1
2049
2053
2050 newheads = len(self.changelog.heads())
2054 newheads = len(self.changelog.heads())
2051 heads = ""
2055 heads = ""
2052 if oldheads and newheads != oldheads:
2056 if oldheads and newheads != oldheads:
2053 heads = _(" (%+d heads)") % (newheads - oldheads)
2057 heads = _(" (%+d heads)") % (newheads - oldheads)
2054
2058
2055 self.ui.status(_("added %d changesets"
2059 self.ui.status(_("added %d changesets"
2056 " with %d changes to %d files%s\n")
2060 " with %d changes to %d files%s\n")
2057 % (changesets, revisions, files, heads))
2061 % (changesets, revisions, files, heads))
2058
2062
2059 if changesets > 0:
2063 if changesets > 0:
2060 p = lambda: self.changelog.writepending() and self.root or ""
2064 p = lambda: self.changelog.writepending() and self.root or ""
2061 self.hook('pretxnchangegroup', throw=True,
2065 self.hook('pretxnchangegroup', throw=True,
2062 node=hex(self.changelog.node(cor+1)), source=srctype,
2066 node=hex(self.changelog.node(cor+1)), source=srctype,
2063 url=url, pending=p)
2067 url=url, pending=p)
2064
2068
2065 # make changelog see real files again
2069 # make changelog see real files again
2066 cl.finalize(trp)
2070 cl.finalize(trp)
2067
2071
2068 tr.close()
2072 tr.close()
2069 finally:
2073 finally:
2070 del tr
2074 del tr
2071
2075
2072 if changesets > 0:
2076 if changesets > 0:
2073 # forcefully update the on-disk branch cache
2077 # forcefully update the on-disk branch cache
2074 self.ui.debug(_("updating the branch cache\n"))
2078 self.ui.debug(_("updating the branch cache\n"))
2075 self.branchtags()
2079 self.branchtags()
2076 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
2080 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
2077 source=srctype, url=url)
2081 source=srctype, url=url)
2078
2082
2079 for i in xrange(cor + 1, cnr + 1):
2083 for i in xrange(cor + 1, cnr + 1):
2080 self.hook("incoming", node=hex(self.changelog.node(i)),
2084 self.hook("incoming", node=hex(self.changelog.node(i)),
2081 source=srctype, url=url)
2085 source=srctype, url=url)
2082
2086
2083 # never return 0 here:
2087 # never return 0 here:
2084 if newheads < oldheads:
2088 if newheads < oldheads:
2085 return newheads - oldheads - 1
2089 return newheads - oldheads - 1
2086 else:
2090 else:
2087 return newheads - oldheads + 1
2091 return newheads - oldheads + 1
2088
2092
2089
2093
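To make the return-value encoding documented in addchangegroup() concrete, a caller can decode the head delta like this (a sketch only; repo, source and url are hypothetical):

    ret = repo.addchangegroup(source, 'pull', url)
    if ret == 0:
        pass                            # nothing changed, or no source
    elif ret > 1:
        heads_added = ret - 1           # 2..n encodes 1..n-1 new heads
    elif ret < 0:
        heads_removed = -ret - 1        # -2..-n encodes 1..n-1 removed heads
    else:
        pass                            # ret == 1: head count unchanged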
2090 def stream_in(self, remote):
2094 def stream_in(self, remote):
2091 fp = remote.stream_out()
2095 fp = remote.stream_out()
2092 l = fp.readline()
2096 l = fp.readline()
2093 try:
2097 try:
2094 resp = int(l)
2098 resp = int(l)
2095 except ValueError:
2099 except ValueError:
2096 raise error.ResponseError(
2100 raise error.ResponseError(
2097 _('Unexpected response from remote server:'), l)
2101 _('Unexpected response from remote server:'), l)
2098 if resp == 1:
2102 if resp == 1:
2099 raise util.Abort(_('operation forbidden by server'))
2103 raise util.Abort(_('operation forbidden by server'))
2100 elif resp == 2:
2104 elif resp == 2:
2101 raise util.Abort(_('locking the remote repository failed'))
2105 raise util.Abort(_('locking the remote repository failed'))
2102 elif resp != 0:
2106 elif resp != 0:
2103 raise util.Abort(_('the server sent an unknown error code'))
2107 raise util.Abort(_('the server sent an unknown error code'))
2104 self.ui.status(_('streaming all changes\n'))
2108 self.ui.status(_('streaming all changes\n'))
2105 l = fp.readline()
2109 l = fp.readline()
2106 try:
2110 try:
2107 total_files, total_bytes = map(int, l.split(' ', 1))
2111 total_files, total_bytes = map(int, l.split(' ', 1))
2108 except (ValueError, TypeError):
2112 except (ValueError, TypeError):
2109 raise error.ResponseError(
2113 raise error.ResponseError(
2110 _('Unexpected response from remote server:'), l)
2114 _('Unexpected response from remote server:'), l)
2111 self.ui.status(_('%d files to transfer, %s of data\n') %
2115 self.ui.status(_('%d files to transfer, %s of data\n') %
2112 (total_files, util.bytecount(total_bytes)))
2116 (total_files, util.bytecount(total_bytes)))
2113 start = time.time()
2117 start = time.time()
2114 for i in xrange(total_files):
2118 for i in xrange(total_files):
2115 # XXX doesn't support '\n' or '\r' in filenames
2119 # XXX doesn't support '\n' or '\r' in filenames
2116 l = fp.readline()
2120 l = fp.readline()
2117 try:
2121 try:
2118 name, size = l.split('\0', 1)
2122 name, size = l.split('\0', 1)
2119 size = int(size)
2123 size = int(size)
2120 except (ValueError, TypeError):
2124 except (ValueError, TypeError):
2121 raise error.ResponseError(
2125 raise error.ResponseError(
2122 _('Unexpected response from remote server:'), l)
2126 _('Unexpected response from remote server:'), l)
2123 self.ui.debug(_('adding %s (%s)\n') % (name, util.bytecount(size)))
2127 self.ui.debug(_('adding %s (%s)\n') % (name, util.bytecount(size)))
2124 ofp = self.sopener(name, 'w')
2128 ofp = self.sopener(name, 'w')
2125 for chunk in util.filechunkiter(fp, limit=size):
2129 for chunk in util.filechunkiter(fp, limit=size):
2126 ofp.write(chunk)
2130 ofp.write(chunk)
2127 ofp.close()
2131 ofp.close()
2128 elapsed = time.time() - start
2132 elapsed = time.time() - start
2129 if elapsed <= 0:
2133 if elapsed <= 0:
2130 elapsed = 0.001
2134 elapsed = 0.001
2131 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2135 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2132 (util.bytecount(total_bytes), elapsed,
2136 (util.bytecount(total_bytes), elapsed,
2133 util.bytecount(total_bytes / elapsed)))
2137 util.bytecount(total_bytes / elapsed)))
2134 self.invalidate()
2138 self.invalidate()
2135 return len(self.heads()) + 1
2139 return len(self.heads()) + 1
2136
2140
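For reference, the wire format stream_in() parses is: a status line (0 OK, 1 operation forbidden, 2 remote locking failed), a 'total_files total_bytes' line, then for each file a 'name\0size' line followed by exactly size raw bytes. A toy emitter for that format, purely illustrative (the real producer is the server-side streaming code, which is not part of this diff):

    def toy_stream_out(fp, entries):
        # entries: hypothetical list of (name, data) pairs
        fp.write('0\n')                              # status 0: OK
        total = 0
        for name, data in entries:
            total += len(data)
        fp.write('%d %d\n' % (len(entries), total))
        for name, data in entries:
            # like the reader above, this cannot cope with '\n' or '\r'
            # in filenames
            fp.write('%s\0%d\n' % (name, len(data)))
            fp.write(data)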
2137 def clone(self, remote, heads=[], stream=False):
2141 def clone(self, remote, heads=[], stream=False):
2138 '''clone remote repository.
2142 '''clone remote repository.
2139
2143
2140 keyword arguments:
2144 keyword arguments:
2141 heads: list of revs to clone (forces use of pull)
2145 heads: list of revs to clone (forces use of pull)
2142 stream: use streaming clone if possible'''
2146 stream: use streaming clone if possible'''
2143
2147
2144 # now, all clients that can request uncompressed clones can
2148 # now, all clients that can request uncompressed clones can
2145 # read repo formats supported by all servers that can serve
2149 # read repo formats supported by all servers that can serve
2146 # them.
2150 # them.
2147
2151
2148 # if revlog format changes, client will have to check version
2152 # if revlog format changes, client will have to check version
2149 # and format flags on "stream" capability, and use
2153 # and format flags on "stream" capability, and use
2150 # uncompressed only if compatible.
2154 # uncompressed only if compatible.
2151
2155
2152 if stream and not heads and remote.capable('stream'):
2156 if stream and not heads and remote.capable('stream'):
2153 return self.stream_in(remote)
2157 return self.stream_in(remote)
2154 return self.pull(remote, heads)
2158 return self.pull(remote, heads)
2155
2159
2156 # used to avoid circular references so destructors work
2160 # used to avoid circular references so destructors work
2157 def aftertrans(files):
2161 def aftertrans(files):
2158 renamefiles = [tuple(t) for t in files]
2162 renamefiles = [tuple(t) for t in files]
2159 def a():
2163 def a():
2160 for src, dest in renamefiles:
2164 for src, dest in renamefiles:
2161 util.rename(src, dest)
2165 util.rename(src, dest)
2162 return a
2166 return a
2163
2167
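The tuple-copying in aftertrans() is what avoids the cycle: the transaction keeps only the returned closure, and that closure holds plain (src, dest) pairs with no reference back to the repository, so a pending transaction cannot keep the repo and its destructors alive. A minimal usage sketch with a hypothetical file pair:

    onclose = aftertrans([('journal', 'undo')])  # snapshot the renames now
    # ... transaction work happens ...
    onclose()                                    # runs util.rename('journal', 'undo')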
2164 def instance(ui, path, create):
2168 def instance(ui, path, create):
2165 return localrepository(ui, util.drop_scheme('file', path), create)
2169 return localrepository(ui, util.drop_scheme('file', path), create)
2166
2170
2167 def islocal(path):
2171 def islocal(path):
2168 return True
2172 return True
@@ -1,107 +1,122
1 # lock.py - simple locking scheme for mercurial
1 # lock.py - simple locking scheme for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 import errno, os, socket, time, util, error
8 import errno, os, socket, time, util, error
9
9
10 class lock(object):
10 class lock(object):
11 # the lock is a symlink on platforms that support it, a file on others.
11 # the lock is a symlink on platforms that support it, a file on others.
12
12
13 # a symlink is used because creating the directory entry and its
13 # a symlink is used because creating the directory entry and its
14 # contents is atomic, even over NFS.
14 # contents is atomic, even over NFS.
15
15
16 # old-style lock: symlink to pid
16 # old-style lock: symlink to pid
17 # new-style lock: symlink to hostname:pid
17 # new-style lock: symlink to hostname:pid
18
18
19 _host = None
19 _host = None
20
20
21 def __init__(self, file, timeout=-1, releasefn=None, desc=None):
21 def __init__(self, file, timeout=-1, releasefn=None, desc=None):
22 self.f = file
22 self.f = file
23 self.held = 0
23 self.held = 0
24 self.timeout = timeout
24 self.timeout = timeout
25 self.releasefn = releasefn
25 self.releasefn = releasefn
26 self.desc = desc
26 self.desc = desc
27 self.lock()
27 self.lock()
28
28
29 def __del__(self):
29 def __del__(self):
30 if self.held:
31 # ensure the lock will be removed
32 # even if recursive locking occurred
33 self.held = 1
34
30 self.release()
35 self.release()
31
36
32 def lock(self):
37 def lock(self):
33 timeout = self.timeout
38 timeout = self.timeout
34 while 1:
39 while 1:
35 try:
40 try:
36 self.trylock()
41 self.trylock()
37 return 1
42 return 1
38 except error.LockHeld, inst:
43 except error.LockHeld, inst:
39 if timeout != 0:
44 if timeout != 0:
40 time.sleep(1)
45 time.sleep(1)
41 if timeout > 0:
46 if timeout > 0:
42 timeout -= 1
47 timeout -= 1
43 continue
48 continue
44 raise error.LockHeld(errno.ETIMEDOUT, inst.filename, self.desc,
49 raise error.LockHeld(errno.ETIMEDOUT, inst.filename, self.desc,
45 inst.locker)
50 inst.locker)
46
51
47 def trylock(self):
52 def trylock(self):
53 if self.held:
54 self.held += 1
55 return
48 if lock._host is None:
56 if lock._host is None:
49 lock._host = socket.gethostname()
57 lock._host = socket.gethostname()
50 lockname = '%s:%s' % (lock._host, os.getpid())
58 lockname = '%s:%s' % (lock._host, os.getpid())
51 while not self.held:
59 while not self.held:
52 try:
60 try:
53 util.makelock(lockname, self.f)
61 util.makelock(lockname, self.f)
54 self.held = 1
62 self.held = 1
55 except (OSError, IOError), why:
63 except (OSError, IOError), why:
56 if why.errno == errno.EEXIST:
64 if why.errno == errno.EEXIST:
57 locker = self.testlock()
65 locker = self.testlock()
58 if locker is not None:
66 if locker is not None:
59 raise error.LockHeld(errno.EAGAIN, self.f, self.desc,
67 raise error.LockHeld(errno.EAGAIN, self.f, self.desc,
60 locker)
68 locker)
61 else:
69 else:
62 raise error.LockUnavailable(why.errno, why.strerror,
70 raise error.LockUnavailable(why.errno, why.strerror,
63 why.filename, self.desc)
71 why.filename, self.desc)
64
72
65 def testlock(self):
73 def testlock(self):
66 """return id of locker if lock is valid, else None.
74 """return id of locker if lock is valid, else None.
67
75
68 With an old-style lock, we cannot tell what machine the locker is on.
76 With an old-style lock, we cannot tell what machine the locker is on.
69 With a new-style lock, if the locker is on this machine, we can
77 With a new-style lock, if the locker is on this machine, we can
70 see if the locker is alive. If the locker is on this machine but
78 see if the locker is alive. If the locker is on this machine but
71 not alive, we can safely break the lock.
79 not alive, we can safely break the lock.
72
80
73 The lock file is only deleted when None is returned.
81 The lock file is only deleted when None is returned.
74
82
75 """
83 """
76 locker = util.readlock(self.f)
84 locker = util.readlock(self.f)
77 try:
85 try:
78 host, pid = locker.split(":", 1)
86 host, pid = locker.split(":", 1)
79 except ValueError:
87 except ValueError:
80 return locker
88 return locker
81 if host != lock._host:
89 if host != lock._host:
82 return locker
90 return locker
83 try:
91 try:
84 pid = int(pid)
92 pid = int(pid)
85 except:
93 except:
86 return locker
94 return locker
87 if util.testpid(pid):
95 if util.testpid(pid):
88 return locker
96 return locker
89 # if locker dead, break lock. must do this with another lock
97 # if locker dead, break lock. must do this with another lock
90 # held, or can race and break valid lock.
98 # held, or can race and break valid lock.
91 try:
99 try:
92 l = lock(self.f + '.break')
100 l = lock(self.f + '.break')
93 l.trylock()
101 l.trylock()
94 os.unlink(self.f)
102 os.unlink(self.f)
95 l.release()
103 l.release()
96 except error.LockError:
104 except error.LockError:
97 return locker
105 return locker
98
106
99 def release(self):
107 def release(self):
100 if self.held:
108 if self.held > 1:
109 self.held -= 1
110 elif self.held == 1:
101 self.held = 0
111 self.held = 0
102 if self.releasefn:
112 if self.releasefn:
103 self.releasefn()
113 self.releasefn()
104 try:
114 try:
105 os.unlink(self.f)
115 os.unlink(self.f)
106 except: pass
116 except: pass
107
117
118 def release(*locks):
119 for lock in locks:
120 if lock is not None:
121 lock.release()
122
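Taken together, the lock.py changes make the lock re-entrant: trylock() on an already-held lock just bumps the counter, release() only removes the lock file once the counter drops back to zero, and the module-level release() helper is the intended replacement for refcount-based releasing. A sketch of the new behaviour (the lock file path is hypothetical):

    l = lock('.hg/store/lock', timeout=5)  # acquires the lock; held == 1
    l.trylock()                            # re-entry: held == 2, no new lock file
    l.release()                            # held == 1, lock file still present
    release(l, None)                       # helper skips None and releases l for good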